From 26281d569f3755e19237560f2049760610ba78eb Mon Sep 17 00:00:00 2001 From: Cong Zhao Date: Tue, 27 Sep 2022 06:50:22 +0800 Subject: [PATCH 01/59] [fix][doc] Fix maxNumberOfRejectedRequestPerConnection doc (#17821) * Fix maxNumberOfRejectedRequestPerConnection doc * fix doc in 2.8.x docs --- .../org/apache/pulsar/client/api/ClientConfiguration.java | 4 ++-- .../main/java/org/apache/pulsar/client/api/ClientBuilder.java | 2 +- .../pulsar/client/impl/conf/ClientConfigurationData.java | 2 +- site2/docs/client-libraries-java.md | 2 +- .../versioned_docs/version-2.10.x/client-libraries-java.md | 2 +- .../versioned_docs/version-2.8.x/client-libraries-java.md | 2 +- .../versioned_docs/version-2.9.x/client-libraries-java.md | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java index 52a86e23f06be..6ccb442c70487 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java @@ -319,7 +319,7 @@ public void setConcurrentLookupRequest(int concurrentLookupRequest) { } /** - * Get configured max number of reject-request in a time-frame (30 seconds) after which connection will be closed. + * Get configured max number of reject-request in a time-frame (60 seconds) after which connection will be closed. * * @return */ @@ -328,7 +328,7 @@ public int getMaxNumberOfRejectedRequestPerConnection() { } /** - * Set max number of broker-rejected requests in a certain time-frame (30 seconds) after which current connection. + * Set max number of broker-rejected requests in a certain time-frame (60 seconds) after which current connection. 
* will be closed and client creates a new connection that give chance to connect a different broker (default: * 50) * diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ClientBuilder.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ClientBuilder.java index 6d44f20b8eef4..17852bc545dc0 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ClientBuilder.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ClientBuilder.java @@ -489,7 +489,7 @@ ClientBuilder authentication(String authPluginClassName, Map aut ClientBuilder maxLookupRedirects(int maxLookupRedirects); /** - * Set max number of broker-rejected requests in a certain time-frame (30 seconds) after which current connection + * Set max number of broker-rejected requests in a certain time-frame (60 seconds) after which current connection * will be closed and client creates a new connection that give chance to connect a different broker (default: * 50). * diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java index e6f25f4acf187..653dded4de67b 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java @@ -195,7 +195,7 @@ public class ClientConfigurationData implements Serializable, Cloneable { @ApiModelProperty( name = "maxNumberOfRejectedRequestPerConnection", - value = "Maximum number of rejected requests of a broker in a certain time frame (30 seconds) " + value = "Maximum number of rejected requests of a broker in a certain time frame (60 seconds) " + "after the current connection is closed and the client " + "creating a new connection to connect to a different broker." 
) diff --git a/site2/docs/client-libraries-java.md b/site2/docs/client-libraries-java.md index cbe3156b5456b..2135c165a45b3 100644 --- a/site2/docs/client-libraries-java.md +++ b/site2/docs/client-libraries-java.md @@ -127,7 +127,7 @@ If you create a client, you can use the `loadConf` configuration. The following `tlsHostnameVerificationEnable` |boolean | Whether to enable TLS hostname verification|false `concurrentLookupRequest`|int|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000 `maxLookupRequest`|int|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000 -`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 +`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (60 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 `keepAliveIntervalSeconds`|int|Seconds of keeping alive interval for each client broker connection|30 `connectionTimeoutMs`|int|Duration of waiting for a connection to a broker to be established

If the duration passes without a response from a broker, the connection attempt is dropped|10000 `requestTimeoutMs`|int|Maximum duration for completing a request |60000 diff --git a/site2/website/versioned_docs/version-2.10.x/client-libraries-java.md b/site2/website/versioned_docs/version-2.10.x/client-libraries-java.md index 2709dab581581..6761a10f31704 100644 --- a/site2/website/versioned_docs/version-2.10.x/client-libraries-java.md +++ b/site2/website/versioned_docs/version-2.10.x/client-libraries-java.md @@ -147,7 +147,7 @@ If you create a client, you can use the `loadConf` configuration. The following `tlsHostnameVerificationEnable` |boolean | Whether to enable TLS hostname verification|false `concurrentLookupRequest`|int|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000 `maxLookupRequest`|int|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000 -`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 +`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (60 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 `keepAliveIntervalSeconds`|int|Seconds of keeping alive interval for each client broker connection|30 `connectionTimeoutMs`|int|Duration of waiting for a connection to a broker to be established

If the duration passes without a response from a broker, the connection attempt is dropped|10000 `requestTimeoutMs`|int|Maximum duration for completing a request |60000 diff --git a/site2/website/versioned_docs/version-2.8.x/client-libraries-java.md b/site2/website/versioned_docs/version-2.8.x/client-libraries-java.md index 930b59ac19bb4..df8fad4678f13 100644 --- a/site2/website/versioned_docs/version-2.8.x/client-libraries-java.md +++ b/site2/website/versioned_docs/version-2.8.x/client-libraries-java.md @@ -134,7 +134,7 @@ boolean|`tlsAllowInsecureConnection`|Whether the Pulsar client accepts untrusted boolean | `tlsHostnameVerificationEnable` | Whether to enable TLS hostname verification|false int|`concurrentLookupRequest`|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000 int|`maxLookupRequest`|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000 -int|`maxNumberOfRejectedRequestPerConnection`|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 +int|`maxNumberOfRejectedRequestPerConnection`|The maximum number of rejected requests of a broker in a certain time frame (60 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 int|`keepAliveIntervalSeconds`|Seconds of keeping alive interval for each client broker connection|30 int|`connectionTimeoutMs`|Duration of waiting for a connection to a broker to be established

If the duration passes without a response from a broker, the connection attempt is dropped|10000 int|`requestTimeoutMs`|Maximum duration for completing a request |60000 diff --git a/site2/website/versioned_docs/version-2.9.x/client-libraries-java.md b/site2/website/versioned_docs/version-2.9.x/client-libraries-java.md index 56fb54d633daf..a611617b6f584 100644 --- a/site2/website/versioned_docs/version-2.9.x/client-libraries-java.md +++ b/site2/website/versioned_docs/version-2.9.x/client-libraries-java.md @@ -134,7 +134,7 @@ If you create a client, you can use the `loadConf` configuration. The following `tlsHostnameVerificationEnable` |boolean | Whether to enable TLS hostname verification|false `concurrentLookupRequest`|int|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000 `maxLookupRequest`|int|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000 -`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 +`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (60 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50 `keepAliveIntervalSeconds`|int|Seconds of keeping alive interval for each client broker connection|30 `connectionTimeoutMs`|int|Duration of waiting for a connection to a broker to be established

If the duration passes without a response from a broker, the connection attempt is dropped|10000 `requestTimeoutMs`|int|Maximum duration for completing a request |60000 From 518cdcd9c2c17a35c7843341b1a8ff85ebce7113 Mon Sep 17 00:00:00 2001 From: fengyubiao Date: Tue, 27 Sep 2022 10:43:38 +0800 Subject: [PATCH 02/59] [fix][metrics]wrong metrics text generated when label_cluster specified (#17704) * [fix][metrics]wrong metrics text generated when label_cluster specified * improve logic branch * mark test group --- .../PrometheusMetricsGeneratorUtils.java | 13 ++- .../PrometheusMetricsGeneratorUtilsTest.java | 102 ++++++++++++++++++ .../broker/stats/prometheus/package-info.java | 19 ++++ 3 files changed, 131 insertions(+), 3 deletions(-) create mode 100644 pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtilsTest.java create mode 100644 pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/package-info.java diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtils.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtils.java index d341e699bf7e8..c2aa40e030d80 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtils.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtils.java @@ -26,6 +26,7 @@ import java.io.OutputStream; import java.util.Enumeration; import java.util.List; +import org.apache.commons.collections4.CollectionUtils; import org.apache.pulsar.common.util.SimpleTextOutputStream; /** @@ -65,16 +66,22 @@ public static void generateSystemMetrics(SimpleTextOutputStream stream, String c for (int i = 0; i < metricFamily.samples.size(); i++) { Collector.MetricFamilySamples.Sample sample = metricFamily.samples.get(i); 
stream.write(sample.name); + stream.write("{"); if (!sample.labelNames.contains("cluster")) { - stream.write("{cluster=\"").write(cluster).write('"'); + stream.write("cluster=\"").write(cluster).write('"'); + // If label is empty, should not append ','. + if (!CollectionUtils.isEmpty(sample.labelNames)){ + stream.write(","); + } } for (int j = 0; j < sample.labelNames.size(); j++) { String labelValue = sample.labelValues.get(j); if (labelValue != null) { labelValue = labelValue.replace("\"", "\\\""); } - - stream.write(","); + if (j > 0) { + stream.write(","); + } stream.write(sample.labelNames.get(j)); stream.write("=\""); stream.write(labelValue); diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtilsTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtilsTest.java new file mode 100644 index 0000000000000..9bbfa5d771438 --- /dev/null +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGeneratorUtilsTest.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.stats.prometheus; + +import static org.testng.Assert.*; +import io.prometheus.client.CollectorRegistry; +import io.prometheus.client.Counter; +import java.io.ByteArrayOutputStream; +import java.util.Collections; +import java.util.UUID; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class PrometheusMetricsGeneratorUtilsTest { + + private static final String LABEL_NAME_CLUSTER = "cluster"; + + @Test + public void testGenerateSystemMetricsWithSpecifyCluster() throws Exception { + String defaultClusterValue = "cluster_test"; + String specifyClusterValue = "lb_x"; + String metricsName = "label_contains_cluster" + randomString(); + Counter counter = new Counter.Builder() + .name(metricsName) + .labelNames(LABEL_NAME_CLUSTER) + .help("x") + .register(CollectorRegistry.defaultRegistry); + counter.labels(specifyClusterValue).inc(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + PrometheusMetricsGeneratorUtils.generate(defaultClusterValue, out, Collections.emptyList()); + assertTrue(out.toString().contains( + String.format("%s_total{cluster=\"%s\"} 1.0", metricsName, specifyClusterValue) + )); + // cleanup + out.close(); + CollectorRegistry.defaultRegistry.unregister(counter); + } + + @Test + public void testGenerateSystemMetricsWithDefaultCluster() throws Exception { + String defaultClusterValue = "cluster_test"; + String labelName = "lb_name"; + String labelValue = "lb_value"; + // default cluster. 
+ String metricsName = "label_use_default_cluster" + randomString(); + Counter counter = new Counter.Builder() + .name(metricsName) + .labelNames(labelName) + .help("x") + .register(CollectorRegistry.defaultRegistry); + counter.labels(labelValue).inc(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + PrometheusMetricsGeneratorUtils.generate(defaultClusterValue, out, Collections.emptyList()); + assertTrue(out.toString().contains( + String.format("%s_total{cluster=\"%s\",%s=\"%s\"} 1.0", + metricsName, defaultClusterValue, labelName, labelValue) + )); + // cleanup + out.close(); + CollectorRegistry.defaultRegistry.unregister(counter); + } + + @Test + public void testGenerateSystemMetricsWithoutCustomizedLabel() throws Exception { + String defaultClusterValue = "cluster_test"; + // default cluster. + String metricsName = "label_without_customized_label" + randomString(); + Counter counter = new Counter.Builder() + .name(metricsName) + .help("x") + .register(CollectorRegistry.defaultRegistry); + counter.inc(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + PrometheusMetricsGeneratorUtils.generate(defaultClusterValue, out, Collections.emptyList()); + assertTrue(out.toString().contains( + String.format("%s_total{cluster=\"%s\"} 1.0", metricsName, defaultClusterValue) + )); + // cleanup + out.close(); + CollectorRegistry.defaultRegistry.unregister(counter); + } + + private static String randomString(){ + return UUID.randomUUID().toString().replaceAll("-", ""); + } +} diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/package-info.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/package-info.java new file mode 100644 index 0000000000000..3723fb4ff5c99 --- /dev/null +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/stats/prometheus/package-info.java @@ -0,0 +1,19 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.stats.prometheus; From 6528a914350ed8d99ca669901b66839cdf88fd64 Mon Sep 17 00:00:00 2001 From: Rajan Dhabalia Date: Mon, 26 Sep 2022 21:02:36 -0700 Subject: [PATCH 03/59] [Pulsar-init] Support cluster init using proxy url and protocol (#17844) --- .../apache/pulsar/PulsarClusterMetadataSetup.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java index a95e6121b3e53..7b498455d4bcf 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java @@ -35,6 +35,7 @@ import org.apache.pulsar.broker.resources.NamespaceResources; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.broker.resources.TenantResources; +import org.apache.pulsar.client.api.ProxyProtocol; import org.apache.pulsar.common.conf.InternalConfigurationData; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.SystemTopicNames; @@ -139,6 +140,16 @@ private static class Arguments { hidden = true) private 
String bookieMetadataServiceUri; + @Parameter(names = { "-pp", + "--proxy-protocol" }, + description = "Proxy protocol to select type of routing at proxy. Possible Values: [SNI]", + required = false) + private ProxyProtocol clusterProxyProtocol; + + @Parameter(names = { "-pu", + "--proxy-url" }, description = "Proxy-server URL to which to connect.", required = false) + private String clusterProxyUrl; + @Parameter(names = { "-h", "--help" }, description = "Show this help message") private boolean help = false; @@ -299,6 +310,8 @@ private static void initializeCluster(Arguments arguments) throws Exception { .serviceUrlTls(arguments.clusterWebServiceUrlTls) .brokerServiceUrl(arguments.clusterBrokerServiceUrl) .brokerServiceUrlTls(arguments.clusterBrokerServiceUrlTls) + .proxyServiceUrl(arguments.clusterProxyUrl) + .proxyProtocol(arguments.clusterProxyProtocol) .build(); if (!resources.getClusterResources().clusterExists(arguments.cluster)) { resources.getClusterResources().createCluster(arguments.cluster, clusterData); From 5e42e4d82a4ffc3298023c8cbce4adfb6867d6ad Mon Sep 17 00:00:00 2001 From: tison Date: Tue, 27 Sep 2022 16:15:51 +0800 Subject: [PATCH 04/59] docs: Updating Python installation section (#17796) --- site2/docs/client-libraries-cpp.md | 2 +- site2/docs/client-libraries-python.md | 45 +++++++-------------------- 2 files changed, 13 insertions(+), 34 deletions(-) diff --git a/site2/docs/client-libraries-cpp.md b/site2/docs/client-libraries-cpp.md index 05c24a17d2be1..027644b007dec 100644 --- a/site2/docs/client-libraries-cpp.md +++ b/site2/docs/client-libraries-cpp.md @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; You can use a Pulsar C++ client to create producers, consumers, and readers. -All the methods in producer, consumer, and reader of a C++ client are thread-safe. You can read the Doxygen-generated [API docs](/api/cpp) for the C++ client +All the methods in producer, consumer, and reader of a C++ client are thread-safe. 
You can read the [API docs](/api/cpp) for the C++ client. ## Installation diff --git a/site2/docs/client-libraries-python.md b/site2/docs/client-libraries-python.md index 8e4358fee83f1..6d45547aab2fa 100644 --- a/site2/docs/client-libraries-python.md +++ b/site2/docs/client-libraries-python.md @@ -9,29 +9,21 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; ```` +You can use a Pulsar Python client to create producers, consumers, and readers. -Pulsar Python client library is a wrapper over the existing [C++ client library](client-libraries-cpp.md) and exposes all of the [same features](/api/cpp). You can find the code in the [Python directory](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/python) of the C++ client code. +All the methods in producer, consumer, and reader of a Python client are thread-safe. You can read the [API docs](/api/python) for the Python client. -All the methods in producer, consumer, and reader of a Python client are thread-safe. +## Installation -pdoc-generated API docs for the Python client are available [here](/api/python). +Use [pip](https://pip.pypa.io/) to install the latest version: -## Install - -You can install the [`pulsar-client`](https://pypi.python.org/pypi/pulsar-client) library either via [PyPi](https://pypi.python.org/pypi), using [pip](#installation-using-pip), or by building the library from [source](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp). - -### Install using pip - -To install the `pulsar-client` library as a pre-built package using the [pip](https://pip.pypa.io/en/stable/) package manager: - -```shell -pip install pulsar-client==@pulsar:version_number@ +```bash +pip install 'pulsar-client==@pulsar:version_number@' ``` -### Optional dependencies -If you install the client libraries on Linux to support services like Pulsar functions or Avro serialization, you can install optional components alongside the `pulsar-client` library. 
+You can install optional components alongside the client library: -```shell +```bash # avro serialization pip install 'pulsar-client[avro]==@pulsar:version_number@' @@ -44,23 +36,10 @@ pip install 'pulsar-client[all]==@pulsar:version_number@' Installation via PyPi is available for the following Python versions: -Platform | Supported Python versions -:--------|:------------------------- -MacOS >= 11.0 | 3.7, 3.8, 3.9 and 3.10 -Linux (including Alpine Linux) | 3.7, 3.8, 3.9 and 3.10 - - -### Install from source - -To install the `pulsar-client` library by building from source, follow [instructions](client-libraries-cpp.md#compilation) and compile the Pulsar C++ client library. That builds the Python binding for the library. - -To install the built Python bindings: - -```shell -git clone https://github.com/apache/pulsar -cd pulsar/pulsar-client-cpp/python -sudo python setup.py install -``` +| Platform | Supported Python versions | +|:-------------------------------|:--------------------------| +| macOS (>= 11.0) | 3.7, 3.8, 3.9 and 3.10 | +| Linux (including Alpine Linux) | 3.7, 3.8, 3.9 and 3.10 | ## Connection URLs From 59ce90ce6b438717e4c23d3d2960354c10c2cb72 Mon Sep 17 00:00:00 2001 From: Andras Beni Date: Tue, 27 Sep 2022 10:46:52 +0200 Subject: [PATCH 05/59] [fix][cli] Quit PerformanceConsumer after receiving numMessages messages (#17750) --- .../testclient/PerformanceConsumer.java | 6 ++ .../tests/integration/cli/PerfToolTest.java | 91 +++++++++++++++++++ .../src/test/resources/pulsar-cli.xml | 1 + 3 files changed, 98 insertions(+) create mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java index d187796f3f24a..7f3a93b8ba66f 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java +++ 
b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java @@ -315,6 +315,12 @@ public static void main(String[] args) throws Exception { totalMessagesReceived.increment(); totalBytesReceived.add(msg.size()); + if (arguments.numMessages > 0 && totalMessagesReceived.sum() >= arguments.numMessages) { + log.info("------------------- DONE -----------------------"); + PerfClientUtils.exit(0); + thread.interrupt(); + } + if (limiter != null) { limiter.acquire(); } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java new file mode 100644 index 0000000000000..55af57d3b5224 --- /dev/null +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.tests.integration.cli; + +import static org.testng.Assert.fail; +import org.apache.pulsar.tests.integration.containers.ChaosContainer; +import org.apache.pulsar.tests.integration.containers.PulsarContainer; +import org.apache.pulsar.tests.integration.containers.ZKContainer; +import org.apache.pulsar.tests.integration.docker.ContainerExecResult; +import org.apache.pulsar.tests.integration.messaging.TopicMessagingBase; +import org.testng.Assert; +import org.testng.annotations.Test; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class PerfToolTest extends TopicMessagingBase { + + private static final int MESSAGE_COUNT = 50; + + @Test + private void testProduce() throws Exception { + String serviceUrl = "pulsar://" + pulsarCluster.getProxy().getContainerName() + ":" + PulsarContainer.BROKER_PORT; + final String topicName = getNonPartitionedTopic("testProduce", true); + // Using the ZK container as it is separate from brokers, so its environment resembles real world usage more + ZKContainer clientToolContainer = pulsarCluster.getZooKeeper(); + ContainerExecResult produceResult = produceWithPerfTool(clientToolContainer, serviceUrl, topicName); + checkOutputForLogs(produceResult,"PerformanceProducer - Aggregated throughput stats", + "PerformanceProducer - Aggregated latency stats"); + } + + @Test + private void testConsume() throws Exception { + String serviceUrl = "pulsar://" + pulsarCluster.getProxy().getContainerName() + ":" + PulsarContainer.BROKER_PORT; + final String topicName = getNonPartitionedTopic("testConsume", true); + // Using the ZK container as it is separate from brokers, so its environment resembles real world usage more + ZKContainer clientToolContainer = pulsarCluster.getZooKeeper(); + ContainerExecResult consumeResult = consumeWithPerfTool(clientToolContainer, serviceUrl, topicName); + 
checkOutputForLogs(consumeResult,"PerformanceConsumer - Aggregated throughput stats", + "PerformanceConsumer - Aggregated latency stats"); + } + + private ContainerExecResult produceWithPerfTool(ChaosContainer container, String url, String topic) throws Exception { + ContainerExecResult result = container.execCmd("bin/pulsar-perf", "produce", "-u", url, "-m", String.valueOf(MESSAGE_COUNT), topic); + + return failOnError("Performance producer", result); + } + + private ContainerExecResult consumeWithPerfTool(ChaosContainer container, String url, String topic) throws Exception { + CompletableFuture resultFuture = + container.execCmdAsync("bin/pulsar-perf", "consume", "-u", url, "-m", String.valueOf(MESSAGE_COUNT), topic); + produceWithPerfTool(container, url, topic); + + ContainerExecResult result = resultFuture.get(5, TimeUnit.SECONDS); + return failOnError("Performance consumer", result); + } + + private static ContainerExecResult failOnError(String processDesc, ContainerExecResult result) { + if (result.getExitCode() != 0) { + fail(processDesc + " failed. Command output:\n" + result.getStdout() + + "\nError output:\n" + result.getStderr()); + } + return result; + } + + private static void checkOutputForLogs(ContainerExecResult result, String... 
logs) { + String output = result.getStdout(); + for (String log : logs) { + Assert.assertTrue(output.contains(log), + "command output did not contain log message '" + log + "'.\nFull stdout is:\n" + output); + } + } + +} diff --git a/tests/integration/src/test/resources/pulsar-cli.xml b/tests/integration/src/test/resources/pulsar-cli.xml index 6cbf538c2162c..af55aca8a0098 100644 --- a/tests/integration/src/test/resources/pulsar-cli.xml +++ b/tests/integration/src/test/resources/pulsar-cli.xml @@ -32,6 +32,7 @@ + From 91f747f119c312c2abe5f2da79468285e6bfceec Mon Sep 17 00:00:00 2001 From: tison Date: Tue, 27 Sep 2022 17:07:15 +0800 Subject: [PATCH 06/59] docs: add developers-landing page to sidebars (#17780) Signed-off-by: tison Signed-off-by: tison --- site2/website/sidebars.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/site2/website/sidebars.json b/site2/website/sidebars.json index beb8078a181a4..1284959aa3153 100644 --- a/site2/website/sidebars.json +++ b/site2/website/sidebars.json @@ -355,6 +355,10 @@ { "type": "category", "label": "Development", + "link": { + "type": "doc", + "id": "developers-landing" + }, "items": [ "develop-tools", "developing-binary-protocol", From b89c1451551a6bbe681465726906a2e61c9d8a69 Mon Sep 17 00:00:00 2001 From: Ayman Khalil Date: Tue, 27 Sep 2022 02:17:03 -0700 Subject: [PATCH 07/59] [improve][pulsar-io-kafka] Add option to copy Kafka headers to Pulsar properties (#17829) --- .../pulsar/io/kafka/KafkaAbstractSource.java | 35 ++++++++++- .../pulsar/io/kafka/KafkaBytesSource.java | 6 +- .../pulsar/io/kafka/KafkaSourceConfig.java | 6 ++ .../pulsar/io/kafka/KafkaStringSource.java | 3 +- .../pulsar/io/kafka/KafkaBytesSourceTest.java | 60 +++++++++++++++++++ .../kafka/source/KafkaAbstractSourceTest.java | 5 +- 6 files changed, 107 insertions(+), 8 deletions(-) diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java 
b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java index 661c747871d3d..ba6498ba5b771 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java @@ -19,8 +19,10 @@ package org.apache.pulsar.io.kafka; +import io.jsonwebtoken.io.Encoders; import java.time.Duration; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -38,6 +40,7 @@ import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.config.SslConfigs; +import org.apache.kafka.common.header.Header; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.schema.KeyValue; import org.apache.pulsar.common.schema.KeyValueEncodingType; @@ -52,6 +55,9 @@ * Simple Kafka Source to transfer messages from a Kafka topic. 
*/ public abstract class KafkaAbstractSource extends PushSource { + public static final String HEADER_KAFKA_TOPIC_KEY = "__kafka_topic"; + public static final String HEADER_KAFKA_PTN_KEY = "__kafka_partition"; + public static final String HEADER_KAFKA_OFFSET_KEY = "__kafka_offset"; private static final Logger LOG = LoggerFactory.getLogger(KafkaAbstractSource.class); @@ -189,19 +195,36 @@ public void start() { public abstract KafkaRecord buildRecord(ConsumerRecord consumerRecord); + protected Map copyKafkaHeaders(ConsumerRecord consumerRecord) { + if (!kafkaSourceConfig.isCopyHeadersEnabled()) { + return Collections.emptyMap(); + } + Map properties = new HashMap<>(); + properties.put(HEADER_KAFKA_TOPIC_KEY, consumerRecord.topic()); + properties.put(HEADER_KAFKA_PTN_KEY, Integer.toString(consumerRecord.partition())); + properties.put(HEADER_KAFKA_OFFSET_KEY, Long.toString(consumerRecord.offset())); + for (Header header: consumerRecord.headers()) { + properties.put(header.key(), Encoders.BASE64.encode(header.value())); + } + return properties; + } + @Slf4j protected static class KafkaRecord implements Record { private final ConsumerRecord record; private final V value; private final Schema schema; + private final Map properties; @Getter private final CompletableFuture completableFuture = new CompletableFuture<>(); - public KafkaRecord(ConsumerRecord record, V value, Schema schema) { + public KafkaRecord(ConsumerRecord record, V value, Schema schema, + Map properties) { this.record = record; this.value = value; this.schema = schema; + this.properties = properties; } @Override public Optional getPartitionId() { @@ -237,6 +260,11 @@ public void ack() { public Schema getSchema() { return schema; } + + @Override + public Map getProperties(){ + return properties; + } } protected static class KeyValueKafkaRecord extends KafkaRecord implements KVRecord { @@ -244,8 +272,9 @@ protected static class KeyValueKafkaRecord extends KafkaRecord implements KVR private final Schema 
valueSchema; public KeyValueKafkaRecord(ConsumerRecord record, KeyValue value, - Schema keySchema, Schema valueSchema) { - super(record, value, null); + Schema keySchema, Schema valueSchema, + Map properties) { + super(record, value, null, properties); this.keySchema = keySchema; this.valueSchema = valueSchema; } diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaBytesSource.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaBytesSource.java index eacc3e8423a41..a270706e8a558 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaBytesSource.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaBytesSource.java @@ -124,13 +124,15 @@ public KafkaRecord buildRecord(ConsumerRecord consumerRecord) { return new KeyValueKafkaRecord(consumerRecord, new KeyValue<>(key, value), currentKeySchema, - currentValueSchema); + currentValueSchema, + copyKafkaHeaders(consumerRecord)); } else { Object value = consumerRecord.value(); return new KafkaRecord(consumerRecord, extractSimpleValue(value), - getSchemaFromObject(value, valueSchema)); + getSchemaFromObject(value, valueSchema), + copyKafkaHeaders(consumerRecord)); } } diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java index 28b5944ff3007..fb16352fc81f6 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java @@ -147,6 +147,12 @@ public class KafkaSourceConfig implements Serializable { "The consumer config properties to be passed to Consumer. 
Note that other properties specified " + "in the connector config file take precedence over this config.") private Map consumerConfigProperties; + @FieldDoc( + defaultValue = "false", + help = + "If true the Kafka message headers will be copied into Pulsar message properties. Since Pulsar properties " + + "is a Map, byte array values in the Kafka headers will be base64 encoded. ") + private boolean copyHeadersEnabled = false; public static KafkaSourceConfig load(String yamlFile) throws IOException { ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaStringSource.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaStringSource.java index fb333ebea7cfe..a8a7d4686e028 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaStringSource.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaStringSource.java @@ -33,7 +33,8 @@ public class KafkaStringSource extends KafkaAbstractSource { public KafkaRecord buildRecord(ConsumerRecord consumerRecord) { KafkaRecord record = new KafkaRecord(consumerRecord, new String((byte[]) consumerRecord.value(), StandardCharsets.UTF_8), - Schema.STRING); + Schema.STRING, + copyKafkaHeaders(consumerRecord)); return record; } diff --git a/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/KafkaBytesSourceTest.java b/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/KafkaBytesSourceTest.java index 4a3c295449972..98aaf2ffd724b 100644 --- a/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/KafkaBytesSourceTest.java +++ b/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/KafkaBytesSourceTest.java @@ -28,6 +28,7 @@ import com.google.common.collect.ImmutableMap; import io.confluent.kafka.serializers.KafkaAvroDeserializer; +import java.nio.charset.StandardCharsets; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; import 
org.apache.kafka.common.serialization.ByteBufferDeserializer; @@ -44,6 +45,7 @@ import org.apache.pulsar.common.schema.KeyValue; import org.apache.pulsar.common.schema.KeyValueEncodingType; import org.apache.pulsar.io.core.SourceContext; +import org.bouncycastle.util.encoders.Base64; import org.mockito.Mockito; import org.testng.annotations.Test; @@ -115,6 +117,64 @@ public void testKeyValueSchema() throws Exception { ByteBuffer.wrap(new StringSerializer().serialize("test", "test"))); } + @Test + public void testCopyKafkaHeadersEnabled() throws Exception { + ByteBuffer key = ByteBuffer.wrap(new IntegerSerializer().serialize("test", 10)); + ByteBuffer value = ByteBuffer.wrap(new StringSerializer().serialize("test", "test")); + KafkaBytesSource source = new KafkaBytesSource(); + Map config = new HashMap<>(); + config.put("copyHeadersEnabled", true); + config.put("topic","test"); + config.put("bootstrapServers","localhost:9092"); + config.put("groupId", "test"); + config.put("valueDeserializationClass", IntegerDeserializer.class.getName()); + config.put("keyDeserializationClass", StringDeserializer.class.getName()); + config.put("consumerConfigProperties", ImmutableMap.builder() + .put("schema.registry.url", "http://localhost:8081") + .build()); + source.open(config, Mockito.mock(SourceContext.class)); + ConsumerRecord record = new ConsumerRecord("test", 88, 99, key, value); + record.headers().add("k1", "v1".getBytes(StandardCharsets.UTF_8)); + record.headers().add("k2", new byte[]{0xF}); + + Map props = source.copyKafkaHeaders(record); + assertEquals(props.size(), 5); + assertTrue(props.containsKey("__kafka_topic")); + assertTrue(props.containsKey("__kafka_partition")); + assertTrue(props.containsKey("__kafka_offset")); + assertTrue(props.containsKey("k1")); + assertTrue(props.containsKey("k2")); + + assertEquals(props.get("__kafka_topic"), "test"); + assertEquals(props.get("__kafka_partition"), "88"); + assertEquals(props.get("__kafka_offset"), "99"); + 
assertEquals(Base64.decode(props.get("k1")), "v1".getBytes(StandardCharsets.UTF_8)); + assertEquals(Base64.decode(props.get("k2")), new byte[]{0xF}); + } + + @Test + public void testCopyKafkaHeadersDisabled() throws Exception { + ByteBuffer key = ByteBuffer.wrap(new IntegerSerializer().serialize("test", 10)); + ByteBuffer value = ByteBuffer.wrap(new StringSerializer().serialize("test", "test")); + KafkaBytesSource source = new KafkaBytesSource(); + Map config = new HashMap<>(); + config.put("topic","test"); + config.put("bootstrapServers","localhost:9092"); + config.put("groupId", "test"); + config.put("valueDeserializationClass", IntegerDeserializer.class.getName()); + config.put("keyDeserializationClass", StringDeserializer.class.getName()); + config.put("consumerConfigProperties", ImmutableMap.builder() + .put("schema.registry.url", "http://localhost:8081") + .build()); + source.open(config, Mockito.mock(SourceContext.class)); + ConsumerRecord record = new ConsumerRecord("test", 88, 99, key, value); + record.headers().add("k1", "v1".getBytes(StandardCharsets.UTF_8)); + record.headers().add("k2", new byte[]{0xF}); + + Map props = source.copyKafkaHeaders(record); + assertTrue(props.isEmpty()); + } + private void validateSchemaKeyValue(String keyDeserializationClass, Schema expectedKeySchema, String valueDeserializationClass, Schema expectedValueSchema, ByteBuffer key, diff --git a/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java b/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java index a9a5c22eb41e8..4eb30447fbf35 100644 --- a/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java +++ b/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableMap; +import java.util.Collections; import org.apache.kafka.clients.consumer.ConsumerConfig; import 
org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.security.auth.SecurityProtocol; @@ -55,10 +56,10 @@ private static class DummySource extends KafkaAbstractSource { public KafkaRecord buildRecord(ConsumerRecord consumerRecord) { KafkaRecord record = new KafkaRecord(consumerRecord, new String((byte[]) consumerRecord.value(), StandardCharsets.UTF_8), - Schema.STRING); + Schema.STRING, + Collections.emptyMap()); return record; } - } @Test From 43ad6f951b6567dd2c4b015d602fa3316f45a74f Mon Sep 17 00:00:00 2001 From: Lari Hotari Date: Tue, 27 Sep 2022 21:32:45 +0300 Subject: [PATCH 08/59] Skip creating a subscription replication snapshot if no messages have been published after the topic gets activated on a broker (#16618) * Skip creating a replication snapshot if no messages have been published * Adapt test to new behavior where replication snapshots happen only when there are new messages --- .../ReplicatedSubscriptionsController.java | 3 +- .../service/ReplicatorSubscriptionTest.java | 55 ++++++++++++++----- 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java index a77ac76be8ff4..1e1245ed36b6f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java @@ -202,7 +202,8 @@ private void receiveSubscriptionUpdated(ReplicatedSubscriptionsUpdate update) { private void startNewSnapshot() { cleanupTimedOutSnapshots(); - if (topic.getLastDataMessagePublishedTimestamp() < lastCompletedSnapshotStartTime) { + if (topic.getLastDataMessagePublishedTimestamp() < lastCompletedSnapshotStartTime + || topic.getLastDataMessagePublishedTimestamp() 
== 0) { // There was no message written since the last snapshot, we can skip creating a new snapshot if (log.isDebugEnabled()) { log.debug("[{}] There is no new data in topic. Skipping snapshot creation.", topic.getName()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java index 9c426b2a43587..ea9a9a2b9f2b1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java @@ -28,6 +28,7 @@ import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; @@ -174,6 +175,14 @@ public void testReplicationSnapshotStopWhenNoTraffic() throws Exception { // create subscription in r1 createReplicatedSubscription(client1, topicName, subscriptionName, true); + // Validate that no snapshots are created before messages are published + Thread.sleep(2 * config1.getReplicatedSubscriptionsSnapshotFrequencyMillis()); + PersistentTopic t1 = (PersistentTopic) pulsar1.getBrokerService() + .getTopic(topicName, false).get().get(); + ReplicatedSubscriptionsController rsc1 = t1.getReplicatedSubscriptionController().get(); + // no snapshot should have been created before any messages are published + assertTrue(rsc1.getLastCompletedSnapshotId().isEmpty()); + @Cleanup PulsarClient client2 = PulsarClient.builder() .serviceUrl(url2.toString()) @@ -197,9 +206,6 @@ public void testReplicationSnapshotStopWhenNoTraffic() throws Exception { Thread.sleep(2 * config1.getReplicatedSubscriptionsSnapshotFrequencyMillis()); // In R1 - PersistentTopic t1 = (PersistentTopic) pulsar1.getBrokerService() - .getTopic(topicName, false).get().get(); - ReplicatedSubscriptionsController rsc1 
= t1.getReplicatedSubscriptionController().get(); Position p1 = t1.getLastPosition(); String snapshot1 = rsc1.getLastCompletedSnapshotId().get(); @@ -541,22 +547,35 @@ public void testReplicatedSubscriptionWhenReplicatorProducerIsClosed() throws Ex .statsInterval(0, TimeUnit.SECONDS) .build(); - // create consumer in r1 - @Cleanup - Consumer consumer1 = client1.newConsumer() - .topic(topicName) - .subscriptionName(subscriptionName) - .replicateSubscriptionState(true) - .subscribe(); + { + // create consumer in r1 + @Cleanup + Consumer consumer = client1.newConsumer() + .topic(topicName) + .subscriptionName(subscriptionName) + .replicateSubscriptionState(true) + .subscribe(); - // waiting to replicate topic/subscription to r1->r2 - Awaitility.await().until(() -> pulsar2.getBrokerService().getTopics().containsKey(topicName)); - final PersistentTopic topic2 = (PersistentTopic) pulsar2.getBrokerService().getTopic(topicName, false).join().get(); - Awaitility.await().untilAsserted(() -> assertTrue(topic2.getReplicators().get("r1").isConnected())); - Awaitility.await().untilAsserted(() -> assertNotNull(topic2.getSubscription(subscriptionName))); + // send one message to trigger replication + @Cleanup + Producer producer = client1.newProducer().topic(topicName) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + producer.send("message".getBytes(StandardCharsets.UTF_8)); + + assertEquals(readMessages(consumer, new HashSet<>(), 1, false), 1); + + // waiting to replicate topic/subscription to r1->r2 + Awaitility.await().until(() -> pulsar2.getBrokerService().getTopics().containsKey(topicName)); + final PersistentTopic topic2 = (PersistentTopic) pulsar2.getBrokerService().getTopic(topicName, false).join().get(); + Awaitility.await().untilAsserted(() -> assertTrue(topic2.getReplicators().get("r1").isConnected())); + Awaitility.await().untilAsserted(() -> assertNotNull(topic2.getSubscription(subscriptionName))); + } // unsubscribe 
replicated subscription in r2 admin2.topics().deleteSubscription(topicName, subscriptionName); + final PersistentTopic topic2 = (PersistentTopic) pulsar2.getBrokerService().getTopic(topicName, false).join().get(); assertNull(topic2.getSubscription(subscriptionName)); // close replicator producer in r2 @@ -581,6 +600,12 @@ public void testReplicatedSubscriptionWhenReplicatorProducerIsClosed() throws Ex // consume 6 messages in r1 Set receivedMessages = new LinkedHashSet<>(); + @Cleanup + Consumer consumer1 = client1.newConsumer() + .topic(topicName) + .subscriptionName(subscriptionName) + .replicateSubscriptionState(true) + .subscribe(); assertEquals(readMessages(consumer1, receivedMessages, numMessages, false), numMessages); // wait for subscription to be replicated From d4893d1b26a128329c9fe66f86454dd2a15422aa Mon Sep 17 00:00:00 2001 From: Lari Hotari Date: Tue, 27 Sep 2022 23:27:33 +0300 Subject: [PATCH 09/59] [fix][tests] Fix Mockito mocks memory leak (#17851) * Call cleanup method in finally block to ensure it's not skipped * Clear invocations for the mocks that are left around without cleanup * Cleanup PulsarService and PulsarAdmin mocks/spies in MockedPulsarServiceBaseTest * Don't record invocations at all for PulsarService and PulsarAdmin in MockedPulsarServiceBaseTest * Don't record invocations for spies by default * Simplify reseting mocks * Fix PersistentTopicTest * Fix TokenExpirationProducerConsumerTest * Fix SimpleLoadManagerImplTest * Fix FilterEntryTest --- .../pulsar/tests/MockitoCleanupListener.java | 13 +++++--- .../tests/MockitoThreadLocalStateCleaner.java | 19 +++++++++++ .../apache/pulsar/broker/BrokerTestUtil.java | 33 +++++++++++++++++++ .../auth/MockedPulsarServiceBaseTest.java | 22 ++++++++++--- .../SimpleLoadManagerImplTest.java | 4 +-- .../broker/service/PersistentTopicTest.java | 13 ++++---- .../service/ServerCnxAuthorizationTest.java | 26 +++++++-------- .../service/plugin/FilterEntryTest.java | 6 ++-- 
.../TokenExpirationProduceConsumerTest.java | 8 +++++ .../impl/BrokerClientIntegrationTest.java | 8 +++-- 10 files changed, 115 insertions(+), 37 deletions(-) diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java index 73fff1bb7e269..ff590c05bbcf7 100644 --- a/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java +++ b/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java @@ -38,11 +38,15 @@ public class MockitoCleanupListener extends BetweenTestClassesListenerAdapter { @Override protected void onBetweenTestClasses(Class endedTestClass, Class startedTestClass) { if (MOCKITO_CLEANUP_ENABLED) { - if (MockitoThreadLocalStateCleaner.INSTANCE.isEnabled()) { - LOG.info("Cleaning up Mockito's ThreadSafeMockingProgress.MOCKING_PROGRESS_PROVIDER thread local state."); - MockitoThreadLocalStateCleaner.INSTANCE.cleanup(); + try { + if (MockitoThreadLocalStateCleaner.INSTANCE.isEnabled()) { + LOG.info( + "Cleaning up Mockito's ThreadSafeMockingProgress.MOCKING_PROGRESS_PROVIDER thread local state."); + MockitoThreadLocalStateCleaner.INSTANCE.cleanup(); + } + } finally { + cleanupMockitoInline(); } - cleanupMockitoInline(); } } @@ -54,5 +58,4 @@ protected void onBetweenTestClasses(Class endedTestClass, Class startedTes private void cleanupMockitoInline() { Mockito.framework().clearInlineMocks(); } - } diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/MockitoThreadLocalStateCleaner.java b/buildtools/src/main/java/org/apache/pulsar/tests/MockitoThreadLocalStateCleaner.java index 3c383fb1d92fd..4fe4ea7e0e2d3 100644 --- a/buildtools/src/main/java/org/apache/pulsar/tests/MockitoThreadLocalStateCleaner.java +++ b/buildtools/src/main/java/org/apache/pulsar/tests/MockitoThreadLocalStateCleaner.java @@ -23,6 +23,8 @@ import org.apache.commons.lang3.ClassUtils; import org.apache.commons.lang3.reflect.FieldUtils; 
import org.apache.commons.lang3.reflect.MethodUtils; +import org.mockito.internal.stubbing.InvocationContainerImpl; +import org.mockito.internal.util.MockUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,16 +74,33 @@ public void cleanup() { LOG.warn("Invalid usage of Mockito detected on thread {}." + " There is ongoing stubbing on mock of class={} instance={}", thread, mock.getClass().getName(), mock); + try { + clearInvocations(thread, mock); + } catch (Exception e) { + LOG.warn("Clearing invocations failed", e); + } } } } catch (NoSuchMethodException | IllegalAccessException e) { LOG.debug("Cannot call validateState on existing Mockito ProgressProvider"); } catch (InvocationTargetException e) { LOG.warn("Invalid usage of Mockito detected on thread {}", thread, e.getCause()); + } catch (Exception e) { + LOG.warn("Removing {} instance from thread {} failed", mockingProgress.getClass().getName(), thread, e); } }); } + private static void clearInvocations(Thread thread, Object mock) { + InvocationContainerImpl invocationContainer = MockUtil.getInvocationContainer(mock); + if (invocationContainer.hasInvocationForPotentialStubbing()) { + LOG.warn("Mock contains registered invocations that should be cleared. thread {} class={} " + + "instance={}", + thread, mock.getClass().getName(), mock); + invocationContainer.clearInvocations(); + } + } + public boolean isEnabled() { return MOCKING_PROGRESS_PROVIDER != null; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java index 224060c9d912e..7d7a3ebd2621d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java @@ -34,14 +34,47 @@ public static String newUniqueName(String prefix) { * Creates a Mockito spy directly without an intermediate instance to spy. 
* This is to address flaky test issue where a spy created with a given instance fails with * {@link org.mockito.exceptions.misusing.WrongTypeOfReturnValue} exception. + * The spy is stub-only which does not record method invocations. * * @param classToSpy the class to spy * @param args the constructor arguments to use when creating the spy instance * @return a spy of the provided class created with given constructor arguments */ public static T spyWithClassAndConstructorArgs(Class classToSpy, Object... args) { + return Mockito.mock(classToSpy, Mockito.withSettings() + .useConstructor(args) + .defaultAnswer(Mockito.CALLS_REAL_METHODS) + .stubOnly()); + } + + /** + * Creates a Mockito spy directly without an intermediate instance to spy. + * This is to address flaky test issue where a spy created with a given instance fails with + * {@link org.mockito.exceptions.misusing.WrongTypeOfReturnValue} exception. + * The spy records method invocations. + * + * @param classToSpy the class to spy + * @param args the constructor arguments to use when creating the spy instance + * @return a spy of the provided class created with given constructor arguments + */ + public static T spyWithClassAndConstructorArgsRecordingInvocations(Class classToSpy, Object... args) { return Mockito.mock(classToSpy, Mockito.withSettings() .useConstructor(args) .defaultAnswer(Mockito.CALLS_REAL_METHODS)); } + + /** + * Create a Mockito spy that is stub-only which does not record method invocations, + * thus saving memory but disallowing verification of invocations. 
+ * + * @param object to spy on + * @return a spy of the real object + * @param type of object + */ + public static T spyWithoutRecordingInvocations(T object) { + return Mockito.mock((Class) object.getClass(), Mockito.withSettings() + .spiedInstance(object) + .defaultAnswer(Mockito.CALLS_REAL_METHODS) + .stubOnly()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java index d3eea7996a187..62fd69b3811d6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.auth; import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithoutRecordingInvocations; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -47,6 +48,8 @@ import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; +import javax.ws.rs.container.AsyncResponse; +import javax.ws.rs.container.TimeoutHandler; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.EnsemblePlacementPolicy; import org.apache.bookkeeper.client.PulsarMockBookKeeper; @@ -78,12 +81,11 @@ import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.MockZooKeeper; import org.apache.zookeeper.data.ACL; +import org.mockito.Mockito; +import org.mockito.internal.util.MockUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.ws.rs.container.AsyncResponse; -import javax.ws.rs.container.TimeoutHandler; - /** * Base class for all tests that need a Pulsar instance without a ZK and BK cluster. 
*/ @@ -232,6 +234,9 @@ protected final void internalCleanup() throws Exception { // an NPE in shutdown, obscuring the real error if (admin != null) { admin.close(); + if (MockUtil.isMock(admin)) { + Mockito.reset(admin); + } admin = null; } if (pulsarClient != null) { @@ -248,6 +253,7 @@ protected final void internalCleanup() throws Exception { resetConfig(); if (mockBookKeeper != null) { mockBookKeeper.reallyShutdown(); + Mockito.reset(mockBookKeeper); mockBookKeeper = null; } if (mockZooKeeperGlobal != null) { @@ -304,6 +310,9 @@ protected void stopBroker() throws Exception { // set shutdown timeout to 0 for forceful shutdown pulsar.getConfiguration().setBrokerShutdownTimeoutMs(0L); pulsar.close(); + if (MockUtil.isMock(pulsar)) { + Mockito.reset(pulsar); + } pulsar = null; // Simulate cleanup of ephemeral nodes //mockZooKeeper.delete("/loadbalance/brokers/localhost:" + pulsar.getConfiguration().getWebServicePort(), -1); @@ -320,12 +329,15 @@ protected void startBroker() throws Exception { if (admin != null) { admin.close(); + if (MockUtil.isMock(admin)) { + Mockito.reset(admin); + } } PulsarAdminBuilder pulsarAdminBuilder = PulsarAdmin.builder().serviceHttpUrl(brokerUrl != null ? 
brokerUrl.toString() : brokerUrlTls.toString()); customizeNewPulsarAdminBuilder(pulsarAdminBuilder); - admin = spy(pulsarAdminBuilder.build()); + admin = spyWithoutRecordingInvocations(pulsarAdminBuilder.build()); } protected void customizeNewPulsarAdminBuilder(PulsarAdminBuilder pulsarAdminBuilder) { @@ -338,7 +350,7 @@ protected PulsarService startBroker(ServiceConfiguration conf) throws Exception protected PulsarService startBrokerWithoutAuthorization(ServiceConfiguration conf) throws Exception { conf.setBrokerShutdownTimeoutMs(0L); - PulsarService pulsar = spy(newPulsarService(conf)); + PulsarService pulsar = spyWithoutRecordingInvocations(newPulsarService(conf)); setupBrokerMocks(pulsar); beforePulsarStartMocks(pulsar); pulsar.start(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java index 8ac7a94d2082e..9e17cf85155e5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java @@ -18,7 +18,7 @@ */ package org.apache.pulsar.broker.loadbalance; -import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -335,7 +335,7 @@ public void testLoadReportParsing() throws Exception { @Test(enabled = true) public void testDoLoadShedding() throws Exception { - SimpleLoadManagerImpl loadManager = spyWithClassAndConstructorArgs(SimpleLoadManagerImpl.class, pulsar1); + SimpleLoadManagerImpl loadManager = spyWithClassAndConstructorArgsRecordingInvocations(SimpleLoadManagerImpl.class, pulsar1); 
PulsarResourceDescription rd = new PulsarResourceDescription(); rd.put("memory", new ResourceUsage(1024, 4096)); rd.put("cpu", new ResourceUsage(10, 100)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 1404ce7c7988e..2d8ff60fc27d9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.service; import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockBookKeeper; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockZooKeeper; import static org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH; @@ -230,7 +231,7 @@ public void setup() throws Exception { doReturn(brokerService).when(pulsar).getBrokerService(); }); // Mock serviceCnx. 
- serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + serverCnx = spyWithClassAndConstructorArgsRecordingInvocations(ServerCnx.class, pulsar); doReturn(true).when(serverCnx).isActive(); doReturn(true).when(serverCnx).isWritable(); doReturn(new InetSocketAddress("localhost", 1234)).when(serverCnx).clientAddress(); @@ -369,7 +370,7 @@ public void setMetadataFromEntryData(ByteBuf entryData) { @Test public void testDispatcherMultiConsumerReadFailed() throws Exception { - PersistentTopic topic = spyWithClassAndConstructorArgs(PersistentTopic.class, successTopicName, ledgerMock, brokerService); + PersistentTopic topic = spyWithClassAndConstructorArgsRecordingInvocations(PersistentTopic.class, successTopicName, ledgerMock, brokerService); ManagedCursor cursor = mock(ManagedCursor.class); when(cursor.getName()).thenReturn("cursor"); Subscription subscription = mock(Subscription.class); @@ -381,7 +382,7 @@ public void testDispatcherMultiConsumerReadFailed() throws Exception { @Test public void testDispatcherSingleConsumerReadFailed() throws Exception { - PersistentTopic topic = spyWithClassAndConstructorArgs(PersistentTopic.class, successTopicName, ledgerMock, brokerService); + PersistentTopic topic = spyWithClassAndConstructorArgsRecordingInvocations(PersistentTopic.class, successTopicName, ledgerMock, brokerService); ManagedCursor cursor = mock(ManagedCursor.class); when(cursor.getName()).thenReturn("cursor"); PersistentDispatcherSingleActiveConsumer dispatcher = new PersistentDispatcherSingleActiveConsumer(cursor, @@ -2105,13 +2106,13 @@ public void testCheckInactiveSubscriptions() throws Exception { .concurrencyLevel(1) .build(); // This subscription is connected by consumer. 
- PersistentSubscription nonDeletableSubscription1 = spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, "nonDeletableSubscription1", cursorMock, false); + PersistentSubscription nonDeletableSubscription1 = spyWithClassAndConstructorArgsRecordingInvocations(PersistentSubscription.class, topic, "nonDeletableSubscription1", cursorMock, false); subscriptions.put(nonDeletableSubscription1.getName(), nonDeletableSubscription1); // This subscription is not connected by consumer. - PersistentSubscription deletableSubscription1 = spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, "deletableSubscription1", cursorMock, false); + PersistentSubscription deletableSubscription1 = spyWithClassAndConstructorArgsRecordingInvocations(PersistentSubscription.class, topic, "deletableSubscription1", cursorMock, false); subscriptions.put(deletableSubscription1.getName(), deletableSubscription1); // This subscription is replicated. - PersistentSubscription nonDeletableSubscription2 = spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, "nonDeletableSubscription2", cursorMock, true); + PersistentSubscription nonDeletableSubscription2 = spyWithClassAndConstructorArgsRecordingInvocations(PersistentSubscription.class, topic, "nonDeletableSubscription2", cursorMock, true); subscriptions.put(nonDeletableSubscription2.getName(), nonDeletableSubscription2); Field field = topic.getClass().getDeclaredField("subscriptions"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java index 6d108ce675d17..c005b96d004cc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java @@ -19,7 +19,7 @@ package org.apache.pulsar.broker.service; -import static 
org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockBookKeeper; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockZooKeeper; import static org.mockito.ArgumentMatchers.argThat; @@ -41,7 +41,6 @@ import io.netty.channel.ChannelPipeline; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import javax.crypto.SecretKey; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.nio.charset.StandardCharsets; @@ -50,6 +49,7 @@ import java.util.Optional; import java.util.Properties; import java.util.concurrent.CompletableFuture; +import javax.crypto.SecretKey; import org.apache.bookkeeper.common.util.OrderedExecutor; import org.apache.bookkeeper.mledger.ManagedLedgerFactory; import org.apache.pulsar.broker.PulsarService; @@ -113,7 +113,7 @@ public void beforeMethod() throws Exception { + Base64.getEncoder().encodeToString(SECRET_KEY.getEncoded())); svcConfig.setProperties(properties); - pulsar = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); + pulsar = spyWithClassAndConstructorArgsRecordingInvocations(PulsarService.class, svcConfig); doReturn(new DefaultSchemaRegistryService()).when(pulsar).getSchemaRegistryService(); doReturn(svcConfig).when(pulsar).getConfiguration(); @@ -135,21 +135,21 @@ public void beforeMethod() throws Exception { doReturn(store).when(pulsar).getLocalMetadataStore(); doReturn(store).when(pulsar).getConfigurationMetadataStore(); - pulsarResources = spyWithClassAndConstructorArgs(PulsarResources.class, store, store); + pulsarResources = spyWithClassAndConstructorArgsRecordingInvocations(PulsarResources.class, store, store); PulsarServiceMockSupport.mockPulsarServiceProps(pulsar, () -> { 
doReturn(pulsarResources).when(pulsar).getPulsarResources(); }); NamespaceResources namespaceResources = - spyWithClassAndConstructorArgs(NamespaceResources.class, store, store, 30); + spyWithClassAndConstructorArgsRecordingInvocations(NamespaceResources.class, store, store, 30); doReturn(namespaceResources).when(pulsarResources).getNamespaceResources(); - TenantResources tenantResources = spyWithClassAndConstructorArgs(TenantResources.class, store, 30); + TenantResources tenantResources = spyWithClassAndConstructorArgsRecordingInvocations(TenantResources.class, store, 30); doReturn(tenantResources).when(pulsarResources).getTenantResources(); doReturn(CompletableFuture.completedFuture(Optional.of(TenantInfo.builder().build()))).when(tenantResources) .getTenantAsync("public"); - brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); + brokerService = spyWithClassAndConstructorArgsRecordingInvocations(BrokerService.class, pulsar, eventLoopGroup); BrokerInterceptor interceptor = mock(BrokerInterceptor.class); doReturn(interceptor).when(brokerService).getInterceptor(); PulsarServiceMockSupport.mockPulsarServiceProps(pulsar, () -> { @@ -162,7 +162,7 @@ public void beforeMethod() throws Exception { public void testVerifyOriginalPrincipalWithAuthDataForwardedFromProxy() throws Exception { doReturn(true).when(svcConfig).isAuthenticateOriginalAuthData(); - ServerCnx serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + ServerCnx serverCnx = spyWithClassAndConstructorArgsRecordingInvocations(ServerCnx.class, pulsar); ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); Channel channel = mock(Channel.class); ChannelPipeline channelPipeline = mock(ChannelPipeline.class); @@ -198,7 +198,7 @@ public void testVerifyOriginalPrincipalWithAuthDataForwardedFromProxy() throws E assertEquals(serverCnx.getAuthState().getAuthRole(), PROXY_PRINCIPAL); AuthorizationService authorizationService = - 
spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsarResources); + spyWithClassAndConstructorArgsRecordingInvocations(AuthorizationService.class, svcConfig, pulsarResources); doReturn(authorizationService).when(brokerService).getAuthorizationService(); // lookup @@ -268,7 +268,7 @@ public void testVerifyOriginalPrincipalWithAuthDataForwardedFromProxy() throws E public void testVerifyOriginalPrincipalWithoutAuthDataForwardedFromProxy() throws Exception { doReturn(false).when(svcConfig).isAuthenticateOriginalAuthData(); - ServerCnx serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + ServerCnx serverCnx = spyWithClassAndConstructorArgsRecordingInvocations(ServerCnx.class, pulsar); ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); Channel channel = mock(Channel.class); ChannelPipeline channelPipeline = mock(ChannelPipeline.class); @@ -299,7 +299,7 @@ public void testVerifyOriginalPrincipalWithoutAuthDataForwardedFromProxy() throw assertEquals(serverCnx.getAuthState().getAuthRole(), PROXY_PRINCIPAL); AuthorizationService authorizationService = - spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsarResources); + spyWithClassAndConstructorArgsRecordingInvocations(AuthorizationService.class, svcConfig, pulsarResources); doReturn(authorizationService).when(brokerService).getAuthorizationService(); // lookup @@ -360,7 +360,7 @@ public void testVerifyOriginalPrincipalWithoutAuthDataForwardedFromProxy() throw @Test public void testVerifyAuthRoleAndAuthDataFromDirectConnectionBroker() throws Exception { - ServerCnx serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + ServerCnx serverCnx = spyWithClassAndConstructorArgsRecordingInvocations(ServerCnx.class, pulsar); ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); Channel channel = mock(Channel.class); @@ -391,7 +391,7 @@ public void 
testVerifyAuthRoleAndAuthDataFromDirectConnectionBroker() throws Exc assertEquals(serverCnx.getAuthState().getAuthRole(), CLIENT_PRINCIPAL); AuthorizationService authorizationService = - spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsarResources); + spyWithClassAndConstructorArgsRecordingInvocations(AuthorizationService.class, svcConfig, pulsarResources); doReturn(authorizationService).when(brokerService).getAuthorizationService(); // lookup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java index a5f8b5ab38f03..c10ab392da052 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.service.plugin; import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.apache.pulsar.client.api.SubscriptionInitialPosition.Earliest; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -36,7 +37,6 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.impl.PositionImpl; @@ -165,9 +165,9 @@ public void testFilter() throws Exception { field.setAccessible(true); NarClassLoader narClassLoader = mock(NarClassLoader.class); EntryFilter filter1 = new EntryFilterTest(); - EntryFilterWithClassLoader loader1 = spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter1, narClassLoader); + EntryFilterWithClassLoader loader1 = 
spyWithClassAndConstructorArgsRecordingInvocations(EntryFilterWithClassLoader.class, filter1, narClassLoader); EntryFilter filter2 = new EntryFilter2Test(); - EntryFilterWithClassLoader loader2 = spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter2, narClassLoader); + EntryFilterWithClassLoader loader2 = spyWithClassAndConstructorArgsRecordingInvocations(EntryFilterWithClassLoader.class, filter2, narClassLoader); field.set(dispatcher, ImmutableList.of(loader1, loader2)); Producer producer = pulsarClient.newProducer(Schema.STRING) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java index 6b26d1740429a..520c8743cbed2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java @@ -36,6 +36,8 @@ import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; +import org.mockito.Mockito; +import org.mockito.internal.util.MockUtil; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -64,6 +66,12 @@ protected void setup() throws Exception { // Start Broker super.init(); + if (admin != null) { + admin.close(); + if (MockUtil.isMock(admin)) { + Mockito.reset(admin); + } + } admin = getAdmin(ADMIN_TOKEN); admin.clusters().createCluster(configClusterName, ClusterData.builder() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java index 0039fc92cb803..a8c3d353e84f5 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java @@ -20,7 +20,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.UUID.randomUUID; -import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.mockito.Mockito.any; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doAnswer; @@ -819,8 +819,10 @@ public void testJsonSchemaProducerConsumerWithSpecifiedReaderAndWriter() throws final String topicName = "persistent://my-property/my-ns/my-topic1"; ObjectMapper mapper = new ObjectMapper(); SchemaReader reader = - spyWithClassAndConstructorArgs(JacksonJsonReader.class, mapper, TestMessageObject.class); - SchemaWriter writer = spyWithClassAndConstructorArgs(JacksonJsonWriter.class, mapper); + spyWithClassAndConstructorArgsRecordingInvocations(JacksonJsonReader.class, mapper, + TestMessageObject.class); + SchemaWriter writer = + spyWithClassAndConstructorArgsRecordingInvocations(JacksonJsonWriter.class, mapper); SchemaDefinition schemaDefinition = new SchemaDefinitionBuilderImpl() .withPojo(TestMessageObject.class) From 6dd38a4c3c12147ad2d8b4860b035bc71674a2b5 Mon Sep 17 00:00:00 2001 From: Lari Hotari Date: Tue, 27 Sep 2022 23:28:10 +0300 Subject: [PATCH 10/59] [improve][common] Make Bookkeeper metadata options configurable (#17834) - use Bookkeeper defaults by setting BK_METADATA_OPTIONS=none --- bin/pulsar | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bin/pulsar b/bin/pulsar index 30fe596f60bc9..28738d84d5d86 100755 --- a/bin/pulsar +++ b/bin/pulsar @@ -348,9 +348,13 @@ ZK_OPTS=" -Dzookeeper.4lw.commands.whitelist=* -Dzookeeper.snapshot.trust.empty= 
LOG4J2_SHUTDOWN_HOOK_DISABLED="-Dlog4j.shutdownHookEnabled=false" -# Adding pulsar metadata as a recognized provider -BK_METADATA_OPTIONS="-Dbookkeeper.metadata.bookie.drivers=org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver -Dbookkeeper.metadata.client.drivers=org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver" -OPTS="$OPTS $BK_METADATA_OPTIONS" +# By default, Pulsar Metadata driver will be used for Bookkeeper client and server metadata operations +# This can be disabled by setting BK_METADATA_OPTIONS=none +if [[ "$BK_METADATA_OPTIONS" != "none" ]]; then + # Adding pulsar metadata as a recognized provider + BK_METADATA_OPTIONS="${BK_METADATA_OPTIONS:-"-Dbookkeeper.metadata.bookie.drivers=org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver -Dbookkeeper.metadata.client.drivers=org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"}" + OPTS="$OPTS $BK_METADATA_OPTIONS" +fi #Change to PULSAR_HOME to support relative paths cd "$PULSAR_HOME" From 7e4c74671f0f13f0a5d860d19655f09c0a351f58 Mon Sep 17 00:00:00 2001 From: Kai Wang Date: Wed, 28 Sep 2022 10:08:24 +0800 Subject: [PATCH 11/59] [feature][client-cpp] Support inclusive seek for cpp client (#17209) Fixes #17186 ### Motivation There are some cases in which it is useful to be able to include current position of the message when reset of cursor was made. ### Modifications * Support inclusive seek in c++ consumers. * Add a unit test to verify. 
--- .../include/pulsar/ConsumerConfiguration.h | 14 ++ pulsar-client-cpp/lib/ClientImpl.cc | 4 +- .../lib/ConsumerConfiguration.cc | 7 + .../lib/ConsumerConfigurationImpl.h | 1 + pulsar-client-cpp/lib/ConsumerImpl.cc | 165 +++++++++++------- pulsar-client-cpp/lib/ConsumerImpl.h | 16 +- .../lib/MultiTopicsConsumerImpl.cc | 9 +- pulsar-client-cpp/lib/ReaderImpl.cc | 7 +- pulsar-client-cpp/lib/Synchronized.h | 42 +++++ pulsar-client-cpp/python/pulsar_test.py | 8 +- pulsar-client-cpp/tests/ConsumerTest.cc | 63 +++++++ 11 files changed, 257 insertions(+), 79 deletions(-) create mode 100644 pulsar-client-cpp/lib/Synchronized.h diff --git a/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h b/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h index b326ca8fb3151..4347c3b2d5fc3 100644 --- a/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h +++ b/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h @@ -499,6 +499,20 @@ class PULSAR_PUBLIC ConsumerConfiguration { */ bool isAutoAckOldestChunkedMessageOnQueueFull() const; + /** + * Set the consumer to include the given position of any reset operation like Consumer::seek. 
+ * + * Default: false + * + * @param startMessageIdInclusive whether to include the reset position + */ + ConsumerConfiguration& setStartMessageIdInclusive(bool startMessageIdInclusive); + + /** + * The associated getter of setStartMessageIdInclusive + */ + bool isStartMessageIdInclusive() const; + friend class PulsarWrapper; private: diff --git a/pulsar-client-cpp/lib/ClientImpl.cc b/pulsar-client-cpp/lib/ClientImpl.cc index 08adb1d6423ea..29e92f3b815a6 100644 --- a/pulsar-client-cpp/lib/ClientImpl.cc +++ b/pulsar-client-cpp/lib/ClientImpl.cc @@ -378,8 +378,8 @@ void ClientImpl::handleSubscribe(const Result result, const LookupDataResultPtr partitionMetadata->getPartitions(), subscriptionName, conf, lookupServicePtr_); } else { - auto consumerImpl = std::make_shared(shared_from_this(), topicName->toString(), - subscriptionName, conf); + auto consumerImpl = std::make_shared( + shared_from_this(), topicName->toString(), subscriptionName, conf, topicName->isPersistent()); consumerImpl->setPartitionIndex(topicName->getPartitionIndex()); consumer = consumerImpl; } diff --git a/pulsar-client-cpp/lib/ConsumerConfiguration.cc b/pulsar-client-cpp/lib/ConsumerConfiguration.cc index 2b58835cdbea3..f9fe499b9541e 100644 --- a/pulsar-client-cpp/lib/ConsumerConfiguration.cc +++ b/pulsar-client-cpp/lib/ConsumerConfiguration.cc @@ -260,4 +260,11 @@ bool ConsumerConfiguration::isAutoAckOldestChunkedMessageOnQueueFull() const { return impl_->autoAckOldestChunkedMessageOnQueueFull; } +ConsumerConfiguration& ConsumerConfiguration::setStartMessageIdInclusive(bool startMessageIdInclusive) { + impl_->startMessageIdInclusive = startMessageIdInclusive; + return *this; +} + +bool ConsumerConfiguration::isStartMessageIdInclusive() const { return impl_->startMessageIdInclusive; } + } // namespace pulsar diff --git a/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h b/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h index 1c13f729b55e0..cca83a3882930 100644 --- 
a/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h +++ b/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h @@ -53,6 +53,7 @@ struct ConsumerConfigurationImpl { KeySharedPolicy keySharedPolicy; size_t maxPendingChunkedMessage{10}; bool autoAckOldestChunkedMessageOnQueueFull{false}; + bool startMessageIdInclusive{false}; }; } // namespace pulsar #endif /* LIB_CONSUMERCONFIGURATIONIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ConsumerImpl.cc b/pulsar-client-cpp/lib/ConsumerImpl.cc index 79c20d84649b3..37fcd95248af4 100644 --- a/pulsar-client-cpp/lib/ConsumerImpl.cc +++ b/pulsar-client-cpp/lib/ConsumerImpl.cc @@ -38,6 +38,7 @@ DECLARE_LOG_OBJECT() ConsumerImpl::ConsumerImpl(const ClientImplPtr client, const std::string& topic, const std::string& subscriptionName, const ConsumerConfiguration& conf, + bool isPersistent, const ExecutorServicePtr listenerExecutor /* = NULL by default */, bool hasParent /* = false by default */, const ConsumerTopicType consumerTopicType /* = NonPartitioned by default */, @@ -47,6 +48,7 @@ ConsumerImpl::ConsumerImpl(const ClientImplPtr client, const std::string& topic, config_(conf), subscription_(subscriptionName), originalSubscriptionName_(subscriptionName), + isPersistent_(isPersistent), messageListener_(config_.getMessageListener()), eventListener_(config_.getConsumerEventListener()), hasParent_(hasParent), @@ -169,14 +171,17 @@ void ConsumerImpl::connectionOpened(const ClientConnectionPtr& cnx) { // sending the subscribe request. 
cnx->registerConsumer(consumerId_, shared_from_this()); - Lock lockForMessageId(mutexForMessageId_); - Optional firstMessageInQueue = clearReceiveQueue(); - if (subscriptionMode_ == Commands::SubscriptionModeNonDurable) { - // Update startMessageId so that we can discard messages after delivery - // restarts - startMessageId_ = firstMessageInQueue; + if (duringSeek_) { + ackGroupingTrackerPtr_->flushAndClean(); } - const auto startMessageId = startMessageId_; + + Lock lockForMessageId(mutexForMessageId_); + // Update startMessageId so that we can discard messages after delivery restarts + const auto startMessageId = clearReceiveQueue(); + const auto subscribeMessageId = (subscriptionMode_ == Commands::SubscriptionModeNonDurable) + ? startMessageId + : Optional::empty(); + startMessageId_ = startMessageId; lockForMessageId.unlock(); unAckedMessageTrackerPtr_->clear(); @@ -186,7 +191,7 @@ void ConsumerImpl::connectionOpened(const ClientConnectionPtr& cnx) { uint64_t requestId = client->newRequestId(); SharedBuffer cmd = Commands::newSubscribe( topic_, subscription_, consumerId_, requestId, getSubType(), consumerName_, subscriptionMode_, - startMessageId, readCompacted_, config_.getProperties(), config_.getSubscriptionProperties(), + subscribeMessageId, readCompacted_, config_.getProperties(), config_.getSubscriptionProperties(), config_.getSchema(), getInitialPosition(), config_.isReplicateSubscriptionStateEnabled(), config_.getKeySharedPolicy(), config_.getPriorityLevel()); cnx->sendRequestWithId(cmd, requestId) @@ -397,12 +402,12 @@ void ConsumerImpl::messageReceived(const ClientConnectionPtr& cnx, const proto:: return; } - const bool isMessageDecryptable = - metadata.encryption_keys_size() <= 0 || config_.getCryptoKeyReader().get() || + const bool isMessageUndecryptable = + metadata.encryption_keys_size() > 0 && !config_.getCryptoKeyReader().get() && config_.getCryptoFailureAction() == ConsumerCryptoFailureAction::CONSUME; const bool isChunkedMessage = 
metadata.num_chunks_from_msg() > 1; - if (isMessageDecryptable && !isChunkedMessage) { + if (!isMessageUndecryptable && !isChunkedMessage) { if (!uncompressMessageIfNeeded(cnx, msg.message_id(), metadata, payload, true)) { // Message was discarded on decompression error return; @@ -446,6 +451,16 @@ void ConsumerImpl::messageReceived(const ClientConnectionPtr& cnx, const proto:: Lock lock(mutex_); numOfMessageReceived = receiveIndividualMessagesFromBatch(cnx, m, msg.redelivery_count()); } else { + const auto startMessageId = startMessageId_.get(); + if (isPersistent_ && startMessageId.is_present() && + m.getMessageId().ledgerId() == startMessageId.value().ledgerId() && + m.getMessageId().entryId() == startMessageId.value().entryId() && + isPriorEntryIndex(m.getMessageId().entryId())) { + LOG_DEBUG(getName() << " Ignoring message from before the startMessageId: " + << startMessageId.value()); + return; + } + Lock lock(pendingReceiveMutex_); // if asyncReceive is waiting then notify callback without adding to incomingMessages queue bool asyncReceivedWaiting = !pendingReceives_.empty(); @@ -533,9 +548,7 @@ uint32_t ConsumerImpl::receiveIndividualMessagesFromBatch(const ClientConnection batchAcknowledgementTracker_.receivedMessage(batchedMessage); LOG_DEBUG("Received Batch messages of size - " << batchSize << " -- msgId: " << batchedMessage.getMessageId()); - Lock lock(mutexForMessageId_); - const auto startMessageId = startMessageId_; - lock.unlock(); + const auto startMessageId = startMessageId_.get(); int skippedMessages = 0; @@ -550,9 +563,9 @@ uint32_t ConsumerImpl::receiveIndividualMessagesFromBatch(const ClientConnection // If we are receiving a batch message, we need to discard messages that were prior // to the startMessageId - if (msgId.ledgerId() == startMessageId.value().ledgerId() && + if (isPersistent_ && msgId.ledgerId() == startMessageId.value().ledgerId() && msgId.entryId() == startMessageId.value().entryId() && - msgId.batchIndex() <= 
startMessageId.value().batchIndex()) { + isPriorBatchIndex(msgId.batchIndex())) { LOG_DEBUG(getName() << "Ignoring message from before the startMessageId" << msg.getMessageId()); ++skippedMessages; @@ -842,6 +855,12 @@ void ConsumerImpl::messageProcessed(Message& msg, bool track) { * not seen by the application */ Optional ConsumerImpl::clearReceiveQueue() { + bool expectedDuringSeek = true; + if (duringSeek_.compare_exchange_strong(expectedDuringSeek, false)) { + return Optional::of(seekMessageId_.get()); + } else if (subscriptionMode_ == Commands::SubscriptionModeDurable) { + return startMessageId_.get(); + } Message nextMessageInQueue; if (incomingMessages_.peekAndClear(nextMessageInQueue)) { // There was at least one message pending in the queue @@ -862,7 +881,7 @@ Optional ConsumerImpl::clearReceiveQueue() { } else { // No message was received or dequeued by this consumer. Next message would still be the // startMessageId - return startMessageId_; + return startMessageId_.get(); } } @@ -1175,18 +1194,6 @@ void ConsumerImpl::brokerConsumerStatsListener(Result res, BrokerConsumerStatsIm } } -void ConsumerImpl::handleSeek(Result result, ResultCallback callback) { - if (result == ResultOk) { - Lock lock(mutexForMessageId_); - lastDequedMessageId_ = MessageId::earliest(); - lock.unlock(); - LOG_INFO(getName() << "Seek successfully"); - } else { - LOG_ERROR(getName() << "Failed to seek: " << strResult(result)); - } - callback(result); -} - void ConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { const auto state = state_.load(); if (state == Closed || state == Closing) { @@ -1197,25 +1204,13 @@ void ConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { return; } - this->ackGroupingTrackerPtr_->flushAndClean(); - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - ClientImplPtr client = client_.lock(); - uint64_t requestId = client->newRequestId(); - LOG_DEBUG(getName() << " Sending seek Command for Consumer - " << 
getConsumerId() << ", requestId - " - << requestId); - Future future = - cnx->sendRequestWithId(Commands::newSeek(consumerId_, requestId, msgId), requestId); - - if (callback) { - future.addListener( - std::bind(&ConsumerImpl::handleSeek, shared_from_this(), std::placeholders::_1, callback)); - } + ClientImplPtr client = client_.lock(); + if (!client) { + LOG_ERROR(getName() << "Client is expired when seekAsync " << msgId); return; } - - LOG_ERROR(getName() << " Client Connection not ready for Consumer"); - callback(ResultNotConnected); + const auto requestId = client->newRequestId(); + seekAsyncInternal(requestId, Commands::newSeek(consumerId_, requestId, msgId), msgId, 0L, callback); } void ConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { @@ -1228,24 +1223,14 @@ void ConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { return; } - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - ClientImplPtr client = client_.lock(); - uint64_t requestId = client->newRequestId(); - LOG_DEBUG(getName() << " Sending seek Command for Consumer - " << getConsumerId() << ", requestId - " - << requestId); - Future future = - cnx->sendRequestWithId(Commands::newSeek(consumerId_, requestId, timestamp), requestId); - - if (callback) { - future.addListener( - std::bind(&ConsumerImpl::handleSeek, shared_from_this(), std::placeholders::_1, callback)); - } + ClientImplPtr client = client_.lock(); + if (!client) { + LOG_ERROR(getName() << "Client is expired when seekAsync " << timestamp); return; } - - LOG_ERROR(getName() << " Client Connection not ready for Consumer"); - callback(ResultNotConnected); + const auto requestId = client->newRequestId(); + seekAsyncInternal(requestId, Commands::newSeek(consumerId_, requestId, timestamp), MessageId::earliest(), + timestamp, callback); } bool ConsumerImpl::isReadCompacted() { return readCompacted_; } @@ -1255,9 +1240,10 @@ inline bool hasMoreMessages(const MessageId& lastMessageIdInBroker, const Messag 
} void ConsumerImpl::hasMessageAvailableAsync(HasMessageAvailableCallback callback) { + const auto startMessageId = startMessageId_.get(); Lock lock(mutexForMessageId_); const auto messageId = - (lastDequedMessageId_ == MessageId::earliest()) ? startMessageId_.value() : lastDequedMessageId_; + (lastDequedMessageId_ == MessageId::earliest()) ? startMessageId.value() : lastDequedMessageId_; if (messageId == MessageId::latest()) { lock.unlock(); @@ -1380,4 +1366,57 @@ bool ConsumerImpl::isConnected() const { return !getCnx().expired() && state_ == uint64_t ConsumerImpl::getNumberOfConnectedConsumer() { return isConnected() ? 1 : 0; } +void ConsumerImpl::seekAsyncInternal(long requestId, SharedBuffer seek, const MessageId& seekId, + long timestamp, ResultCallback callback) { + ClientConnectionPtr cnx = getCnx().lock(); + if (!cnx) { + LOG_ERROR(getName() << " Client Connection not ready for Consumer"); + callback(ResultNotConnected); + return; + } + + const auto originalSeekMessageId = seekMessageId_.get(); + seekMessageId_ = seekId; + duringSeek_ = true; + if (timestamp > 0) { + LOG_INFO(getName() << " Seeking subscription to " << timestamp); + } else { + LOG_INFO(getName() << " Seeking subscription to " << seekId); + } + + std::weak_ptr weakSelf{shared_from_this()}; + + cnx->sendRequestWithId(seek, requestId) + .addListener([this, weakSelf, callback, originalSeekMessageId](Result result, + const ResponseData& responseData) { + auto self = weakSelf.lock(); + if (!self) { + callback(result); + return; + } + if (result == ResultOk) { + LOG_INFO(getName() << "Seek successfully"); + ackGroupingTrackerPtr_->flushAndClean(); + Lock lock(mutexForMessageId_); + lastDequedMessageId_ = MessageId::earliest(); + lock.unlock(); + } else { + LOG_ERROR(getName() << "Failed to seek: " << result); + seekMessageId_ = originalSeekMessageId; + duringSeek_ = false; + } + callback(result); + }); +} + +bool ConsumerImpl::isPriorBatchIndex(int32_t idx) { + return 
config_.isStartMessageIdInclusive() ? idx < startMessageId_.get().value().batchIndex() + : idx <= startMessageId_.get().value().batchIndex(); +} + +bool ConsumerImpl::isPriorEntryIndex(int64_t idx) { + return config_.isStartMessageIdInclusive() ? idx < startMessageId_.get().value().entryId() + : idx <= startMessageId_.get().value().entryId(); +} + } /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/ConsumerImpl.h b/pulsar-client-cpp/lib/ConsumerImpl.h index 70fda0170cc1a..1ad3a4c372764 100644 --- a/pulsar-client-cpp/lib/ConsumerImpl.h +++ b/pulsar-client-cpp/lib/ConsumerImpl.h @@ -46,6 +46,7 @@ #include #include #include +#include "Synchronized.h" using namespace pulsar; @@ -69,7 +70,7 @@ class ConsumerImpl : public ConsumerImplBase, public std::enable_shared_from_this { public: ConsumerImpl(const ClientImplPtr client, const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration&, + const ConsumerConfiguration&, bool isPersistent, const ExecutorServicePtr listenerExecutor = ExecutorServicePtr(), bool hasParent = false, const ConsumerTopicType consumerTopicType = NonPartitioned, Commands::SubscriptionMode = Commands::SubscriptionModeDurable, @@ -138,7 +139,6 @@ class ConsumerImpl : public ConsumerImplBase, virtual void redeliverMessages(const std::set& messageIds); - void handleSeek(Result result, ResultCallback callback); virtual bool isReadCompacted(); virtual void hasMessageAvailableAsync(HasMessageAvailableCallback callback); virtual void getLastMessageIdAsync(BrokerGetLastMessageIdCallback callback); @@ -169,6 +169,8 @@ class ConsumerImpl : public ConsumerImplBase, void drainIncomingMessageQueue(size_t count); uint32_t receiveIndividualMessagesFromBatch(const ClientConnectionPtr& cnx, Message& batchedMessage, int redeliveryCount); + bool isPriorBatchIndex(int32_t idx); + bool isPriorEntryIndex(int64_t idx); void brokerConsumerStatsListener(Result, BrokerConsumerStatsImpl, BrokerConsumerStatsCallback); bool 
decryptMessageIfNeeded(const ClientConnectionPtr& cnx, const proto::CommandMessage& msg, @@ -187,11 +189,14 @@ class ConsumerImpl : public ConsumerImplBase, BrokerGetLastMessageIdCallback callback); Optional clearReceiveQueue(); + void seekAsyncInternal(long requestId, SharedBuffer seek, const MessageId& seekId, long timestamp, + ResultCallback callback); std::mutex mutexForReceiveWithZeroQueueSize; const ConsumerConfiguration config_; const std::string subscription_; std::string originalSubscriptionName_; + const bool isPersistent_; MessageListener messageListener_; ConsumerEventListenerPtr eventListener_; ExecutorServicePtr listenerExecutor_; @@ -220,12 +225,15 @@ class ConsumerImpl : public ConsumerImplBase, MessageCryptoPtr msgCrypto_; const bool readCompacted_; - // Make the access to `startMessageId_`, `lastDequedMessageId_` and `lastMessageIdInBroker_` thread safe + // Make the access to `lastDequedMessageId_` and `lastMessageIdInBroker_` thread safe mutable std::mutex mutexForMessageId_; - Optional startMessageId_; MessageId lastDequedMessageId_{MessageId::earliest()}; MessageId lastMessageIdInBroker_{MessageId::earliest()}; + std::atomic_bool duringSeek_{false}; + Synchronized> startMessageId_{Optional::empty()}; + Synchronized seekMessageId_{MessageId::earliest()}; + class ChunkedMessageCtx { public: ChunkedMessageCtx() : totalChunks_(0) {} diff --git a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc index 7515076234556..0d730e1561f72 100644 --- a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc +++ b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc @@ -183,7 +183,8 @@ void MultiTopicsConsumerImpl::subscribeTopicPartitions(int numPartitions, TopicN if (numPartitions == 0) { // We don't have to add partition-n suffix consumer = std::make_shared(client_, topicName->toString(), subscriptionName_, config, - internalListenerExecutor, true, NonPartitioned); + topicName->isPersistent(), 
internalListenerExecutor, true, + NonPartitioned); consumer->getConsumerCreatedFuture().addListener(std::bind( &MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); @@ -195,7 +196,8 @@ void MultiTopicsConsumerImpl::subscribeTopicPartitions(int numPartitions, TopicN for (int i = 0; i < numPartitions; i++) { std::string topicPartitionName = topicName->getTopicPartitionName(i); consumer = std::make_shared(client_, topicPartitionName, subscriptionName_, config, - internalListenerExecutor, true, Partitioned); + topicName->isPersistent(), internalListenerExecutor, + true, Partitioned); consumer->getConsumerCreatedFuture().addListener(std::bind( &MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); @@ -819,7 +821,8 @@ void MultiTopicsConsumerImpl::subscribeSingleNewConsumer( std::string topicPartitionName = topicName->getTopicPartitionName(partitionIndex); auto consumer = std::make_shared(client_, topicPartitionName, subscriptionName_, config, - internalListenerExecutor, true, Partitioned); + topicName->isPersistent(), internalListenerExecutor, true, + Partitioned); consumer->getConsumerCreatedFuture().addListener( std::bind(&MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); diff --git a/pulsar-client-cpp/lib/ReaderImpl.cc b/pulsar-client-cpp/lib/ReaderImpl.cc index 5f78068228ff4..83fa6a57009d7 100644 --- a/pulsar-client-cpp/lib/ReaderImpl.cc +++ b/pulsar-client-cpp/lib/ReaderImpl.cc @@ -76,9 +76,10 @@ void ReaderImpl::start(const MessageId& startMessageId, test::consumerConfigOfReader = consumerConf.clone(); } - consumer_ = std::make_shared( - client_.lock(), topic_, subscription, consumerConf, ExecutorServicePtr(), false, 
NonPartitioned, - Commands::SubscriptionModeNonDurable, Optional::of(startMessageId)); + consumer_ = std::make_shared(client_.lock(), topic_, subscription, consumerConf, + TopicName::get(topic_)->isPersistent(), ExecutorServicePtr(), + false, NonPartitioned, Commands::SubscriptionModeNonDurable, + Optional::of(startMessageId)); consumer_->setPartitionIndex(TopicName::getPartitionIndex(topic_)); auto self = shared_from_this(); consumer_->getConsumerCreatedFuture().addListener( diff --git a/pulsar-client-cpp/lib/Synchronized.h b/pulsar-client-cpp/lib/Synchronized.h new file mode 100644 index 0000000000000..a98c08daeee3a --- /dev/null +++ b/pulsar-client-cpp/lib/Synchronized.h @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#pragma once + +#include + +template +class Synchronized { + public: + explicit Synchronized(const T& value) : value_(value) {} + + T get() const { + std::lock_guard lock(mutex_); + return value_; + } + + Synchronized& operator=(const T& value) { + std::lock_guard lock(mutex_); + value_ = value; + return *this; + } + + private: + T value_; + mutable std::mutex mutex_; +}; diff --git a/pulsar-client-cpp/python/pulsar_test.py b/pulsar-client-cpp/python/pulsar_test.py index 5f46edc4365e6..375afe43adb61 100755 --- a/pulsar-client-cpp/python/pulsar_test.py +++ b/pulsar-client-cpp/python/pulsar_test.py @@ -546,7 +546,7 @@ def test_reader_on_specific_message(self): # The reset would be effectively done on the next position relative to reset. # When available, we should test this behaviour with `startMessageIdInclusive` opt. from_msg_idx = last_msg_idx - for i in range(from_msg_idx, num_of_msgs): + for i in range(from_msg_idx + 1, num_of_msgs): msg = reader2.read_next(TM) self.assertTrue(msg) self.assertEqual(msg.data(), b"hello-%d" % i) @@ -896,7 +896,7 @@ def test_seek(self): consumer.seek(ids[50]) time.sleep(0.5) msg = consumer.receive(TM) - self.assertEqual(msg.data(), b"hello-50") + self.assertEqual(msg.data(), b"hello-51") # ditto, but seek on timestamp consumer.seek(timestamps[42]) @@ -921,9 +921,9 @@ def test_seek(self): reader.seek(ids[33]) time.sleep(0.5) msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-33") - msg = reader.read_next(TM) self.assertEqual(msg.data(), b"hello-34") + msg = reader.read_next(TM) + self.assertEqual(msg.data(), b"hello-35") # seek on timestamp reader.seek(timestamps[79]) diff --git a/pulsar-client-cpp/tests/ConsumerTest.cc b/pulsar-client-cpp/tests/ConsumerTest.cc index cf9ac23d19054..c8a07e6c84b47 100644 --- a/pulsar-client-cpp/tests/ConsumerTest.cc +++ b/pulsar-client-cpp/tests/ConsumerTest.cc @@ -868,4 +868,67 @@ TEST(ConsumerTest, testGetLastMessageIdBlockWhenConnectionDisconnected) { 
ASSERT_GE(elapsed.seconds(), operationTimeout); } +class ConsumerSeekTest : public ::testing::TestWithParam { + public: + void SetUp() override { producerConf_ = ProducerConfiguration().setBatchingEnabled(GetParam()); } + + void TearDown() override { client_.close(); } + + protected: + Client client_{lookupUrl}; + ProducerConfiguration producerConf_; +}; + +TEST_P(ConsumerSeekTest, testSeekForMessageId) { + Client client(lookupUrl); + + const std::string topic = "test-seek-for-message-id-" + std::string((GetParam() ? "batch-" : "")) + + std::to_string(time(nullptr)); + + Producer producer; + ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf_, producer)); + + const auto numMessages = 100; + MessageId seekMessageId; + + int r = (rand() % (numMessages - 1)); + for (int i = 0; i < numMessages; i++) { + MessageId id; + ASSERT_EQ(ResultOk, + producer.send(MessageBuilder().setContent("msg-" + std::to_string(i)).build(), id)); + + if (i == r) { + seekMessageId = id; + } + } + + LOG_INFO("The seekMessageId is: " << seekMessageId << ", r : " << r); + + Consumer consumerExclusive; + ASSERT_EQ(ResultOk, client.subscribe(topic, "sub-0", consumerExclusive)); + consumerExclusive.seek(seekMessageId); + Message msg0; + ASSERT_EQ(ResultOk, consumerExclusive.receive(msg0, 3000)); + + Consumer consumerInclusive; + ASSERT_EQ(ResultOk, + client.subscribe(topic, "sub-1", ConsumerConfiguration().setStartMessageIdInclusive(true), + consumerInclusive)); + consumerInclusive.seek(seekMessageId); + Message msg1; + ASSERT_EQ(ResultOk, consumerInclusive.receive(msg1, 3000)); + + LOG_INFO("consumerExclusive received " << msg0.getDataAsString() << " from " << msg0.getMessageId()); + LOG_INFO("consumerInclusive received " << msg1.getDataAsString() << " from " << msg1.getMessageId()); + + ASSERT_EQ(msg0.getDataAsString(), "msg-" + std::to_string(r + 1)); + ASSERT_EQ(msg1.getDataAsString(), "msg-" + std::to_string(r)); + + consumerInclusive.close(); + consumerExclusive.close(); + 
producer.close(); +} + +INSTANTIATE_TEST_CASE_P(Pulsar, ConsumerSeekTest, ::testing::Values(true, false)); + } // namespace pulsar From c1ce3dd7d49faff354cf73e2572d52175086f6bd Mon Sep 17 00:00:00 2001 From: tison Date: Wed, 28 Sep 2022 12:16:30 +0800 Subject: [PATCH 12/59] fix: SqliteJdbcSinkTest close in order (#17849) * fix: delete sqlite files after jdbc connection closed This closes #17713. Signed-off-by: tison * uses isolated db file Signed-off-by: tison * Revert "uses isolated db file" This reverts commit 295db3cf9457b537f295e74f091cd7dde0c478de. * close in order Signed-off-by: tison * strong order guarantee Signed-off-by: tison * factor out defer logic to avoid further bugs Signed-off-by: tison * Revert "factor out defer logic to avoid further bugs" This reverts commit f7f4634f37da783e90c64d94a2fb0b5cb947ef33. * Revert "strong order guarantee" This reverts commit 747086f3c7a4608b764fe6023ebaceab5b1227d1. * use awaitTermination Signed-off-by: tison Signed-off-by: tison --- .../apache/pulsar/io/jdbc/JdbcAbstractSink.java | 14 ++++++++------ .../apache/pulsar/io/jdbc/SqliteJdbcSinkTest.java | 6 ++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java index dbf27407ca14b..1d12909d5e251 100644 --- a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java +++ b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java @@ -137,8 +137,11 @@ private void initStatement() throws Exception { @Override public void close() throws Exception { - if (connection != null && jdbcSinkConfig.isUseTransactions()) { - connection.commit(); + if (flushExecutor != null) { + int timeoutMs = jdbcSinkConfig.getTimeoutMs() * 2; + flushExecutor.shutdown(); + flushExecutor.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS); + flushExecutor = null; } if 
(insertStatement != null) { insertStatement.close(); @@ -152,9 +155,8 @@ public void close() throws Exception { if (deleteStatement != null) { deleteStatement.close(); } - if (flushExecutor != null) { - flushExecutor.shutdown(); - flushExecutor = null; + if (connection != null && jdbcSinkConfig.isUseTransactions()) { + connection.commit(); } if (connection != null) { connection.close(); @@ -267,7 +269,7 @@ private void flush() { } swapList.forEach(Record::ack); } catch (Exception e) { - log.error("Got exception ", e.getMessage(), e); + log.error("Got exception {}", e.getMessage(), e); swapList.forEach(Record::fail); try { if (jdbcSinkConfig.isUseTransactions()) { diff --git a/pulsar-io/jdbc/sqlite/src/test/java/org/apache/pulsar/io/jdbc/SqliteJdbcSinkTest.java b/pulsar-io/jdbc/sqlite/src/test/java/org/apache/pulsar/io/jdbc/SqliteJdbcSinkTest.java index cb1ca84424931..030a9b4187bea 100644 --- a/pulsar-io/jdbc/sqlite/src/test/java/org/apache/pulsar/io/jdbc/SqliteJdbcSinkTest.java +++ b/pulsar-io/jdbc/sqlite/src/test/java/org/apache/pulsar/io/jdbc/SqliteJdbcSinkTest.java @@ -110,16 +110,14 @@ public void setUp() throws Exception { jdbcSink = new SqliteJdbcAutoSchemaSink(); - // open should success + // open should succeed jdbcSink.open(conf, null); - - } @AfterMethod(alwaysRun = true) public void tearDown() throws Exception { - sqliteUtils.tearDown(); jdbcSink.close(); + sqliteUtils.tearDown(); } private void testOpenAndWriteSinkNullValue(Map actionProperties) throws Exception { From 5f59f8b7f15ba134b01c66dc925fd66fde91c89d Mon Sep 17 00:00:00 2001 From: fengyubiao Date: Wed, 28 Sep 2022 12:19:47 +0800 Subject: [PATCH 13/59] [fix][flaky-test]Delete PersistentSubscriptionTest.testCanAcknowledgeAndCommitForTransaction (#17845) * scenario is already covered by PendingAckPersistentTest --- .../PersistentSubscriptionTest.java | 45 ------------------- 1 file changed, 45 deletions(-) diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java index 70bdf5e2cd100..72e6caf070b1c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java @@ -36,7 +36,6 @@ import java.lang.reflect.Field; import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Optional; @@ -222,50 +221,6 @@ public void teardown() throws Exception { store.close(); } - @Test - public void testCanAcknowledgeAndCommitForTransaction() throws ExecutionException, InterruptedException { - doAnswer((invocationOnMock) -> { - ((AsyncCallbacks.DeleteCallback) invocationOnMock.getArguments()[1]) - .deleteComplete(invocationOnMock.getArguments()[2]); - return null; - }).when(cursorMock).asyncDelete(any(List.class), any(AsyncCallbacks.DeleteCallback.class), any()); - - List> positionsPair = new ArrayList<>(); - positionsPair.add(new MutablePair<>(new PositionImpl(1, 1), 0)); - positionsPair.add(new MutablePair<>(new PositionImpl(1, 3), 0)); - positionsPair.add(new MutablePair<>(new PositionImpl(1, 5), 0)); - - doAnswer((invocationOnMock) -> { - assertTrue(Arrays.deepEquals(((List)invocationOnMock.getArguments()[0]).toArray(), - positionsPair.toArray())); - ((AsyncCallbacks.MarkDeleteCallback) invocationOnMock.getArguments()[2]) - .markDeleteComplete(invocationOnMock.getArguments()[3]); - return null; - }).when(cursorMock).asyncMarkDelete(any(), any(), any(AsyncCallbacks.MarkDeleteCallback.class), any()); - - // Single ack for txn - persistentSubscription.transactionIndividualAcknowledge(txnID1, positionsPair); - - // Commit txn - persistentSubscription.endTxn(txnID1.getMostSigBits(), 
txnID1.getLeastSigBits(), TxnAction.COMMIT_VALUE, -1).get(); - - List positions = new ArrayList<>(); - positions.add(new PositionImpl(3, 100)); - - // Cumulative ack for txn - persistentSubscription.transactionCumulativeAcknowledge(txnID1, positions); - - doAnswer((invocationOnMock) -> { - assertEquals(((PositionImpl) invocationOnMock.getArguments()[0]).compareTo(new PositionImpl(3, 100)), 0); - ((AsyncCallbacks.MarkDeleteCallback) invocationOnMock.getArguments()[2]) - .markDeleteComplete(invocationOnMock.getArguments()[3]); - return null; - }).when(cursorMock).asyncMarkDelete(any(), any(), any(AsyncCallbacks.MarkDeleteCallback.class), any()); - - // Commit txn - persistentSubscription.endTxn(txnID1.getMostSigBits(), txnID1.getLeastSigBits(), TxnAction.COMMIT_VALUE, -1).get(); - } - @Test public void testCanAcknowledgeAndAbortForTransaction() throws Exception { List> positionsPair = new ArrayList<>(); From 8aef1bfcea539470cff470e58f8f27d0f74c0b43 Mon Sep 17 00:00:00 2001 From: youzipi Date: Wed, 28 Sep 2022 16:12:43 +0800 Subject: [PATCH 14/59] [cleanup][broker][Modernizer] fix violations in pulsar-broker (#17691) Co-authored-by: Marvin Cai --- .../bookkeeper/mledger/ManagedCursor.java | 2 +- .../bookkeeper/mledger/ReadOnlyCursor.java | 2 +- .../mledger/impl/ManagedCursorImpl.java | 2 +- .../bookkeeper/mledger/impl/OpScan.java | 4 +- .../impl/ManagedCursorContainerTest.java | 2 +- pulsar-broker/pom.xml | 4 - .../pulsar/broker/admin/AdminResource.java | 3 +- .../broker/admin/impl/NamespacesBase.java | 25 ++-- .../admin/impl/PersistentTopicsBase.java | 52 +++---- .../admin/impl/SchemasResourceBase.java | 13 +- .../pulsar/broker/admin/impl/TenantsBase.java | 3 +- .../broker/admin/impl/TransactionsBase.java | 5 +- .../pulsar/broker/admin/v1/Namespaces.java | 4 +- .../broker/admin/v1/NonPersistentTopics.java | 8 +- .../pulsar/broker/admin/v2/Namespaces.java | 3 +- .../broker/admin/v2/NonPersistentTopics.java | 10 +- .../loadbalance/impl/LoadManagerShared.java | 4 +- 
.../namespace/NamespaceEphemeralData.java | 8 +- .../broker/namespace/NamespaceService.java | 26 ++-- .../broker/namespace/OwnershipCache.java | 4 +- .../pulsar/broker/service/AbstractTopic.java | 8 +- .../broker/service/BacklogQuotaManager.java | 4 +- .../pulsar/broker/service/BrokerService.java | 25 ++-- .../pulsar/broker/service/Consumer.java | 3 +- .../broker/service/EntryFilterSupport.java | 20 +-- .../pulsar/broker/service/Producer.java | 3 +- .../pulsar/broker/service/PulsarStats.java | 12 +- .../SystemTopicBasedTopicPoliciesService.java | 4 +- .../apache/pulsar/broker/service/Topic.java | 3 +- .../nonpersistent/NonPersistentTopic.java | 15 +- ...sistentDispatcherSingleActiveConsumer.java | 4 +- .../persistent/PersistentSubscription.java | 140 +++++++++--------- .../service/persistent/PersistentTopic.java | 27 ++-- .../schema/BookkeeperSchemaStorage.java | 14 +- .../service/schema/SchemaRegistryService.java | 4 +- .../broker/stats/AllocatorStatsGenerator.java | 4 +- .../stats/BookieClientStatsGenerator.java | 7 +- .../stats/BrokerOperabilityMetrics.java | 4 +- .../broker/stats/MBeanStatsGenerator.java | 4 +- .../pulsar/broker/stats/MetricsGenerator.java | 4 +- .../pulsar/broker/stats/NamespaceStats.java | 4 +- .../broker/stats/ReplicationMetrics.java | 4 +- .../broker/stats/metrics/AbstractMetrics.java | 13 +- .../stats/metrics/ManagedCursorMetrics.java | 8 +- .../metrics/ManagedLedgerCacheMetrics.java | 4 +- .../stats/metrics/ManagedLedgerMetrics.java | 10 +- .../systopic/SystemTopicClientBase.java | 5 +- .../pulsar/broker/admin/AdminApi2Test.java | 33 +++-- .../pulsar/broker/admin/AdminApiTest.java | 84 +++++------ .../apache/pulsar/broker/admin/AdminTest.java | 7 +- .../admin/AnalyzeBacklogSubscriptionTest.java | 23 ++- .../broker/admin/CreateSubscriptionTest.java | 16 +- .../broker/admin/IncrementPartitionsTest.java | 4 +- .../broker/admin/MaxUnackedMessagesTest.java | 3 +- .../pulsar/broker/admin/NamespacesTest.java | 56 +++---- 
.../pulsar/broker/admin/NamespacesV2Test.java | 4 +- .../broker/admin/PersistentTopicsTest.java | 13 +- .../broker/admin/TopicPoliciesTest.java | 13 +- .../broker/admin/v1/V1_AdminApi2Test.java | 28 ++-- .../broker/admin/v1/V1_AdminApiTest.java | 66 ++++----- .../pulsar/broker/auth/AuthLogsTest.java | 18 +-- .../auth/AuthenticationServiceTest.java | 6 +- .../pulsar/broker/auth/AuthorizationTest.java | 4 +- .../auth/MockAuthenticationProvider.java | 3 - .../loadbalance/impl/OverloadShedderTest.java | 12 +- .../impl/ThresholdShedderTest.java | 10 +- .../namespace/NamespaceServiceTest.java | 30 ++-- .../broker/namespace/OwnershipCacheTest.java | 8 +- .../service/AbstractBaseDispatcherTest.java | 4 +- .../service/BacklogQuotaManagerTest.java | 40 ++--- .../broker/service/BatchMessageTest.java | 36 ++--- .../BatchMessageWithBatchIndexLevelTest.java | 17 +-- .../service/BrokerBookieIsolationTest.java | 11 +- .../broker/service/BrokerServiceTest.java | 5 +- .../service/BrokerServiceThrottlingTest.java | 4 +- .../service/DistributedIdGeneratorTest.java | 4 +- ...xclusiveStickyKeyConsumerSelectorTest.java | 8 +- .../pulsar/broker/service/MessageTTLTest.java | 5 +- .../broker/service/PeerReplicatorTest.java | 17 +-- .../service/PersistentFailoverE2ETest.java | 19 +-- .../service/PersistentQueueE2ETest.java | 19 +-- .../PersistentTopicConcurrentTest.java | 4 +- .../broker/service/PersistentTopicTest.java | 10 +- .../pulsar/broker/service/RackAwareTest.java | 7 +- .../pulsar/broker/service/ReplicatorTest.java | 13 +- .../broker/service/ReplicatorTlsTest.java | 5 +- .../pulsar/broker/service/ServerCnxTest.java | 54 +++---- .../broker/service/SubscriptionSeekTest.java | 5 +- ...temTopicBasedTopicPoliciesServiceTest.java | 5 +- .../MessageRedeliveryControllerTest.java | 20 ++- .../service/plugin/FilterEntryTest.java | 15 +- .../KeyValueSchemaCompatibilityCheckTest.java | 73 +++++---- .../StreamingEntryReaderTests.java | 44 +++--- .../broker/stats/ConsumerStatsTest.java | 3 +- 
.../broker/stats/SubscriptionStatsTest.java | 16 +- .../TransactionCoordinatorClientTest.java | 4 +- .../broker/web/ExceptionHandlerTest.java | 8 +- .../broker/web/PulsarWebResourceTest.java | 5 +- .../pulsar/broker/web/RestExceptionTest.java | 1 - .../pulsar/sql/presto/PulsarSplitManager.java | 29 ++-- .../sql/presto/TestPulsarConnector.java | 59 ++++---- 101 files changed, 740 insertions(+), 811 deletions(-) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java index 17dbac09a2292..cf022d3552e64 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java @@ -18,13 +18,13 @@ */ package org.apache.bookkeeper.mledger; -import com.google.common.base.Predicate; import com.google.common.collect.Range; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.function.Predicate; import org.apache.bookkeeper.common.annotation.InterfaceAudience; import org.apache.bookkeeper.common.annotation.InterfaceStability; import org.apache.bookkeeper.mledger.AsyncCallbacks.ClearBacklogCallback; diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ReadOnlyCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ReadOnlyCursor.java index 28454d647b008..fe086f009808e 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ReadOnlyCursor.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ReadOnlyCursor.java @@ -18,9 +18,9 @@ */ package org.apache.bookkeeper.mledger; -import com.google.common.base.Predicate; import com.google.common.collect.Range; import java.util.List; +import java.util.function.Predicate; import org.apache.bookkeeper.common.annotation.InterfaceAudience; import 
org.apache.bookkeeper.common.annotation.InterfaceStability; import org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback; diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java index 6d595e76dc127..fe1798a0d7eff 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java @@ -28,7 +28,6 @@ import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; -import com.google.common.base.Predicate; import com.google.common.collect.Collections2; import com.google.common.collect.Lists; import com.google.common.collect.Range; @@ -60,6 +59,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; +import java.util.function.Predicate; import org.apache.bookkeeper.client.AsyncCallback.CloseCallback; import org.apache.bookkeeper.client.AsyncCallback.OpenCallback; import org.apache.bookkeeper.client.BKException; diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpScan.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpScan.java index e65e418a5ec82..9e56711b38667 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpScan.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpScan.java @@ -18,11 +18,11 @@ */ package org.apache.bookkeeper.mledger.impl; -import com.google.common.base.Predicate; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Predicate; import lombok.extern.slf4j.Slf4j; import 
org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.ScanCallback; @@ -81,7 +81,7 @@ public void readEntriesComplete(List entries, Object ctx) { callback.scanComplete(lastSeenPosition, ScanOutcome.ABORTED, OpScan.this.ctx); return; } - if (!condition.apply(entry)) { + if (!condition.test(entry)) { log.warn("[{}] Scan abort due to user code", OpScan.this.cursor); callback.scanComplete(lastSeenPosition, ScanOutcome.USER_INTERRUPTED, OpScan.this.ctx); return; diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java index 312e09f846eb1..dfb65d59c78bd 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java @@ -25,7 +25,6 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.base.Predicate; import com.google.common.collect.Lists; import com.google.common.collect.Range; import com.google.common.collect.Sets; @@ -34,6 +33,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.ClearBacklogCallback; diff --git a/pulsar-broker/pom.xml b/pulsar-broker/pom.xml index df04da60bc615..ca36f9ca3710d 100644 --- a/pulsar-broker/pom.xml +++ b/pulsar-broker/pom.xml @@ -437,10 +437,6 @@ true 17 - org.apache.pulsar.broker.admin - org.apache.pulsar.broker.namespace - org.apache.pulsar.broker.service - org.apache.pulsar.broker.stats org.apache.pulsar.client diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java index 47c69ecc69846..bab7ed0542402 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -802,7 +803,7 @@ protected CompletableFuture getSchemaCompatibilityS @CanIgnoreReturnValue public static T checkNotNull(T reference) { - return com.google.common.base.Preconditions.checkNotNull(reference); + return Objects.requireNonNull(reference); } protected void checkNotNull(Object o, String errorMessage) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java index 93ddbf794c0d2..01532449687ee 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java @@ -22,7 +22,6 @@ import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.pulsar.common.policies.data.PoliciesUtil.defaultBundle; import static org.apache.pulsar.common.policies.data.PoliciesUtil.getBundles; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; import java.lang.reflect.Field; import java.net.MalformedURLException; @@ -176,7 +175,7 @@ protected CompletableFuture> internalGetListOfTopics(Policies polic } protected CompletableFuture> internalGetNonPersistentTopics(Policies policies) { - final List>> futures = Lists.newArrayList(); + final List>> futures = new ArrayList<>(); final List boundaries = 
policies.bundles.getBoundaries(); for (int i = 0; i < boundaries.size() - 1; i++) { final String bundle = String.format("%s_%s", boundaries.get(i), boundaries.get(i + 1)); @@ -189,7 +188,7 @@ protected CompletableFuture> internalGetNonPersistentTopics(Policie } return FutureUtil.waitForAll(futures) .thenApply(__ -> { - final List topics = Lists.newArrayList(); + final List topics = new ArrayList<>(); for (int i = 0; i < futures.size(); i++) { List topicList = futures.get(i).join(); if (topicList != null) { @@ -341,7 +340,7 @@ private CompletableFuture precheckWhenDeleteNamespace(NamespaceName ns if (replicationClusters.size() == 1 && !policies.replication_clusters.contains(config().getClusterName())) { // the only replication cluster is other cluster, redirect - String replCluster = Lists.newArrayList(policies.replication_clusters).get(0); + String replCluster = new ArrayList<>(policies.replication_clusters).get(0); return clusterResources().getClusterAsync(replCluster) .thenCompose(replClusterDataOpt -> { ClusterData replClusterData = replClusterDataOpt @@ -423,7 +422,7 @@ protected CompletableFuture internalDeleteNamespaceBundleAsync(String bund if (policies.replication_clusters.size() == 1 && !policies.replication_clusters.contains(config().getClusterName())) { // the only replication cluster is other cluster, redirect - String replCluster = Lists.newArrayList(policies.replication_clusters).get(0); + String replCluster = new ArrayList<>(policies.replication_clusters).get(0); future = clusterResources().getClusterAsync(replCluster) .thenCompose(clusterData -> { if (clusterData.isEmpty()) { @@ -768,7 +767,7 @@ protected void internalUnloadNamespace(AsyncResponse asyncResponse) { Policies policies = getNamespacePolicies(namespaceName); - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); List boundaries = policies.bundles.getBoundaries(); for (int i = 0; i < boundaries.size() - 1; i++) { String bundle = String.format("%s_%s", 
boundaries.get(i), boundaries.get(i + 1)); @@ -814,7 +813,7 @@ protected CompletableFuture internalUnloadNamespaceAsync() { }) .thenCompose(__ -> getNamespacePoliciesAsync(namespaceName)) .thenCompose(policies -> { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); List boundaries = policies.bundles.getBoundaries(); for (int i = 0; i < boundaries.size() - 1; i++) { String bundle = String.format("%s_%s", boundaries.get(i), boundaries.get(i + 1)); @@ -1342,7 +1341,7 @@ private CompletableFuture doUpdatePersistenceAsync(PersistencePolicies per protected void internalClearNamespaceBacklog(AsyncResponse asyncResponse, boolean authoritative) { validateNamespaceOperation(namespaceName, NamespaceOperation.CLEAR_BACKLOG); - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); try { NamespaceBundles bundles = pulsar().getNamespaceService().getNamespaceBundleFactory() .getBundles(namespaceName); @@ -1407,7 +1406,7 @@ protected void internalClearNamespaceBacklogForSubscription(AsyncResponse asyncR validateNamespaceOperation(namespaceName, NamespaceOperation.CLEAR_BACKLOG); checkNotNull(subscription, "Subscription should not be null"); - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); try { NamespaceBundles bundles = pulsar().getNamespaceService().getNamespaceBundleFactory() .getBundles(namespaceName); @@ -1474,7 +1473,7 @@ protected void internalUnsubscribeNamespace(AsyncResponse asyncResponse, String validateNamespaceOperation(namespaceName, NamespaceOperation.UNSUBSCRIBE); checkNotNull(subscription, "Subscription should not be null"); - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); try { NamespaceBundles bundles = pulsar().getNamespaceService().getNamespaceBundleFactory() .getBundles(namespaceName); @@ -1721,7 +1720,7 @@ private void clearBacklog(NamespaceName nsName, String bundleRange, String subsc List topicList = 
pulsar().getBrokerService().getAllTopicsFromNamespaceBundle(nsName.toString(), nsName.toString() + "/" + bundleRange); - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); if (subscription != null) { if (subscription.startsWith(pulsar().getConfiguration().getReplicatorPrefix())) { subscription = PersistentReplicator.getRemoteCluster(subscription); @@ -1753,7 +1752,7 @@ private void unsubscribe(NamespaceName nsName, String bundleRange, String subscr try { List topicList = pulsar().getBrokerService().getAllTopicsFromNamespaceBundle(nsName.toString(), nsName.toString() + "/" + bundleRange); - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); if (subscription.startsWith(pulsar().getConfiguration().getReplicatorPrefix())) { throw new RestException(Status.PRECONDITION_FAILED, "Cannot unsubscribe a replication cursor"); } else { @@ -1794,7 +1793,7 @@ protected BundlesData validateBundlesData(BundlesData initialBundles) { throw new RestException(Status.BAD_REQUEST, "Input bundles do not cover the whole hash range. 
first:" + partitions.first() + ", last:" + partitions.last()); } - List bundles = Lists.newArrayList(); + List bundles = new ArrayList<>(); bundles.addAll(partitions); return BundlesData.builder() .boundaries(bundles) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java index 04a4ee7a23e4e..86b242c949760 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java @@ -22,8 +22,6 @@ import static org.apache.pulsar.common.naming.SystemTopicNames.isTransactionInternalName; import com.fasterxml.jackson.core.JsonProcessingException; import com.github.zafarkhaja.semver.Version; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import java.io.IOException; @@ -251,7 +249,7 @@ protected CompletableFuture>> internalGetPermissions throw new RestException(Status.NOT_FOUND, "Namespace does not exist"); } - Map> permissions = Maps.newHashMap(); + Map> permissions = new HashMap<>(); String topicUri = topicName.toString(); AuthPolicies auth = policies.get().auth_policies; // First add namespace level permissions @@ -871,8 +869,7 @@ protected void internalUnloadTopic(AsyncResponse asyncResponse, boolean authorit getPartitionedTopicMetadataAsync(topicName, authoritative, false) .thenAccept(meta -> { if (meta.partitions > 0) { - final List> futures = - Lists.newArrayListWithCapacity(meta.partitions); + final List> futures = new ArrayList<>(meta.partitions); for (int i = 0; i < meta.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); try { @@ -1145,7 +1142,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut final Set subscriptions = 
Collections.newSetFromMap( new ConcurrentHashMap<>(partitionMetadata.partitions)); - final List> subscriptionFutures = Lists.newArrayList(); + final List> subscriptionFutures = new ArrayList<>(); if (topicName.getDomain() == TopicDomain.persistent) { final Map> existsFutures = new ConcurrentHashMap<>(partitionMetadata.partitions); @@ -1153,7 +1150,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut existsFutures.put(i, topicResources().persistentTopicExists(topicName.getPartition(i))); } - FutureUtil.waitForAll(Lists.newArrayList(existsFutures.values())) + FutureUtil.waitForAll(new ArrayList<>(existsFutures.values())) .thenApply(unused2 -> existsFutures.entrySet().stream().filter(e -> e.getValue().join()) .map(item -> topicName.getPartition(item.getKey()).toString()) @@ -1243,7 +1240,7 @@ private void resumeAsyncResponse(AsyncResponse asyncResponse, Set subscr private void internalGetSubscriptionsForNonPartitionedTopic(AsyncResponse asyncResponse) { getTopicReferenceAsync(topicName) - .thenAccept(topic -> asyncResponse.resume(Lists.newArrayList(topic.getSubscriptions().keys()))) + .thenAccept(topic -> asyncResponse.resume(new ArrayList<>(topic.getSubscriptions().keys()))) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
if (!isRedirectException(ex)) { @@ -1309,7 +1306,7 @@ protected void internalGetManagedLedgerInfo(AsyncResponse asyncResponse, boolean .thenAccept(partitionMetadata -> { if (partitionMetadata.partitions > 0) { final List> futures = - Lists.newArrayListWithCapacity(partitionMetadata.partitions); + new ArrayList<>(partitionMetadata.partitions); PartitionedManagedLedgerInfo partitionedManagedLedgerInfo = new PartitionedManagedLedgerInfo(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -1509,7 +1506,7 @@ protected void internalGetPartitionedStatsInternal(AsyncResponse asyncResponse, PartitionedTopicInternalStats stats = new PartitionedTopicInternalStats(partitionMetadata); - List> topicStatsFutureList = Lists.newArrayList(); + List> topicStatsFutureList = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { try { topicStatsFutureList.add(pulsar().getAdminClient().topics() @@ -1571,7 +1568,7 @@ protected void internalDeleteSubscription(AsyncResponse asyncResponse, String su getPartitionedTopicMetadataAsync(topicName, authoritative, false).thenAcceptAsync(partitionMetadata -> { if (partitionMetadata.partitions > 0) { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -1792,7 +1789,7 @@ protected void internalDeleteSubscriptionForcefully(AsyncResponse asyncResponse, getPartitionedTopicMetadataAsync(topicName, authoritative, false).thenAccept(partitionMetadata -> { if (partitionMetadata.partitions > 0) { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -1895,7 +1892,7 @@ protected void internalSkipAllMessages(AsyncResponse asyncResponse, String subNa return 
getPartitionedTopicMetadataAsync(topicName, authoritative, false).thenCompose(partitionMetadata -> { if (partitionMetadata.partitions > 0) { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -2072,8 +2069,7 @@ protected void internalExpireMessagesForAllSubscriptions(AsyncResponse asyncResp partitionMetadata, expireTimeInSeconds, authoritative); } else { if (partitionMetadata.partitions > 0) { - final List> futures = - Lists.newArrayListWithCapacity(partitionMetadata.partitions); + final List> futures = new ArrayList<>(partitionMetadata.partitions); // expire messages for each partition topic for (int i = 0; i < partitionMetadata.partitions; i++) { @@ -2145,9 +2141,9 @@ private void internalExpireMessagesForAllSubscriptionsForNonPartitionedTopic(Asy } PersistentTopic topic = (PersistentTopic) t; final List> futures = - Lists.newArrayListWithCapacity((int) topic.getReplicators().size()); + new ArrayList<>((int) topic.getReplicators().size()); List subNames = - Lists.newArrayListWithCapacity((int) topic.getReplicators().size() + new ArrayList<>((int) topic.getReplicators().size() + (int) topic.getSubscriptions().size()); subNames.addAll(topic.getReplicators().keys()); subNames.addAll(topic.getSubscriptions().keys()); @@ -2472,7 +2468,7 @@ protected void internalUpdateSubscriptionProperties(AsyncResponse asyncResponse, getPartitionedTopicMetadataAsync(topicName, authoritative, false).thenAcceptAsync(partitionMetadata -> { if (partitionMetadata.partitions > 0) { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -2590,7 +2586,7 @@ protected void internalGetSubscriptionProperties(AsyncResponse asyncResponse, St getPartitionedTopicMetadataAsync(topicName, 
authoritative, false).thenAcceptAsync(partitionMetadata -> { if (partitionMetadata.partitions > 0) { - final List>> futures = Lists.newArrayList(); + final List>> futures = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -3242,10 +3238,10 @@ protected CompletableFuture> in Map quotaMap = op .map(TopicPolicies::getBackLogQuotaMap) .map(map -> { - HashMap hashMap = Maps.newHashMap(); + HashMap hashMap = new HashMap<>(); map.forEach((key, value) -> hashMap.put(BacklogQuota.BacklogQuotaType.valueOf(key), value)); return hashMap; - }).orElse(Maps.newHashMap()); + }).orElse(new HashMap<>()); if (applied && quotaMap.isEmpty()) { quotaMap = getNamespacePolicies(namespaceName).backlog_quota_map; if (quotaMap.isEmpty()) { @@ -3804,7 +3800,7 @@ protected void internalTerminatePartitionedTopic(AsyncResponse asyncResponse, bo if (partitionMetadata.partitions > 0) { Map messageIds = new ConcurrentHashMap<>(partitionMetadata.partitions); final List> futures = - Lists.newArrayListWithCapacity(partitionMetadata.partitions); + new ArrayList<>(partitionMetadata.partitions); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -3882,7 +3878,7 @@ protected void internalExpireMessagesByTimestamp(AsyncResponse asyncResponse, St } else { if (partitionMetadata.partitions > 0) { return CompletableFuture.completedFuture(null).thenAccept(unused -> { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); // expire messages for each partition topic for (int i = 0; i < partitionMetadata.partitions; i++) { @@ -4148,7 +4144,7 @@ protected void internalTriggerCompaction(AsyncResponse asyncResponse, boolean au .thenAccept(partitionMetadata -> { final int numPartitions = partitionMetadata.partitions; if (numPartitions > 0) { - final List> futures = Lists.newArrayListWithCapacity(numPartitions); + final List> futures 
= new ArrayList<>(numPartitions); for (int i = 0; i < numPartitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -4958,7 +4954,7 @@ protected CompletableFuture>> internalGetSubscriptionType protected CompletableFuture internalSetSubscriptionTypesEnabled( Set subscriptionTypesEnabled, boolean isGlobal) { - List subTypes = Lists.newArrayList(); + List subTypes = new ArrayList<>(); subscriptionTypesEnabled.forEach(subscriptionType -> subTypes.add(SubType.valueOf(subscriptionType.name()))); return getTopicPoliciesAsyncWithRetry(topicName, isGlobal) .thenCompose(op -> { @@ -4975,7 +4971,7 @@ protected CompletableFuture internalRemoveSubscriptionTypesEnabled(boolean if (!op.isPresent()) { return CompletableFuture.completedFuture(null); } - op.get().setSubscriptionTypesEnabled(Lists.newArrayList()); + op.get().setSubscriptionTypesEnabled(new ArrayList<>()); op.get().setIsGlobal(isGlobal); return pulsar().getTopicPoliciesService().updateTopicPoliciesAsync(topicName, op.get()); }); @@ -5114,7 +5110,7 @@ protected void internalSetReplicatedSubscriptionStatus(AsyncResponse asyncRespon thenCompose(__ -> getPartitionedTopicMetadataAsync(topicName, authoritative, false)) .thenAccept(partitionMetadata -> { if (partitionMetadata.partitions > 0) { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName topicNamePartition = topicName.getPartition(i); @@ -5252,7 +5248,7 @@ protected void internalGetReplicatedSubscriptionStatus(AsyncResponse asyncRespon .thenAccept(partitionMetadata -> { if (partitionMetadata.partitions > 0) { List> futures = new ArrayList<>(partitionMetadata.partitions); - Map status = Maps.newHashMap(); + Map status = new HashMap<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { TopicName partition = topicName.getPartition(i); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java index a115b26407d18..0254ff395ba2a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java @@ -18,13 +18,12 @@ */ package org.apache.pulsar.broker.admin.impl; -import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Objects.isNull; import static org.apache.commons.lang.StringUtils.defaultIfEmpty; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.time.Clock; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -123,12 +122,12 @@ public CompletableFuture postSchemaAsync(PostSchemaPayload payloa try { data = DefaultImplementation.getDefaultImplementation() .convertKeyValueDataStringToSchemaInfoSchema(payload.getSchema() - .getBytes(Charsets.UTF_8)); + .getBytes(StandardCharsets.UTF_8)); } catch (IOException conversionError) { throw new RestException(conversionError); } } else { - data = payload.getSchema().getBytes(Charsets.UTF_8); + data = payload.getSchema().getBytes(StandardCharsets.UTF_8); } return pulsar().getSchemaRegistryService() .putSchemaIfAbsent(getSchemaId(), @@ -148,7 +147,7 @@ public CompletableFuture> testCompati .thenCompose(strategy -> { String schemaId = getSchemaId(); return pulsar().getSchemaRegistryService().isCompatible(schemaId, - SchemaData.builder().data(payload.getSchema().getBytes(Charsets.UTF_8)) + SchemaData.builder().data(payload.getSchema().getBytes(StandardCharsets.UTF_8)) .isDeleted(false) .timestamp(clock.millis()).type(SchemaType.valueOf(payload.getType())) .user(defaultIfEmpty(clientAppId(), "")) @@ -164,7 +163,7 @@ public CompletableFuture getVersionBySchemaAsync(PostSchemaPayload payload String schemaId = 
getSchemaId(); return pulsar().getSchemaRegistryService() .findSchemaVersion(schemaId, - SchemaData.builder().data(payload.getSchema().getBytes(Charsets.UTF_8)) + SchemaData.builder().data(payload.getSchema().getBytes(StandardCharsets.UTF_8)) .isDeleted(false).timestamp(clock.millis()) .type(SchemaType.valueOf(payload.getType())) .user(defaultIfEmpty(clientAppId(), "")) @@ -185,7 +184,7 @@ private static GetSchemaResponse convertSchemaAndMetadataToGetSchemaResponse(Sch DefaultImplementation.getDefaultImplementation() .decodeKeyValueSchemaInfo(schemaAndMetadata.schema.toSchemaInfo())); } else { - schemaData = new String(schemaAndMetadata.schema.getData(), UTF_8); + schemaData = new String(schemaAndMetadata.schema.getData(), StandardCharsets.UTF_8); } return GetSchemaResponse.builder().version(getLongSchemaVersion(schemaAndMetadata.version)) .type(schemaAndMetadata.schema.getType()).timestamp(schemaAndMetadata.schema.getTimestamp()) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TenantsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TenantsBase.java index c2b8bb7220ba8..fdf0433b340cf 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TenantsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TenantsBase.java @@ -20,7 +20,6 @@ package org.apache.pulsar.broker.admin.impl; import static org.apache.pulsar.common.naming.Constants.GLOBAL_CLUSTER; -import com.google.common.collect.Lists; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import io.swagger.annotations.ApiResponse; @@ -249,7 +248,7 @@ protected CompletableFuture internalDeleteTenantAsyncForcefully(String ten } return tenantResources().getListOfNamespacesAsync(tenant) .thenApply(namespaces -> { - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); try { PulsarAdmin adminClient = pulsar().getAdminClient(); for (String namespace : 
namespaces) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java index b2137d7433870..dbd70feb1c052 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java @@ -21,7 +21,6 @@ import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; import static javax.ws.rs.core.Response.Status.NOT_FOUND; import static javax.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; -import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -92,7 +91,7 @@ protected void internalGetCoordinatorStats(AsyncResponse asyncResponse, boolean return; } List> transactionMetadataStoreInfoFutures = - Lists.newArrayList(); + new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { try { transactionMetadataStoreInfoFutures @@ -309,7 +308,7 @@ protected void internalGetSlowTransactions(AsyncResponse asyncResponse, return; } List>> completableFutures = - Lists.newArrayList(); + new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { try { completableFutures diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java index 9fd2020d48ab1..3fd09ddd594e3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java @@ -19,12 +19,12 @@ package org.apache.pulsar.broker.admin.v1; import static org.apache.pulsar.common.policies.data.PoliciesUtil.getBundles; -import com.google.common.collect.Lists; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import 
io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; +import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -107,7 +107,7 @@ public void getTenantNamespaces(@Suspended AsyncResponse response, public List getNamespacesForCluster(@PathParam("property") String tenant, @PathParam("cluster") String cluster) { validateTenantOperation(tenant, TenantOperation.LIST_NAMESPACES); - List namespaces = Lists.newArrayList(); + List namespaces = new ArrayList<>(); if (!clusters().contains(cluster)) { log.warn("[{}] Failed to get namespace list for tenant: {}/{} - Cluster does not exist", clientAppId(), tenant, cluster); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java index 34e9eadd71665..fef5eda009bde 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java @@ -19,11 +19,11 @@ package org.apache.pulsar.broker.admin.v1; -import com.google.common.collect.Lists; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; +import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -200,7 +200,7 @@ public void getList(@Suspended final AsyncResponse asyncResponse, @PathParam("pr return; } - final List>> futures = Lists.newArrayList(); + final List>> futures = new ArrayList<>(); final List boundaries = policies.bundles.getBoundaries(); for (int i = 0; i < boundaries.size() - 1; i++) { final String bundle = String.format("%s_%s", boundaries.get(i), boundaries.get(i + 1)); @@ -222,7 +222,7 @@ public void getList(@Suspended final 
AsyncResponse asyncResponse, @PathParam("pr if (ex != null) { resumeAsyncResponseExceptionally(asyncResponse, ex); } else { - final List topics = Lists.newArrayList(); + final List topics = new ArrayList<>(); for (int i = 0; i < futures.size(); i++) { List topicList = futures.get(i).join(); if (topicList != null) { @@ -267,7 +267,7 @@ public List getListFromBundle(@PathParam("property") String property, @P } NamespaceBundle nsBundle = validateNamespaceBundleOwnership(fqnn, policies.bundles, bundleRange, true, true); - final List topicList = Lists.newArrayList(); + final List topicList = new ArrayList<>(); pulsar().getBrokerService().forEachTopic(topic -> { TopicName topicName = TopicName.get(topic.getName()); if (nsBundle.includes(topicName)) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java index 0ebfabb78913b..dace261ceb6ae 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java @@ -19,7 +19,6 @@ package org.apache.pulsar.broker.admin.v2; import static org.apache.pulsar.common.policies.data.PoliciesUtil.getBundles; -import com.google.common.collect.Sets; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; @@ -2319,7 +2318,7 @@ public void setSubscriptionTypesEnabled( public void removeSubscriptionTypesEnabled(@PathParam("tenant") String tenant, @PathParam("namespace") String namespace) { validateNamespaceName(tenant, namespace); - internalSetSubscriptionTypesEnabled(Sets.newHashSet()); + internalSetSubscriptionTypesEnabled(new HashSet<>()); } @GET diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java index 
82da5f91263a4..5b0deccd0378b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java @@ -19,12 +19,12 @@ package org.apache.pulsar.broker.admin.v2; -import com.google.common.collect.Lists; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -233,7 +233,7 @@ public void getPartitionedStats( } NonPersistentPartitionedTopicStatsImpl stats = new NonPersistentPartitionedTopicStatsImpl(partitionMetadata); - List> topicStatsFutureList = Lists.newArrayList(); + List> topicStatsFutureList = new ArrayList<>(); for (int i = 0; i < partitionMetadata.partitions; i++) { try { topicStatsFutureList @@ -369,7 +369,7 @@ public void getList( return; } - final List>> futures = Lists.newArrayList(); + final List>> futures = new ArrayList<>(); final List boundaries = policies.bundles.getBoundaries(); for (int i = 0; i < boundaries.size() - 1; i++) { final String bundle = String.format("%s_%s", boundaries.get(i), boundaries.get(i + 1)); @@ -390,7 +390,7 @@ public void getList( if (ex != null) { resumeAsyncResponseExceptionally(asyncResponse, ex); } else { - final List topics = Lists.newArrayList(); + final List topics = new ArrayList<>(); for (int i = 0; i < futures.size(); i++) { List topicList = futures.get(i).join(); if (topicList != null) { @@ -458,7 +458,7 @@ public void getListFromBundle( asyncResponse.resume(Collections.emptyList()); return; } - final List topicList = Lists.newArrayList(); + final List topicList = new ArrayList<>(); String bundleKey = namespaceName.toString() + "/" + nsBundle.getBundleRange(); ConcurrentOpenHashMap topicMap = 
bundleTopics.get(bundleKey); if (topicMap != null) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java index aa801f2187d4d..a298558f34502 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java @@ -20,10 +20,10 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.common.stats.JvmMetrics.getJvmDirectMemoryUsed; -import com.beust.jcommander.internal.Lists; import io.netty.util.concurrent.FastThreadLocal; import java.net.MalformedURLException; import java.net.URL; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -433,7 +433,7 @@ public static CompletableFuture> getAntiAffinityNamespaceOw } final String antiAffinityGroup = policies.get().namespaceAntiAffinityGroup; final Map brokerToAntiAffinityNamespaceCount = new ConcurrentHashMap<>(); - final List> futures = Lists.newArrayList(); + final List> futures = new ArrayList<>(); brokerToNamespaceToBundleRange.forEach((broker, nsToBundleRange) -> { nsToBundleRange.forEach((ns, bundleRange) -> { if (bundleRange.isEmpty()) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceEphemeralData.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceEphemeralData.java index 2f1262f5bf225..e86007b64460d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceEphemeralData.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceEphemeralData.java @@ -18,8 +18,8 @@ */ package org.apache.pulsar.broker.namespace; -import com.google.common.collect.Maps; import java.util.Collections; +import java.util.HashMap; import 
java.util.Map; import javax.validation.constraints.NotNull; import lombok.Data; @@ -49,16 +49,16 @@ public NamespaceEphemeralData(String brokerUrl, String brokerUrlTls, String http this.httpUrlTls = httpUrlTls; this.disabled = disabled; if (advertisedListeners == null) { - this.advertisedListeners = Collections.EMPTY_MAP; + this.advertisedListeners = Collections.emptyMap(); } else { - this.advertisedListeners = Maps.newHashMap(advertisedListeners); + this.advertisedListeners = new HashMap<>(advertisedListeners); } } @NotNull public Map getAdvertisedListeners() { if (this.advertisedListeners == null) { - return Collections.EMPTY_MAP; + return Collections.emptyMap(); } return Collections.unmodifiableMap(this.advertisedListeners); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java index 33d7057c4bd36..5c1cb283d6990 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java @@ -19,21 +19,21 @@ package org.apache.pulsar.broker.namespace; import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; import static java.lang.String.format; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.pulsar.common.naming.NamespaceName.SYSTEM_NAMESPACE; -import com.google.common.collect.Lists; import com.google.common.hash.Hashing; import io.prometheus.client.Counter; import java.net.URI; import java.net.URL; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import 
java.util.Set; import java.util.concurrent.CompletableFuture; @@ -549,7 +549,7 @@ private void searchForCandidateBroker(NamespaceBundle bundle, } try { - checkNotNull(candidateBroker); + Objects.requireNonNull(candidateBroker); if (candidateBroker.equals(pulsar.getSafeWebServiceAddress())) { // Load manager decided that the local broker should try to become the owner @@ -865,8 +865,8 @@ void splitAndOwnBundleOnceAndRetry(NamespaceBundle bundle, return; } - checkNotNull(splittedBundles.getLeft()); - checkNotNull(splittedBundles.getRight()); + Objects.requireNonNull(splittedBundles.getLeft()); + Objects.requireNonNull(splittedBundles.getRight()); checkArgument(splittedBundles.getRight().size() == splitBoundaries.size() + 1, "bundle has to be split in " + (splitBoundaries.size() + 1) + " bundles"); NamespaceName nsname = bundle.getNamespaceObject(); @@ -878,7 +878,7 @@ void splitAndOwnBundleOnceAndRetry(NamespaceBundle bundle, try { // take ownership of newly split bundles for (NamespaceBundle sBundle : splittedBundles.getRight()) { - checkNotNull(ownershipCache.tryAcquiringOwnership(sBundle)); + Objects.requireNonNull(ownershipCache.tryAcquiringOwnership(sBundle)); } updateNamespaceBundles(nsname, splittedBundles.getLeft()) .thenRun(() -> { @@ -968,8 +968,8 @@ void splitAndOwnBundleOnceAndRetry(NamespaceBundle bundle, * @throws Exception */ private CompletableFuture updateNamespaceBundles(NamespaceName nsname, NamespaceBundles nsBundles) { - checkNotNull(nsname); - checkNotNull(nsBundles); + Objects.requireNonNull(nsname); + Objects.requireNonNull(nsBundles); LocalPolicies localPolicies = nsBundles.toLocalPolicies(); @@ -1088,7 +1088,7 @@ protected void onNamespaceBundleUnload(NamespaceBundle bundle) { } public void addNamespaceBundleOwnershipListener(NamespaceBundleOwnershipListener... 
listeners) { - checkNotNull(listeners); + Objects.requireNonNull(listeners); for (NamespaceBundleOwnershipListener listener : listeners) { if (listener != null) { bundleOwnershipListeners.add(listener); @@ -1208,9 +1208,9 @@ public CompletableFuture> getPartitions(NamespaceName namespaceName .listPartitionedTopicsAsync(namespaceName, topicDomain) .thenCompose(topics -> { CompletableFuture> result = new CompletableFuture<>(); - List resultPartitions = Collections.synchronizedList(Lists.newArrayList()); + List resultPartitions = Collections.synchronizedList(new ArrayList<>()); if (CollectionUtils.isNotEmpty(topics)) { - List>> futures = Lists.newArrayList(); + List>> futures = new ArrayList<>(); for (String topic : topics) { CompletableFuture> future = getPartitionsForTopic(TopicName.get(topic)); futures.add(future); @@ -1232,7 +1232,7 @@ public CompletableFuture> getPartitions(NamespaceName namespaceName private CompletableFuture> getPartitionsForTopic(TopicName topicName) { return pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync(topicName).thenCompose(meta -> { - List result = Lists.newArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < meta.partitions; i++) { result.add(topicName.getPartition(i).toString()); } @@ -1255,7 +1255,7 @@ public CompletableFuture> getListOfNonPersistentTopics(NamespaceNam } else { // Non-persistent topics don't have managed ledgers so we have to retrieve them from local // cache. 
- List topics = Lists.newArrayList(); + List topics = new ArrayList<>(); synchronized (pulsar.getBrokerService().getMultiLayerTopicMap()) { if (pulsar.getBrokerService().getMultiLayerTopicMap() .containsKey(namespaceName.toString())) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java index 4f60512a7966a..e0cdd129a8f03 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java @@ -21,8 +21,8 @@ import com.github.benmanes.caffeine.cache.AsyncCacheLoader; import com.github.benmanes.caffeine.cache.AsyncLoadingCache; import com.github.benmanes.caffeine.cache.Caffeine; -import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; @@ -223,7 +223,7 @@ public CompletableFuture removeOwnership(NamespaceBundle bundle) { * NamespaceBundles to remove from ownership cache */ public CompletableFuture removeOwnership(NamespaceBundles bundles) { - List> allFutures = Lists.newArrayList(); + List> allFutures = new ArrayList<>(); for (NamespaceBundle bundle : bundles.getBundles()) { if (getOwnedBundle(bundle) == null) { // continue diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java index a3df1498698f3..428c54ecc191d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java @@ -21,8 +21,6 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.bookkeeper.mledger.impl.ManagedLedgerMBeanImpl.ENTRY_LATENCY_BUCKETS_USEC; 
import com.google.common.base.MoreObjects; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -147,7 +145,7 @@ public abstract class AbstractTopic implements Topic, TopicPolicyListener entryFilters; + protected Map entryFilters; public AbstractTopic(String topic, BrokerService brokerService) { this.topic = topic; @@ -187,7 +185,7 @@ public EntryFilters getEntryFiltersPolicy() { return this.topicPolicies.getEntryFilters().get(); } - public ImmutableMap getEntryFilters() { + public Map getEntryFilters() { return this.entryFilters; } @@ -251,7 +249,7 @@ protected void updateTopicPolicyByNamespacePolicy(Policies namespacePolicies) { topicPolicies.getRetentionPolicies().updateNamespaceValue(namespacePolicies.retention_policies); topicPolicies.getCompactionThreshold().updateNamespaceValue(namespacePolicies.compaction_threshold); topicPolicies.getReplicationClusters().updateNamespaceValue( - Lists.newArrayList(CollectionUtils.emptyIfNull(namespacePolicies.replication_clusters))); + new ArrayList<>(CollectionUtils.emptyIfNull(namespacePolicies.replication_clusters))); topicPolicies.getMaxUnackedMessagesOnConsumer() .updateNamespaceValue(namespacePolicies.max_unacked_messages_per_consumer); topicPolicies.getMaxUnackedMessagesOnSubscription() diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java index 805d00adca6bb..b11538a9217e2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java @@ -18,7 +18,7 @@ */ package org.apache.pulsar.broker.service; -import com.google.common.collect.Lists; +import java.util.ArrayList; import java.util.List; import java.util.Map; import 
java.util.concurrent.CompletableFuture; @@ -245,7 +245,7 @@ private void dropBacklogForTimeLimit(PersistentTopic persistentTopic, BacklogQuo * The topic on which all producers should be disconnected */ private void disconnectProducers(PersistentTopic persistentTopic) { - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); Map producers = persistentTopic.getProducers(); producers.values().forEach(producer -> { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index a3cef4bd0276b..8491615448aae 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -19,15 +19,11 @@ package org.apache.pulsar.broker.service; import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun; import static org.apache.commons.collections4.CollectionUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.pulsar.common.naming.SystemTopicNames.isTransactionInternalName; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import com.google.common.collect.Queues; import io.netty.bootstrap.ServerBootstrap; import io.netty.buffer.ByteBuf; @@ -52,6 +48,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; @@ -277,7 +274,7 @@ public class BrokerService implements Closeable { private boolean preciseTopicPublishRateLimitingEnable; private final LongAdder pausedConnections = new 
LongAdder(); private BrokerInterceptor interceptor; - private ImmutableMap entryFilters; + private Map entryFilters; private TopicFactory topicFactory; private Set brokerEntryMetadataInterceptors; @@ -1604,20 +1601,20 @@ public CompletableFuture getManagedLedgerConfig(TopicName t managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( IsolatedBookieEnsemblePlacementPolicy.class); if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); } else if (isSystemTopic(topicName)) { - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, "*"); properties.put(IsolatedBookieEnsemblePlacementPolicy .SECONDARY_ISOLATION_BOOKIE_GROUPS, "*"); managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); } else { - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, ""); properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, ""); managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); @@ -1626,7 +1623,7 @@ public CompletableFuture getManagedLedgerConfig(TopicName t if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( IsolatedBookieEnsemblePlacementPolicy.class); - Map properties = Maps.newHashMap(); + Map 
properties = new HashMap<>(); properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, @@ -1744,7 +1741,7 @@ private void addTopicToStatsMaps(TopicName topicName, Topic topic) { } public void refreshTopicToStatsMaps(NamespaceBundle oldBundle) { - checkNotNull(oldBundle); + Objects.requireNonNull(oldBundle); try { // retrieve all topics under existing old bundle List topics = getAllTopicsFromNamespaceBundle(oldBundle.getNamespaceObject().toString(), @@ -1974,7 +1971,7 @@ public CompletableFuture unloadServiceUnit(NamespaceBundle serviceUnit, */ private CompletableFuture unloadServiceUnit(NamespaceBundle serviceUnit, boolean closeWithoutWaitingClientDisconnect) { - List> closeFutures = Lists.newArrayList(); + List> closeFutures = new ArrayList<>(); topics.forEach((name, topicFuture) -> { TopicName topicName = TopicName.get(name); if (serviceUnit.includes(topicName)) { @@ -2677,7 +2674,7 @@ private void createDynamicConfigPathIfNotExist() { // create dynamic-config if not exist. if (!configCache.isPresent()) { pulsar().getPulsarResources().getDynamicConfigResources() - .setDynamicConfigurationWithCreate(n -> Maps.newHashMap()); + .setDynamicConfigurationWithCreate(n -> new HashMap<>()); } } catch (Exception e) { log.warn("Failed to read dynamic broker configuration", e); @@ -2698,7 +2695,7 @@ private void updateDynamicServiceConfiguration() { // create dynamic-config if not exist. 
if (!configCache.isPresent()) { pulsar().getPulsarResources().getDynamicConfigResources() - .setDynamicConfigurationWithCreate(n -> Maps.newHashMap()); + .setDynamicConfigurationWithCreate(n -> new HashMap<>()); } } catch (Exception e) { log.warn("Failed to read dynamic broker configuration", e); @@ -2736,7 +2733,7 @@ public static List getDynamicConfiguration() { } public Map getRuntimeConfiguration() { - Map configMap = Maps.newHashMap(); + Map configMap = new HashMap<>(); ConcurrentOpenHashMap runtimeConfigurationMap = getRuntimeConfigurationMap(); runtimeConfigurationMap.forEach((key, value) -> { configMap.put(key, String.valueOf(value)); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java index 7eae83ba25031..cc47976e82a31 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java @@ -22,7 +22,6 @@ import static org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; -import com.google.common.collect.Lists; import com.google.common.util.concurrent.AtomicDouble; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; @@ -994,7 +993,7 @@ public void redeliverUnacknowledgedMessages(long consumerEpoch) { public void redeliverUnacknowledgedMessages(List messageIds) { int totalRedeliveryMessages = 0; - List pendingPositions = Lists.newArrayList(); + List pendingPositions = new ArrayList<>(); for (MessageIdData msg : messageIds) { PositionImpl position = PositionImpl.get(msg.getLedgerId(), msg.getEntryId()); LongPair longPair = pendingAcks.get(position.getLedgerId(), position.getEntryId()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java index 7a4700a90a6d6..8704e21ae7556 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java @@ -18,8 +18,9 @@ */ package org.apache.pulsar.broker.service; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import java.util.Collections; +import java.util.List; +import java.util.Map; import org.apache.bookkeeper.mledger.Entry; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -34,7 +35,7 @@ public class EntryFilterSupport { * Entry filters in Broker. * Not set to final, for the convenience of testing mock. */ - protected ImmutableList entryFilters; + protected List entryFilters; protected final FilterContext filterContext; protected final Subscription subscription; @@ -45,19 +46,20 @@ public EntryFilterSupport(Subscription subscription) { .getBrokerService().getEntryFilters()) && !subscription.getTopic().getBrokerService().pulsar() .getConfiguration().isAllowOverrideEntryFilters()) { - this.entryFilters = subscription.getTopic().getBrokerService().getEntryFilters().values().asList(); + this.entryFilters = subscription.getTopic().getBrokerService().getEntryFilters().values().stream() + .toList(); } else { - ImmutableMap entryFiltersMap = + Map entryFiltersMap = subscription.getTopic().getEntryFilters(); if (entryFiltersMap != null) { - this.entryFilters = subscription.getTopic().getEntryFilters().values().asList(); + this.entryFilters = subscription.getTopic().getEntryFilters().values().stream().toList(); } else { - this.entryFilters = ImmutableList.of(); + this.entryFilters = Collections.emptyList(); } } this.filterContext = new FilterContext(); } else { - this.entryFilters = ImmutableList.of(); + this.entryFilters = Collections.emptyList(); 
this.filterContext = FilterContext.FILTER_CONTEXT_DISABLED; } } @@ -82,7 +84,7 @@ private void fillContext(FilterContext context, MessageMetadata msgMetadata, private static EntryFilter.FilterResult getFilterResult(FilterContext filterContext, Entry entry, - ImmutableList entryFilters) { + List entryFilters) { for (EntryFilter entryFilter : entryFilters) { EntryFilter.FilterResult filterResult = entryFilter.filterEntry(entry, filterContext); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java index b5d87a46cfc09..62182f6e84f49 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.service; -import static com.google.common.base.Preconditions.checkNotNull; import static com.scurrilous.circe.checksum.Crc32cIntChecksum.computeChecksum; import static org.apache.pulsar.broker.service.AbstractReplicator.REPL_PRODUCER_NAME_DELIMITER; import static org.apache.pulsar.common.protocol.Commands.hasChecksum; @@ -106,7 +105,7 @@ public Producer(Topic topic, TransportCnx cnx, long producerId, String producerN this.topic = topic; this.cnx = cnx; this.producerId = producerId; - this.producerName = checkNotNull(producerName); + this.producerName = Objects.requireNonNull(producerName); this.userProvidedProducerName = userProvidedProducerName; this.epoch = epoch; this.closeFuture = new CompletableFuture<>(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarStats.java index ff74cf839aef8..0babf2fe01223 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarStats.java @@ -18,14 +18,14 @@ */ 
package org.apache.pulsar.broker.service; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.util.ReferenceCountUtil; import java.io.Closeable; +import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import org.apache.pulsar.broker.PulsarService; @@ -65,12 +65,12 @@ public PulsarStats(PulsarService pulsar) { this.nsStats = new NamespaceStats(pulsar.getConfig().getStatsUpdateFrequencyInSecs()); this.clusterReplicationMetrics = new ClusterReplicationMetrics(pulsar.getConfiguration().getClusterName(), pulsar.getConfiguration().isReplicationMetricsEnabled()); - this.bundleStats = Maps.newConcurrentMap(); - this.tempMetricsCollection = Lists.newArrayList(); - this.metricsCollection = Lists.newArrayList(); + this.bundleStats = new ConcurrentHashMap<>(); + this.tempMetricsCollection = new ArrayList<>(); + this.metricsCollection = new ArrayList<>(); this.brokerOperabilityMetrics = new BrokerOperabilityMetrics(pulsar.getConfiguration().getClusterName(), pulsar.getAdvertisedAddress()); - this.tempNonPersistentTopics = Lists.newArrayList(); + this.tempNonPersistentTopics = new ArrayList<>(); this.exposePublisherStats = pulsar.getConfiguration().isExposePublisherStats(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java index b3a5e8c2b8b96..9c3c848897128 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java @@ -19,7 +19,6 @@ package org.apache.pulsar.broker.service; import 
com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; import java.util.HashSet; import java.util.List; @@ -27,6 +26,7 @@ import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import javax.annotation.Nonnull; @@ -559,7 +559,7 @@ public Boolean getPoliciesCacheInit(NamespaceName namespaceName) { public void registerListener(TopicName topicName, TopicPolicyListener listener) { listeners.compute(topicName, (k, topicListeners) -> { if (topicListeners == null) { - topicListeners = Lists.newCopyOnWriteArrayList(); + topicListeners = new CopyOnWriteArrayList<>(); } topicListeners.add(listener); return topicListeners; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java index 72e5d6a9228f0..c0f931bd6a553 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.service; -import com.google.common.collect.ImmutableMap; import io.netty.buffer.ByteBuf; import java.util.Map; import java.util.Optional; @@ -250,7 +249,7 @@ CompletableFuture createSubscription(String subscriptionName, Init EntryFilters getEntryFiltersPolicy(); - ImmutableMap getEntryFilters(); + Map getEntryFilters(); BacklogQuota getBacklogQuota(BacklogQuotaType backlogQuotaType); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java index a3da80080ffc0..05e42c1b64d6e 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java @@ -23,8 +23,6 @@ import static org.apache.pulsar.common.policies.data.BacklogQuota.BacklogQuotaType; import static org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH; import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import io.netty.buffer.ByteBuf; import io.netty.util.concurrent.FastThreadLocal; import java.util.ArrayList; @@ -34,6 +32,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -394,7 +393,7 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, boolean c CompletableFuture closeClientFuture = new CompletableFuture<>(); if (closeIfClientsConnected) { - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect())); producers.values().forEach(producer -> futures.add(producer.disconnect())); subscriptions.forEach((s, sub) -> futures.add(sub.disconnect())); @@ -415,7 +414,7 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, boolean c if (currentUsageCount() == 0) { isFenced = true; - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); if (failIfHasSubscriptions) { if (!subscriptions.isEmpty()) { @@ -485,7 +484,7 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect lock.writeLock().unlock(); } - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect())); producers.values().forEach(producer -> 
futures.add(producer.disconnect())); @@ -531,7 +530,7 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect } public CompletableFuture stopReplProducers() { - List> closeFutures = Lists.newArrayList(); + List> closeFutures = new ArrayList<>(); replicators.forEach((region, replicator) -> closeFutures.add(replicator.disconnect())); return FutureUtil.waitForAll(closeFutures); } @@ -551,7 +550,7 @@ public CompletableFuture checkReplication() { String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); // Check for missing replicators for (String cluster : configuredClusters) { @@ -911,7 +910,7 @@ public CompletableFuture getInternalStats(boolean PersistentTopicInternalStats stats = new PersistentTopicInternalStats(); stats.entriesAddedCounter = ENTRIES_ADDED_COUNTER_UPDATER.get(this); - stats.cursors = Maps.newTreeMap(); + stats.cursors = new TreeMap<>(); subscriptions.forEach((name, subs) -> stats.cursors.put(name, new CursorStats())); replicators.forEach((name, subs) -> stats.cursors.put(name, new CursorStats())); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java index 47830e669af4e..accab20d2daed 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java @@ -18,12 +18,12 @@ */ package org.apache.pulsar.broker.service.persistent; -import static com.google.common.base.Preconditions.checkNotNull; import static org.apache.pulsar.broker.service.persistent.PersistentTopic.MESSAGE_RATE_BACKOFF_MS; import static 
org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH; import io.netty.util.Recycler; import java.util.Iterator; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledFuture; @@ -520,7 +520,7 @@ private synchronized void internalReadEntriesFailed(ManagedLedgerException excep } } - checkNotNull(c); + Objects.requireNonNull(c); // Reduce read batch size to avoid flooding bookies with retries readBatchSize = serviceConfig.getDispatcherMinReadBatchSize(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java index e5d6251d177de..4afc5b6bdf9f8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java @@ -21,7 +21,6 @@ import static org.apache.pulsar.common.naming.SystemTopicNames.isEventSystemTopic; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; -import com.google.common.base.Predicate; import io.netty.buffer.ByteBuf; import java.util.Collections; import java.util.LinkedHashMap; @@ -34,6 +33,7 @@ import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; import java.util.stream.Collectors; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.ClearBacklogCallback; @@ -139,7 +139,7 @@ static boolean isCursorFromReplicatedSubscription(ManagedCursor cursor) { } public PersistentSubscription(PersistentTopic topic, String subscriptionName, ManagedCursor cursor, - boolean replicated) { + boolean 
replicated) { this(topic, subscriptionName, cursor, replicated, Collections.emptyMap()); } @@ -165,7 +165,7 @@ public PersistentSubscription(PersistentTopic topic, String subscriptionName, Ma public void updateLastMarkDeleteAdvancedTimestamp() { this.lastMarkDeleteAdvancedTimestamp = - Math.max(this.lastMarkDeleteAdvancedTimestamp, System.currentTimeMillis()); + Math.max(this.lastMarkDeleteAdvancedTimestamp, System.currentTimeMillis()); } @Override @@ -229,9 +229,9 @@ public CompletableFuture addConsumer(Consumer consumer) { previousDispatcher = dispatcher; dispatcher = useStreamingDispatcher ? new PersistentStreamingDispatcherSingleActiveConsumer( - cursor, SubType.Exclusive, 0, topic, this) + cursor, SubType.Exclusive, 0, topic, this) : new PersistentDispatcherSingleActiveConsumer( - cursor, SubType.Exclusive, 0, topic, this); + cursor, SubType.Exclusive, 0, topic, this); } break; case Shared: @@ -239,7 +239,7 @@ public CompletableFuture addConsumer(Consumer consumer) { previousDispatcher = dispatcher; dispatcher = useStreamingDispatcher ? new PersistentStreamingDispatcherMultipleConsumers( - topic, cursor, this) + topic, cursor, this) : new PersistentDispatcherMultipleConsumers(topic, cursor, this); } break; @@ -256,7 +256,7 @@ public CompletableFuture addConsumer(Consumer consumer) { previousDispatcher = dispatcher; dispatcher = useStreamingDispatcher ? 
new PersistentStreamingDispatcherSingleActiveConsumer( - cursor, SubType.Failover, partitionIndex, topic, this) : + cursor, SubType.Failover, partitionIndex, topic, this) : new PersistentDispatcherSingleActiveConsumer(cursor, SubType.Failover, partitionIndex, topic, this); } @@ -265,7 +265,7 @@ public CompletableFuture addConsumer(Consumer consumer) { KeySharedMeta ksm = consumer.getKeySharedMeta(); if (dispatcher == null || dispatcher.getType() != SubType.Key_Shared || !((PersistentStickyKeyDispatcherMultipleConsumers) dispatcher) - .hasSameKeySharedPolicy(ksm)) { + .hasSameKeySharedPolicy(ksm)) { previousDispatcher = dispatcher; dispatcher = new PersistentStickyKeyDispatcherMultipleConsumers(topic, cursor, this, topic.getBrokerService().getPulsar().getConfiguration(), ksm); @@ -337,7 +337,7 @@ public synchronized void removeConsumer(Consumer consumer, boolean isResetCursor // when topic closes: it iterates through concurrent-subscription map to close each subscription. so, // topic.remove again try to access same map which creates deadlock. so, execute it in different thread. - topic.getBrokerService().pulsar().getExecutor().execute(() ->{ + topic.getBrokerService().pulsar().getExecutor().execute(() -> { topic.removeSubscription(subName); // Also need remove the cursor here, otherwise the data deletion will not work well. // Because data deletion depends on the mark delete position of all cursors. 
@@ -412,7 +412,7 @@ public void acknowledgeMessage(List positions, AckType ackType, Map analyzeBacklog(Optional int batchSize = configuration.getDispatcherMaxReadBatchSize(); AtomicReference firstPosition = new AtomicReference<>(); AtomicReference lastPosition = new AtomicReference<>(); - return cursor.scan(position, new Predicate() { - @Override - public boolean apply(Entry entry) { - if (log.isDebugEnabled()) { - log.debug("found {}", entry); - } - Position entryPosition = entry.getPosition(); - firstPosition.compareAndSet(null, entryPosition); - lastPosition.set(entryPosition); - ByteBuf metadataAndPayload = entry.getDataBuffer(); - MessageMetadata messageMetadata = Commands.peekMessageMetadata(metadataAndPayload, "", -1); - int numMessages = 1; - if (messageMetadata.hasNumMessagesInBatch()) { - numMessages = messageMetadata.getNumMessagesInBatch(); - } - EntryFilter.FilterResult filterResult = entryFilterSupport - .runFiltersForEntry(entry, messageMetadata, null); - - if (filterResult == null) { - filterResult = EntryFilter.FilterResult.ACCEPT; - } - switch (filterResult) { - case REJECT: - rejected.incrementAndGet(); - rejectedMessages.addAndGet(numMessages); - break; - case RESCHEDULE: - rescheduled.incrementAndGet(); - rescheduledMessages.addAndGet(numMessages); - break; - default: - accepted.incrementAndGet(); - acceptedMessages.addAndGet(numMessages); - break; - } - long num = entries.incrementAndGet(); - messages.addAndGet(numMessages); - - if (num % 1000 == 0) { - long end = System.currentTimeMillis(); - log.info( - "[{}][{}] scan running since {} ms - scanned {} entries", - topicName, subName, end - start, num); - } + final Predicate condition = entry -> { + if (log.isDebugEnabled()) { + log.debug("found {}", entry); + } + Position entryPosition = entry.getPosition(); + firstPosition.compareAndSet(null, entryPosition); + lastPosition.set(entryPosition); + ByteBuf metadataAndPayload = entry.getDataBuffer(); + MessageMetadata messageMetadata = 
Commands.peekMessageMetadata(metadataAndPayload, "", -1); + int numMessages = 1; + if (messageMetadata.hasNumMessagesInBatch()) { + numMessages = messageMetadata.getNumMessagesInBatch(); + } + EntryFilter.FilterResult filterResult = entryFilterSupport + .runFiltersForEntry(entry, messageMetadata, null); - return true; + if (filterResult == null) { + filterResult = EntryFilter.FilterResult.ACCEPT; } - }, batchSize, maxEntries, timeOutMs).thenApply((ScanOutcome outcome) -> { + switch (filterResult) { + case REJECT: + rejected.incrementAndGet(); + rejectedMessages.addAndGet(numMessages); + break; + case RESCHEDULE: + rescheduled.incrementAndGet(); + rescheduledMessages.addAndGet(numMessages); + break; + default: + accepted.incrementAndGet(); + acceptedMessages.addAndGet(numMessages); + break; + } + long num = entries.incrementAndGet(); + messages.addAndGet(numMessages); + + if (num % 1000 == 0) { + long end = System.currentTimeMillis(); + log.info( + "[{}][{}] scan running since {} ms - scanned {} entries", + topicName, subName, end - start, num); + } + + return true; + }; + return cursor.scan( + position, + condition, + batchSize, + maxEntries, + timeOutMs + ).thenApply((ScanOutcome outcome) -> { long end = System.currentTimeMillis(); AnalyzeBacklogResult result = new AnalyzeBacklogResult(); result.setFirstPosition(firstPosition.get()); @@ -928,6 +932,7 @@ public CompletableFuture delete() { /** * Forcefully close all consumers and deletes the subscription. + * * @return */ @Override @@ -938,9 +943,10 @@ public CompletableFuture deleteForcefully() { /** * Delete the subscription by closing and deleting its managed cursor. Handle unsubscribe call from admin layer. * - * @param closeIfConsumersConnected - * Flag indicate whether explicitly close connected consumers before trying to delete subscription. If - * any consumer is connected to it and if this flag is disable then this operation fails. 
+ * @param closeIfConsumersConnected Flag indicate whether explicitly close connected consumers before trying to + * delete subscription. If + * any consumer is connected to it and if this flag is disable then this operation + * fails. * @return CompletableFuture indicating the completion of delete operation */ private CompletableFuture delete(boolean closeIfConsumersConnected) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index 49f2ac2f548cf..33d97970569ed 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.service.persistent; -import static com.google.common.base.Preconditions.checkNotNull; import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.pulsar.broker.service.persistent.SubscribeRateLimiter.isSubscribeRateEnabled; import static org.apache.pulsar.common.naming.SystemTopicNames.isEventSystemTopic; @@ -26,8 +25,6 @@ import static org.apache.pulsar.compaction.Compactor.COMPACTION_SUBSCRIPTION; import com.carrotsearch.hppc.ObjectObjectHashMap; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import io.netty.util.concurrent.FastThreadLocal; @@ -35,8 +32,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; @@ -401,7 +400,7 @@ public AtomicLong getPendingWriteOps() { private 
PersistentSubscription createPersistentSubscription(String subscriptionName, ManagedCursor cursor, boolean replicated, Map subscriptionProperties) { - checkNotNull(compactedTopic); + Objects.requireNonNull(compactedTopic); if (isCompactionSubscription(subscriptionName)) { return new CompactorSubscription(this, compactedTopic, subscriptionName, cursor); } else { @@ -553,7 +552,7 @@ public synchronized void addFailed(ManagedLedgerException exception, Object ctx) // close all producers CompletableFuture disconnectProducersFuture; if (producers.size() > 0) { - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); producers.forEach((__, producer) -> futures.add(producer.disconnect())); disconnectProducersFuture = FutureUtil.waitForAll(futures); } else { @@ -669,14 +668,14 @@ public CompletableFuture startReplProducers() { } public CompletableFuture stopReplProducers() { - List> closeFutures = Lists.newArrayList(); + List> closeFutures = new ArrayList<>(); replicators.forEach((region, replicator) -> closeFutures.add(replicator.disconnect())); shadowReplicators.forEach((__, replicator) -> closeFutures.add(replicator.disconnect())); return FutureUtil.waitForAll(closeFutures); } private synchronized CompletableFuture closeReplProducersIfNoBacklog() { - List> closeFutures = Lists.newArrayList(); + List> closeFutures = new ArrayList<>(); replicators.forEach((region, replicator) -> closeFutures.add(replicator.disconnect(true))); shadowReplicators.forEach((__, replicator) -> closeFutures.add(replicator.disconnect(true))); return FutureUtil.waitForAll(closeFutures); @@ -1162,7 +1161,7 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, fenceTopicToCloseOrDelete(); // Avoid clients reconnections while deleting CompletableFuture closeClientFuture = new CompletableFuture<>(); if (closeIfClientsConnected) { - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); replicators.forEach((cluster, replicator) -> 
futures.add(replicator.disconnect())); shadowReplicators.forEach((__, replicator) -> futures.add(replicator.disconnect())); producers.values().forEach(producer -> futures.add(producer.disconnect())); @@ -1286,7 +1285,7 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect lock.writeLock().unlock(); } - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); futures.add(transactionBuffer.closeAsync()); replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect())); @@ -1423,7 +1422,7 @@ public CompletableFuture checkReplication() { return deleteForcefully(); } - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); // Check for missing replicators for (String cluster : configuredClusters) { @@ -1461,7 +1460,7 @@ private CompletableFuture checkShadowReplication() { if (log.isDebugEnabled()) { log.debug("[{}] Checking shadow replication status, shadowTopics={}", topic, configuredShadowTopics); } - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); // Check for missing replicators for (String shadowTopic : configuredShadowTopics) { @@ -2142,7 +2141,7 @@ public CompletableFuture getInternalStats(boolean stats.lastConfirmedEntry = ml.getLastConfirmedEntry().toString(); stats.state = ml.getState().toString(); - stats.ledgers = Lists.newArrayList(); + stats.ledgers = new ArrayList<>(); Set> futures = Sets.newConcurrentHashSet(); CompletableFuture> availableBookiesFuture = brokerService.pulsar().getPulsarResources().getBookieResources().listAvailableBookiesAsync(); @@ -2197,7 +2196,7 @@ public CompletableFuture getInternalStats(boolean stats.compactedLedger = info; - stats.cursors = Maps.newTreeMap(); + stats.cursors = new HashMap<>(); ml.getCursors().forEach(c -> { ManagedCursorImpl cursor = (ManagedCursorImpl) c; CursorStats cs = new CursorStats(); @@ -2805,7 +2804,7 @@ public boolean isOldestMessageExpired(ManagedCursor cursor, int messageTTLInSeco */ public 
CompletableFuture clearBacklog() { log.info("[{}] Clearing backlog on all cursors in the topic.", topic); - List> futures = Lists.newArrayList(); + List> futures = new ArrayList<>(); List cursors = getSubscriptions().keys(); cursors.addAll(getReplicators().keys()); cursors.addAll(getShadowReplicators().keys()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java index b62335d9ab435..9a78de6e30ba4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.service.schema; -import static com.google.common.collect.Iterables.concat; import static com.google.common.collect.Lists.newArrayList; import static com.google.protobuf.ByteString.copyFrom; import static java.util.Objects.isNull; @@ -463,12 +462,15 @@ private CompletableFuture updateSchemaLocator( .setHash(copyFrom(hash)) .build(); + final ArrayList indexList = new ArrayList<>(); + indexList.addAll(locator.getIndexList()); + indexList.add(info); return updateSchemaLocator(getSchemaPath(schemaId), - SchemaStorageFormat.SchemaLocator.newBuilder() - .setInfo(info) - .addAllIndex( - concat(locator.getIndexList(), newArrayList(info)) - ).build(), locatorEntry.version + SchemaStorageFormat.SchemaLocator.newBuilder() + .setInfo(info) + .addAllIndex(indexList) + .build() + , locatorEntry.version ).thenApply(ignore -> nextVersion); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryService.java index e78477a9aa080..abf74cdc4503a 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryService.java @@ -18,7 +18,7 @@ */ package org.apache.pulsar.broker.service.schema; -import com.google.common.collect.Maps; +import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.concurrent.ScheduledExecutorService; @@ -34,7 +34,7 @@ public interface SchemaRegistryService extends SchemaRegistry { long NO_SCHEMA_VERSION = -1L; static Map getCheckers(Set checkerClasses) throws Exception { - Map checkers = Maps.newHashMap(); + Map checkers = new HashMap<>(); for (String className : checkerClasses) { SchemaCompatibilityCheck schemaCompatibilityCheck = Reflections.createInstance(className, SchemaCompatibilityCheck.class, Thread.currentThread().getContextClassLoader()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/AllocatorStatsGenerator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/AllocatorStatsGenerator.java index 5dd4c0b0402c9..9e71fb7d46bfe 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/AllocatorStatsGenerator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/AllocatorStatsGenerator.java @@ -18,12 +18,12 @@ */ package org.apache.pulsar.broker.stats; -import com.google.common.collect.Lists; import io.netty.buffer.PoolArenaMetric; import io.netty.buffer.PoolChunkListMetric; import io.netty.buffer.PoolChunkMetric; import io.netty.buffer.PoolSubpageMetric; import io.netty.buffer.PooledByteBufAllocator; +import java.util.ArrayList; import java.util.stream.Collectors; import org.apache.bookkeeper.mledger.impl.cache.RangeEntryCacheImpl; import org.apache.pulsar.common.stats.AllocatorStats; @@ -99,7 +99,7 @@ private static PoolChunkListStats newPoolChunkListStats(PoolChunkListMetric m) { PoolChunkListStats stats = new PoolChunkListStats(); stats.minUsage = m.minUsage(); 
stats.maxUsage = m.maxUsage(); - stats.chunks = Lists.newArrayList(); + stats.chunks = new ArrayList<>(); m.forEach(chunk -> stats.chunks.add(newPoolChunkStats(chunk))); return stats; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BookieClientStatsGenerator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BookieClientStatsGenerator.java index d34c430f19c59..c92693c0df4d8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BookieClientStatsGenerator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BookieClientStatsGenerator.java @@ -18,8 +18,9 @@ */ package org.apache.pulsar.broker.stats; -import com.google.common.collect.Maps; +import java.util.HashMap; import java.util.Map; +import java.util.TreeMap; import org.apache.bookkeeper.mledger.proto.PendingBookieOpsStats; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -35,7 +36,7 @@ public class BookieClientStatsGenerator { public BookieClientStatsGenerator(PulsarService pulsar) { this.pulsar = pulsar; - this.nsBookieClientStatsMap = Maps.newTreeMap(); + this.nsBookieClientStatsMap = new TreeMap<>(); } public static Map> generate(PulsarService pulsar) throws Exception { @@ -60,7 +61,7 @@ private Map> generate() throws Except private void put(TopicName topicName, PendingBookieOpsStats bookieOpsStats) { String namespace = topicName.getNamespace(); if (!nsBookieClientStatsMap.containsKey(namespace)) { - Map destBookieClientStatsMap = Maps.newTreeMap(); + Map destBookieClientStatsMap = new HashMap<>(); destBookieClientStatsMap.put(topicName.toString(), bookieOpsStats); nsBookieClientStatsMap.put(namespace, destBookieClientStatsMap); } else { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BrokerOperabilityMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BrokerOperabilityMetrics.java index 4fd9e35dd7be3..ac131121d0b79 100644 
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BrokerOperabilityMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/BrokerOperabilityMetrics.java @@ -18,8 +18,8 @@ */ package org.apache.pulsar.broker.stats; -import com.google.common.collect.Maps; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -82,7 +82,7 @@ Metrics getConnectionMetrics() { } Map getDimensionMap(String metricsName) { - Map dimensionMap = Maps.newHashMap(); + Map dimensionMap = new HashMap<>(); dimensionMap.put("broker", brokerName); dimensionMap.put("cluster", localCluster); dimensionMap.put("metric", metricsName); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MBeanStatsGenerator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MBeanStatsGenerator.java index 6b5d63bee1b70..905db1c1e64ed 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MBeanStatsGenerator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MBeanStatsGenerator.java @@ -18,10 +18,10 @@ */ package org.apache.pulsar.broker.stats; -import com.google.common.collect.Maps; import java.lang.management.ManagementFactory; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -108,7 +108,7 @@ private Metrics convert(ObjectInstance instance) { * @return */ private Metrics createMetricsByDimension(ObjectName objectName) { - Map dimensionMap = Maps.newHashMap(); + Map dimensionMap = new HashMap<>(); dimensionMap.put("MBean", objectName.toString()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MetricsGenerator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MetricsGenerator.java index 5239cfb9acc8d..d8c518e9a5d4a 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MetricsGenerator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/MetricsGenerator.java @@ -18,9 +18,9 @@ */ package org.apache.pulsar.broker.stats; -import com.google.common.collect.Maps; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.pulsar.broker.PulsarService; @@ -61,7 +61,7 @@ private Collection merge(List metricsCollection) { // map by dimension map -> metrics // since dimension map is unique - Map, Metrics> mergedMetrics = Maps.newHashMap(); + Map, Metrics> mergedMetrics = new HashMap<>(); for (Metrics metrics : metricsCollection) { Map dimensionKey = metrics.getDimensions(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/NamespaceStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/NamespaceStats.java index 09db78d1edd54..d8047f1e13ea8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/NamespaceStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/NamespaceStats.java @@ -19,8 +19,8 @@ package org.apache.pulsar.broker.stats; import static org.apache.bookkeeper.mledger.impl.ManagedLedgerMBeanImpl.ENTRY_LATENCY_BUCKETS_USEC; -import com.google.common.collect.Maps; import java.util.Arrays; +import java.util.HashMap; import java.util.Map; import org.apache.pulsar.common.stats.Metrics; @@ -85,7 +85,7 @@ public void reset() { public Metrics add(String namespace) { - Map dimensionMap = Maps.newHashMap(); + Map dimensionMap = new HashMap<>(); dimensionMap.put("namespace", namespace); Metrics dMetrics = Metrics.create(dimensionMap); dMetrics.put("brk_in_rate", msgRateIn); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ReplicationMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ReplicationMetrics.java index 28f36190428a5..0046218d7c2c2 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ReplicationMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ReplicationMetrics.java @@ -18,9 +18,9 @@ */ package org.apache.pulsar.broker.stats; -import com.google.common.collect.Maps; import io.netty.util.Recycler; import io.netty.util.Recycler.Handle; +import java.util.HashMap; import java.util.Map; import org.apache.pulsar.common.stats.Metrics; @@ -70,7 +70,7 @@ public void recycle() { public Metrics add(String namespace, String local, String remote) { - Map dimensionMap = Maps.newHashMap(); + Map dimensionMap = new HashMap<>(); dimensionMap.put("namespace", namespace); dimensionMap.put("from_cluster", local); dimensionMap.put("to_cluster", remote); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/AbstractMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/AbstractMetrics.java index 610d22c54d8e9..e7e19c8ce3ba3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/AbstractMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/AbstractMetrics.java @@ -18,8 +18,7 @@ */ package org.apache.pulsar.broker.stats.metrics; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -188,7 +187,7 @@ protected String parseNamespaceFromLedgerName(String ledgerName) { * @return */ protected Metrics createMetricsByDimension(String namespace) { - Map dimensionMap = Maps.newHashMap(); + Map dimensionMap = new HashMap<>(); dimensionMap.put("namespace", namespace); @@ -204,7 +203,7 @@ protected Metrics createMetricsByDimension(String namespace) { * @return */ protected Metrics createMetricsByDimension(String namespace, String fromClusterName, String toClusterName) { - Map dimensionMap = Maps.newHashMap(); + Map dimensionMap = new HashMap<>(); 
dimensionMap.put("namespace", namespace); dimensionMap.put("from_cluster", fromClusterName); @@ -214,7 +213,7 @@ protected Metrics createMetricsByDimension(String namespace, String fromClusterN } protected void populateAggregationMap(Map> map, String mkey, double value) { - map.computeIfAbsent(mkey, __ -> Lists.newArrayList()).add(value); + map.computeIfAbsent(mkey, __ -> new ArrayList<>()).add(value); } protected void populateAggregationMapWithSum(Map map, String mkey, double value) { @@ -238,11 +237,11 @@ protected void populateMaxMap(Map map, String mkey, long value) { */ protected void populateDimensionMap(Map> ledgersByDimensionMap, Metrics metrics, ManagedLedgerImpl ledger) { - ledgersByDimensionMap.computeIfAbsent(metrics, __ -> Lists.newArrayList()).add(ledger); + ledgersByDimensionMap.computeIfAbsent(metrics, __ -> new ArrayList<>()).add(ledger); } protected void populateDimensionMap(Map> topicsStatsByDimensionMap, Metrics metrics, TopicStats destStats) { - topicsStatsByDimensionMap.computeIfAbsent(metrics, __ -> Lists.newArrayList()).add(destStats); + topicsStatsByDimensionMap.computeIfAbsent(metrics, __ -> new ArrayList<>()).add(destStats); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedCursorMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedCursorMetrics.java index 17fbc270e0d43..1f3b58a2590f4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedCursorMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedCursorMetrics.java @@ -18,8 +18,8 @@ */ package org.apache.pulsar.broker.stats.metrics; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -38,8 +38,8 @@ public class ManagedCursorMetrics extends AbstractMetrics { public 
ManagedCursorMetrics(PulsarService pulsar) { super(pulsar); - this.metricsCollection = Lists.newArrayList(); - this.dimensionMap = Maps.newHashMap(); + this.metricsCollection = new ArrayList<>(); + this.dimensionMap = new HashMap<>(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerCacheMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerCacheMetrics.java index 1f4181f887fdd..7bbef5379cbae 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerCacheMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerCacheMetrics.java @@ -18,11 +18,11 @@ */ package org.apache.pulsar.broker.stats.metrics; -import com.google.common.collect.Lists; import io.netty.buffer.PoolArenaMetric; import io.netty.buffer.PoolChunkListMetric; import io.netty.buffer.PoolChunkMetric; import io.netty.buffer.PooledByteBufAllocator; +import java.util.ArrayList; import java.util.List; import org.apache.bookkeeper.mledger.ManagedLedgerFactoryMXBean; import org.apache.bookkeeper.mledger.impl.cache.RangeEntryCacheImpl; @@ -34,7 +34,7 @@ public class ManagedLedgerCacheMetrics extends AbstractMetrics { private List metrics; public ManagedLedgerCacheMetrics(PulsarService pulsar) { super(pulsar); - this.metrics = Lists.newArrayList(); + this.metrics = new ArrayList<>(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerMetrics.java index 8a9dd4a3da6cd..a8edbf3dd3e01 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/metrics/ManagedLedgerMetrics.java @@ -18,8 +18,8 @@ */ package org.apache.pulsar.broker.stats.metrics; -import com.google.common.collect.Lists; -import 
com.google.common.collect.Maps; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -50,9 +50,9 @@ public class ManagedLedgerMetrics extends AbstractMetrics { public ManagedLedgerMetrics(PulsarService pulsar) { super(pulsar); - this.metricsCollection = Lists.newArrayList(); - this.ledgersByDimensionMap = Maps.newHashMap(); - this.tempAggregatedMetricsMap = Maps.newHashMap(); + this.metricsCollection = new ArrayList<>(); + this.ledgersByDimensionMap = new HashMap<>(); + this.tempAggregatedMetricsMap = new HashMap<>(); this.statsPeriodSeconds = ((ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory()) .getConfig().getStatsPeriodSeconds(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClientBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClientBase.java index cb45077341d22..98465d40f5730 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClientBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClientBase.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.systopic; -import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -86,9 +85,9 @@ public CompletableFuture> newWriterAsync() { @Override public CompletableFuture closeAsync() { List> futures = new ArrayList<>(); - List> tempWriters = Lists.newArrayList(writers); + List> tempWriters = new ArrayList<>(writers); tempWriters.forEach(writer -> futures.add(writer.closeAsync())); - List> tempReaders = Lists.newArrayList(readers); + List> tempReaders = new ArrayList<>(readers); tempReaders.forEach(reader -> futures.add(reader.closeAsync())); writers.clear(); readers.clear(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java index 888106ead3114..b71ad79c48728 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java @@ -39,6 +39,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -363,7 +364,7 @@ public void nonPersistentTopics() throws Exception { publishMessagesOnTopic(nonPersistentTopicName, 10, 0); NonPersistentTopicStats topicStats = (NonPersistentTopicStats) admin.topics().getStats(nonPersistentTopicName); - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(topicStats.getSubscriptions().keySet(), Set.of("my-sub")); assertEquals(topicStats.getSubscriptions().get("my-sub").getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get("my-sub").getMsgDropRate(), 0); assertEquals(topicStats.getPublishers().size(), 0); @@ -372,7 +373,7 @@ public void nonPersistentTopics() throws Exception { pulsar.getAdvertisedAddress() + ":" + pulsar.getConfiguration().getWebServicePort().get()); PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(nonPersistentTopicName, false); - assertEquals(internalStats.cursors.keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(internalStats.cursors.keySet(), Set.of("my-sub")); consumer.close(); topicStats = (NonPersistentTopicStats) admin.topics().getStats(nonPersistentTopicName); @@ -545,7 +546,7 @@ public void testResetCursorOnPosition(String namespaceName) throws Exception { Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub") .subscriptionType(SubscriptionType.Shared).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); 
+ assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, totalProducedMessages, 0); @@ -684,21 +685,21 @@ public void testPeerCluster() throws Exception { admin.clusters().createCluster("us-east2", ClusterData.builder().serviceUrl("http://broker.messaging.east2.example.com:8080").build()); - admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet(Lists.newArrayList("us-west2"))); - assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), Lists.newArrayList("us-west2")); + admin.clusters().updatePeerClusterNames("us-west1", new LinkedHashSet<>(List.of("us-west2"))); + assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), Set.of("us-west2")); assertNull(admin.clusters().getCluster("us-west2").getPeerClusterNames()); // update cluster with duplicate peer-clusters in the list - admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet( - Lists.newArrayList("us-west2", "us-east1", "us-west2", "us-east1", "us-west2", "us-east1"))); + admin.clusters().updatePeerClusterNames("us-west1", + new LinkedHashSet<>(List.of("us-west2", "us-east1", "us-west2", "us-east1", "us-west2", "us-east1"))); assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), - Lists.newArrayList("us-west2", "us-east1")); + List.of("us-west2", "us-east1")); admin.clusters().updatePeerClusterNames("us-west1", null); assertNull(admin.clusters().getCluster("us-west1").getPeerClusterNames()); // Check name validation try { admin.clusters().updatePeerClusterNames("us-west1", - Sets.newLinkedHashSet(Lists.newArrayList("invalid-cluster"))); + new LinkedHashSet<>(List.of("invalid-cluster"))); fail("should have failed"); } catch (PulsarAdminException e) { assertTrue(e instanceof PreconditionFailedException); @@ -706,7 +707,7 @@ public void testPeerCluster() throws Exception { // Cluster itself can't be part of peer-list try { - 
admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet(Lists.newArrayList("us-west1"))); + admin.clusters().updatePeerClusterNames("us-west1", new LinkedHashSet<>(List.of("us-west1"))); fail("should have failed"); } catch (PulsarAdminException e) { assertTrue(e instanceof PreconditionFailedException); @@ -737,7 +738,7 @@ public void testReplicationPeerCluster() throws Exception { List allClusters = admin.clusters().getClusters(); Collections.sort(allClusters); assertEquals(allClusters, - Lists.newArrayList("test", "us-east1", "us-east2", "us-west1", "us-west2", "us-west3", "us-west4")); + List.of("test", "us-east1", "us-east2", "us-west1", "us-west2", "us-west3", "us-west4")); final String property = "peer-prop"; Set allowedClusters = Set.of("us-west1", "us-west2", "us-west3", "us-west4", "us-east1", @@ -749,9 +750,9 @@ public void testReplicationPeerCluster() throws Exception { admin.namespaces().createNamespace(namespace); admin.clusters().updatePeerClusterNames("us-west1", - Sets.newLinkedHashSet(Lists.newArrayList("us-west2", "us-west3"))); + new LinkedHashSet<>(List.of("us-west2", "us-west3"))); assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), - Lists.newArrayList("us-west2", "us-west3")); + List.of("us-west2", "us-west3")); // (1) no conflicting peer Set clusterIds = Set.of("us-east1", "us-east2"); @@ -1241,7 +1242,7 @@ public void clustersList() throws PulsarAdminException { .serviceUrl("http://localhost:6650").build()); // Global cluster, if there, should be omitted from the results - assertEquals(admin.clusters().getClusters(), Lists.newArrayList(cluster)); + assertEquals(admin.clusters().getClusters(), List.of(cluster)); } /** * verifies cluster has been set before create topic @@ -1462,7 +1463,7 @@ public void testDeleteTenant() throws Exception { // create namespace String namespace = tenant + "/test-ns-1"; admin.namespaces().createNamespace(namespace, Set.of("test")); - 
assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList(namespace)); + assertEquals(admin.namespaces().getNamespaces(tenant), List.of(namespace)); // create topic String topic = namespace + "/test-topic-1"; @@ -1554,7 +1555,7 @@ public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { // create namespace String namespace = tenant + "/test-ns"; admin.namespaces().createNamespace(namespace, Set.of("test")); - assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList(namespace)); + assertEquals(admin.namespaces().getNamespaces(tenant), List.of(namespace)); // create topic String topic = namespace + "/test-topic"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java index 2246e184384e3..c75bb280faaa3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java @@ -244,14 +244,14 @@ public void clusters() throws Exception { ClusterData.builder().serviceUrl("http://broker.messaging.use.example.com:8080").build()); // "test" cluster is part of config-default cluster and it's znode gets created when PulsarService creates // failure-domain znode of this default cluster - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("test", "usw")); + assertEquals(admin.clusters().getClusters(), List.of("test", "usw")); assertEquals(admin.clusters().getCluster("test"), ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); admin.clusters().updateCluster("usw", ClusterData.builder().serviceUrl("http://new-broker.messaging.usw.example.com:8080").build()); - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("test", "usw")); + assertEquals(admin.clusters().getClusters(), List.of("test", "usw")); assertEquals(admin.clusters().getCluster("usw"), 
ClusterData.builder().serviceUrl("http://new-broker.messaging.usw.example.com:8080").build()); @@ -260,7 +260,7 @@ public void clusters() throws Exception { .serviceUrl("http://new-broker.messaging.usw.example.com:8080") .serviceUrlTls("https://new-broker.messaging.usw.example.com:4443") .build()); - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("test", "usw")); + assertEquals(admin.clusters().getClusters(), List.of("test", "usw")); assertEquals(admin.clusters().getCluster("usw"), ClusterData.builder() .serviceUrl("http://new-broker.messaging.usw.example.com:8080") @@ -269,7 +269,7 @@ public void clusters() throws Exception { admin.clusters().deleteCluster("usw"); Awaitility.await() - .untilAsserted(() -> assertEquals(admin.clusters().getClusters(), Lists.newArrayList("test"))); + .untilAsserted(() -> assertEquals(admin.clusters().getClusters(), List.of("test"))); deleteNamespaceGraceFully("prop-xyz/ns1", false); admin.clusters().deleteCluster("test"); @@ -439,7 +439,7 @@ public void clusterNamespaceIsolationPolicies() throws PulsarAdminException { NamespaceIsolationData.Builder nsRegexPolicy = NamespaceIsolationData.builder() .namespaces(Collections.singletonList("other/use/other.*")) - .primary(Lists.newArrayList("prod1-broker[45-46].messaging.use.example.com")) + .primary(List.of("prod1-broker[45-46].messaging.use.example.com")) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) .parameters(parameters) @@ -451,7 +451,7 @@ public void clusterNamespaceIsolationPolicies() throws PulsarAdminException { //Ok } - nsRegexPolicy.primary(Lists.newArrayList("prod1-broker[45-46].messaging.use.example.com", + nsRegexPolicy.primary(List.of("prod1-broker[45-46].messaging.use.example.com", "prod1-broker[4-5].messaging.use.example.com")) .secondary(Collections.singletonList("prod1-broker[45-46].messaging.use.example.com")); try { @@ -714,7 +714,7 @@ public void properties() throws Exception { TenantInfoImpl 
tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), allowedClusters); admin.tenants().updateTenant("prop-xyz", tenantInfo); - assertEquals(admin.tenants().getTenants(), Lists.newArrayList("prop-xyz")); + assertEquals(admin.tenants().getTenants(), List.of("prop-xyz")); assertEquals(admin.tenants().getTenantInfo("prop-xyz"), tenantInfo); @@ -769,8 +769,8 @@ public void namespaces() throws Exception { // Ok } - assertEquals(admin.namespaces().getNamespaces("prop-xyz"), Lists.newArrayList("prop-xyz/ns1", "prop-xyz/ns2")); - assertEquals(admin.namespaces().getNamespaces("prop-xyz"), Lists.newArrayList("prop-xyz/ns1", "prop-xyz/ns2")); + assertEquals(admin.namespaces().getNamespaces("prop-xyz"), List.of("prop-xyz/ns1", "prop-xyz/ns2")); + assertEquals(admin.namespaces().getNamespaces("prop-xyz"), List.of("prop-xyz/ns1", "prop-xyz/ns2")); try { admin.namespaces().createNamespace("prop-xyz/ns4", Set.of("usc")); @@ -835,7 +835,7 @@ public void namespaces() throws Exception { assertTrue(i < 10); deleteNamespaceGraceFully("prop-xyz/ns1", false); - assertEquals(admin.namespaces().getNamespaces("prop-xyz"), Lists.newArrayList("prop-xyz/ns2")); + assertEquals(admin.namespaces().getNamespaces("prop-xyz"), List.of("prop-xyz/ns2")); try { admin.namespaces().unload("prop-xyz/ns1"); @@ -862,7 +862,7 @@ public void persistentTopics(String topicName) throws Exception { // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/ns1/" + topicName, 0); assertEquals(admin.topics().getList("prop-xyz/ns1"), - Lists.newArrayList("persistent://prop-xyz/ns1/" + topicName)); + List.of("persistent://prop-xyz/ns1/" + topicName)); // create consumer and subscription @Cleanup @@ -873,12 +873,12 @@ public void persistentTopics(String topicName) throws Exception { Consumer consumer = client.newConsumer().topic(persistentTopicName).subscriptionName(subName) .subscriptionType(SubscriptionType.Exclusive).subscribe(); - 
assertEquals(admin.topics().getSubscriptions(persistentTopicName), Lists.newArrayList(subName)); + assertEquals(admin.topics().getSubscriptions(persistentTopicName), List.of(subName)); publishMessagesOnPersistentTopic("persistent://prop-xyz/ns1/" + topicName, 10); TopicStats topicStats = admin.topics().getStats(persistentTopicName); - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList(subName))); + assertEquals(topicStats.getSubscriptions().keySet(), Set.of(subName)); assertEquals(topicStats.getSubscriptions().get(subName).getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 10); assertEquals(topicStats.getPublishers().size(), 0); @@ -886,7 +886,7 @@ public void persistentTopics(String topicName) throws Exception { pulsar.getAdvertisedAddress() + ":" + pulsar.getConfiguration().getWebServicePort().get()); PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(persistentTopicName, false); - assertEquals(internalStats.cursors.keySet(), new TreeSet<>(Lists.newArrayList(Codec.encode(subName)))); + assertEquals(internalStats.cursors.keySet(), Set.of(Codec.encode(subName))); List> messages = admin.topics().peekMessages(persistentTopicName, subName, 3); assertEquals(messages.size(), 3); @@ -968,7 +968,7 @@ public void partitionedTopics(String topicType, String topicName) throws Excepti admin.topics().createPartitionedTopic(partitionedTopicName, 4); assertEquals(admin.topics().getPartitionedTopicList(namespace), - Lists.newArrayList(partitionedTopicName)); + List.of(partitionedTopicName)); assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, 4); @@ -1005,7 +1005,7 @@ public void partitionedTopics(String topicType, String topicName) throws Excepti Consumer consumer = client.newConsumer().topic(partitionedTopicName).subscriptionName("my-sub") .subscriptionType(SubscriptionType.Exclusive).subscribe(); - 
assertEquals(admin.topics().getSubscriptions(partitionedTopicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(partitionedTopicName), List.of("my-sub")); try { if (isPersistent) { @@ -1034,7 +1034,7 @@ public void partitionedTopics(String topicType, String topicName) throws Excepti // TODO: for non-persistent topics, deleteSubscription might throw NotFoundException admin.topics().deleteSubscription(partitionedTopicName, "my-sub-1"); // TODO: for non-persistent topics, getSubscriptions will return a empty set - assertEquals(admin.topics().getSubscriptions(partitionedTopicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(partitionedTopicName), List.of("my-sub")); } Producer producer = client.newProducer(Schema.BYTES) @@ -1056,7 +1056,7 @@ public void partitionedTopics(String topicType, String topicName) throws Excepti topicStats = admin.topics().getPartitionedStats(partitionedTopicName,false); if (isPersistent) { // TODO: for non-persistent topics, the subscription doesn't exist - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(topicStats.getSubscriptions().keySet(), Set.of("my-sub")); assertEquals(topicStats.getSubscriptions().get("my-sub").getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get("my-sub").getMsgBacklog(), 10); } @@ -1160,7 +1160,7 @@ public void testGetPartitionedInternalInfo() throws Exception { assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/ns1"), new ArrayList<>()); final String partitionedTopicName = "persistent://prop-xyz/ns1/" + partitionedTopic; admin.topics().createPartitionedTopic(partitionedTopicName, 2); - assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/ns1"), Lists.newArrayList(partitionedTopicName)); + assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/ns1"), List.of(partitionedTopicName)); 
assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, 2); String partitionTopic0 = partitionedTopicName + "-partition-0"; @@ -1223,7 +1223,7 @@ public void testGetPartitionedStatsInternal() throws Exception { assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/ns1"), new ArrayList<>()); final String partitionedTopicName = "persistent://prop-xyz/ns1/" + partitionedTopic; admin.topics().createPartitionedTopic(partitionedTopicName, 2); - assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/ns1"), Lists.newArrayList(partitionedTopicName)); + assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/ns1"), List.of(partitionedTopicName)); assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, 2); // create consumer and subscription @@ -1238,10 +1238,10 @@ public void testGetPartitionedStatsInternal() throws Exception { Thread.sleep(1000); PersistentTopicInternalStats internalStats0 = admin.topics().getInternalStats(partitionTopic0, false); - assertEquals(internalStats0.cursors.keySet(), new TreeSet<>(Lists.newArrayList(Codec.encode(subName)))); + assertEquals(internalStats0.cursors.keySet(), Set.of(Codec.encode(subName))); PersistentTopicInternalStats internalStats1 = admin.topics().getInternalStats(partitionTopic1, false); - assertEquals(internalStats1.cursors.keySet(), new TreeSet<>(Lists.newArrayList(Codec.encode(subName)))); + assertEquals(internalStats1.cursors.keySet(), Set.of(Codec.encode(subName))); // expected internal stats PartitionedTopicMetadata partitionedTopicMetadata = new PartitionedTopicMetadata(2); @@ -1294,7 +1294,7 @@ public void testDeleteTenantForcefully() throws Exception { String namespace = tenant + "/my-ns"; admin.namespaces().createNamespace("my-tenant/my-ns", Set.of("test")); - assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList("my-tenant/my-ns")); + assertEquals(admin.namespaces().getNamespaces(tenant), 
List.of("my-tenant/my-ns")); // create topic String topic = namespace + "/my-topic"; @@ -1350,7 +1350,7 @@ public void testDeleteNamespaceForcefully() throws Exception { String namespace = tenant + "/my-ns"; admin.namespaces().createNamespace("my-tenant/my-ns", Set.of("test")); - assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList("my-tenant/my-ns")); + assertEquals(admin.namespaces().getNamespaces(tenant), List.of("my-tenant/my-ns")); // create topic String topic = namespace + "/my-topic"; @@ -1399,7 +1399,7 @@ public void testForceDeleteTenantNotAllowed() throws Exception { String namespace = tenant + "/my-ns"; admin.namespaces().createNamespace("my-tenant/my-ns", Set.of("test")); - assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList("my-tenant/my-ns")); + assertEquals(admin.namespaces().getNamespaces(tenant), List.of("my-tenant/my-ns")); // create topic String topic = namespace + "/my-topic"; @@ -1436,7 +1436,7 @@ public void testNamespaceSplitBundle() throws Exception { .create(); producer.send("message".getBytes()); publishMessagesOnPersistentTopic(topicName, 0); - assertEquals(admin.topics().getList(namespace), Lists.newArrayList(topicName)); + assertEquals(admin.topics().getList(namespace), List.of(topicName)); try { admin.namespaces().splitNamespaceBundle(namespace, "0x00000000_0xffffffff", true, null); @@ -1458,7 +1458,7 @@ public void testNamespaceSplitBundle() throws Exception { public void testNamespaceSplitBundleWithTopicCountEquallyDivideAlgorithm() throws Exception { // Force to create a topic final String namespace = "prop-xyz/ns1"; - List topicNames = Lists.newArrayList( + List topicNames = List.of( (new StringBuilder("persistent://")).append(namespace).append("/topicCountEquallyDivideAlgorithum-1").toString(), (new StringBuilder("persistent://")).append(namespace).append("/topicCountEquallyDivideAlgorithum-2").toString()); @@ -1931,12 +1931,12 @@ public void testDeleteSubscription() throws Exception { // 
create a topic and produce some messages publishMessagesOnPersistentTopic(persistentTopicName, 5); assertEquals(admin.topics().getList("prop-xyz/ns1"), - Lists.newArrayList(persistentTopicName)); + List.of(persistentTopicName)); // create the subscription by PulsarAdmin admin.topics().createSubscription(persistentTopicName, subName, MessageId.earliest); - assertEquals(admin.topics().getSubscriptions(persistentTopicName), Lists.newArrayList(subName)); + assertEquals(admin.topics().getSubscriptions(persistentTopicName), List.of(subName)); // create consumer and subscription @Cleanup @@ -1956,7 +1956,7 @@ public void testDeleteSubscription() throws Exception { } // failed to delete the subscription - assertEquals(admin.topics().getSubscriptions(persistentTopicName), Lists.newArrayList(subName)); + assertEquals(admin.topics().getSubscriptions(persistentTopicName), List.of(subName)); // try to delete the subscription with a connected consumer forcefully admin.topics().deleteSubscription(persistentTopicName, subName, true); @@ -2076,9 +2076,9 @@ public void testUnsubscribeOnNamespace(Integer numBundles) throws Exception { admin.namespaces().unsubscribeNamespace("prop-xyz/ns1-bundles", "my-sub"); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/ns1-bundles/ds2"), - Lists.newArrayList("my-sub-1", "my-sub-2")); + List.of("my-sub-1", "my-sub-2")); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/ns1-bundles/ds1"), - Lists.newArrayList("my-sub-1")); + List.of("my-sub-1")); consumer2.close(); consumer5.close(); @@ -2086,7 +2086,7 @@ public void testUnsubscribeOnNamespace(Integer numBundles) throws Exception { admin.namespaces().unsubscribeNamespace("prop-xyz/ns1-bundles", "my-sub-1"); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/ns1-bundles/ds2"), - Lists.newArrayList("my-sub-2")); + List.of("my-sub-2")); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/ns1-bundles/ds1"), new ArrayList<>()); } @@ 
-2218,9 +2218,9 @@ public void testJacksonWithTypeDifferences() throws Exception { @Test public void testBackwardCompatibility() throws Exception { - assertEquals(admin.tenants().getTenants(), Lists.newArrayList("prop-xyz")); + assertEquals(admin.tenants().getTenants(), List.of("prop-xyz")); assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAdminRoles(), - Lists.newArrayList("role1", "role2")); + List.of("role1", "role2")); assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), Set.of("test")); // Try to deserialize property JSON with IncompatibleTenantAdmin format @@ -2266,7 +2266,7 @@ public void persistentTopicsCursorReset(String topicName) throws Exception { .subscriptionType(SubscriptionType.Exclusive) .acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 5, 0, false); @@ -2319,7 +2319,7 @@ public void persistentTopicsCursorResetAfterReset(String topicName) throws Excep .subscriptionType(SubscriptionType.Exclusive) .acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 5, 0, false); @@ -2475,7 +2475,7 @@ public void partitionedTopicsCursorReset(String topicName) throws Exception { List topics = admin.topics().getList("prop-xyz/ns1"); assertEquals(topics.size(), 4); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 5, 0, false); Thread.sleep(1); @@ -2517,7 +2517,7 @@ public void persistentTopicsInvalidCursorReset() throws Exception { String 
topicName = "persistent://prop-xyz/ns1/invalidcursorreset"; // Force to create a topic publishMessagesOnPersistentTopic(topicName, 0); - assertEquals(admin.topics().getList("prop-xyz/ns1"), Lists.newArrayList(topicName)); + assertEquals(admin.topics().getList("prop-xyz/ns1"), List.of(topicName)); // create consumer and subscription @Cleanup @@ -2528,7 +2528,7 @@ public void persistentTopicsInvalidCursorReset() throws Exception { Consumer consumer = client.newConsumer().topic(topicName).subscriptionName("my-sub") .subscriptionType(SubscriptionType.Exclusive).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 10); @@ -2601,7 +2601,7 @@ public void testPersistentTopicsExpireMessages() throws Exception { // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/ns1/ds2", 0); assertEquals(admin.topics().getList("prop-xyz/ns1"), - Lists.newArrayList("persistent://prop-xyz/ns1/ds2")); + List.of("persistent://prop-xyz/ns1/ds2")); // create consumer and subscription @Cleanup @@ -2685,7 +2685,7 @@ public void testPersistentTopicsExpireMessagesInvalidPartitionIndex() throws Exc // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/ns1/ds2-partition-2", 0); assertEquals(admin.topics().getList("prop-xyz/ns1"), - Lists.newArrayList("persistent://prop-xyz/ns1/ds2-partition-2")); + List.of("persistent://prop-xyz/ns1/ds2-partition-2")); // create consumer and subscription @Cleanup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java index d035d7c4290e9..c1bfde505bf61 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java @@ -44,7 +44,6 @@ 
import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.CompletableFuture; import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.core.Response; @@ -141,7 +140,7 @@ public void setup() throws Exception { namespaces.setServletContext(new MockServletContext()); namespaces.setPulsar(pulsar); doReturn("test").when(namespaces).clientAppId(); - doReturn(new TreeSet<>(Lists.newArrayList("use", "usw", "usc", "global"))).when(namespaces).clusters(); + doReturn(Set.of("use", "usw", "usc", "global")).when(namespaces).clusters(); doNothing().when(namespaces).validateAdminAccessForTenant("my-tenant"); doNothing().when(namespaces).validateAdminAccessForTenant("other-tenant"); doNothing().when(namespaces).validateAdminAccessForTenant("new-property"); @@ -159,7 +158,7 @@ public void setup() throws Exception { persistentTopics.setPulsar(pulsar); doReturn("test").when(persistentTopics).clientAppId(); doReturn("persistent").when(persistentTopics).domain(); - doReturn(new TreeSet<>(Lists.newArrayList("use", "usw", "usc"))).when(persistentTopics).clusters(); + doReturn(Set.of("use", "usw", "usc")).when(persistentTopics).clusters(); doNothing().when(persistentTopics).validateAdminAccessForTenant("my-tenant"); doNothing().when(persistentTopics).validateAdminAccessForTenant("other-tenant"); doNothing().when(persistentTopics).validateAdminAccessForTenant("prop-xyz"); @@ -481,7 +480,7 @@ public void properties() throws Throwable { verify(properties, times(2)).validateSuperUserAccessAsync(); response = asyncRequests(ctx -> properties.getTenants(ctx)); - assertEquals(response, Lists.newArrayList("test-property")); + assertEquals(response, List.of("test-property")); verify(properties, times(3)).validateSuperUserAccessAsync(); response = asyncRequests(ctx -> properties.getTenantAdmin(ctx, "test-property")); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java index fdf6fc562db68..ab568ac5eeccd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java @@ -18,7 +18,14 @@ */ package org.apache.pulsar.broker.admin; -import com.google.common.collect.Lists; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertThrows; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; @@ -33,16 +40,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertThrows; - @Test(groups = "broker-admin") public class AnalyzeBacklogSubscriptionTest extends ProducerConsumerBase { @@ -78,7 +75,7 @@ private void simpleAnalyzeBacklogTest(boolean batching) throws Exception { String subName = "sub-1"; admin.topics().createSubscription(topic, subName, MessageId.latest); - assertEquals(admin.topics().getSubscriptions(topic), Lists.newArrayList("sub-1")); + assertEquals(admin.topics().getSubscriptions(topic), List.of("sub-1")); verifyBacklog(topic, subName, 0, 0); @@ -178,7 +175,7 @@ public void partitionedTopicNotAllowed() throws Exception { String subName = "sub-1"; admin.topics().createPartitionedTopic(topic, 2); admin.topics().createSubscription(topic, subName, 
MessageId.latest); - assertEquals(admin.topics().getSubscriptions(topic), Lists.newArrayList("sub-1")); + assertEquals(admin.topics().getSubscriptions(topic), List.of("sub-1")); // you cannot use this feature on a partitioned topic assertThrows(PulsarAdminException.NotAllowedException.class, () -> { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java index ada26ee6c56c9..b5d5ffc046705 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java @@ -22,26 +22,24 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - -import com.google.common.collect.Lists; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.TimeUnit; -import java.io.IOException; import javax.ws.rs.ClientErrorException; import javax.ws.rs.core.Response.Status; - import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; -import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.http.HttpResponse; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.admin.PulsarAdminException.ConflictException; @@ -49,8 +47,8 @@ import 
org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionMode; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.MessageIdImpl; @@ -95,7 +93,7 @@ public void createSubscriptionSingleTopic() throws Exception { Status.CONFLICT.getStatusCode()); } - assertEquals(admin.topics().getSubscriptions(topic), Lists.newArrayList("sub-1")); + assertEquals(admin.topics().getSubscriptions(topic), List.of("sub-1")); Producer p1 = pulsarClient.newProducer().topic(topic).create(); p1.send("test-1".getBytes()); @@ -131,7 +129,7 @@ public void createSubscriptionOnPartitionedTopic() throws Exception { for (int i = 0; i < 10; i++) { assertEquals(admin.topics().getSubscriptions(TopicName.get(topic).getPartition(i).toString()), - Lists.newArrayList("sub-1")); + List.of("sub-1")); } } @@ -157,7 +155,7 @@ public void createSubscriptionOnPartitionedTopicWithPartialFailure() throws Exce for (int i = 0; i < 10; i++) { assertEquals( admin.topics().getSubscriptions(TopicName.get(topic).getPartition(i).toString()), - Lists.newArrayList("sub-1")); + List.of("sub-1")); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/IncrementPartitionsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/IncrementPartitionsTest.java index 15bb586607799..1758e13d735b5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/IncrementPartitionsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/IncrementPartitionsTest.java @@ -19,8 +19,8 @@ package org.apache.pulsar.broker.admin; import static org.testng.Assert.assertEquals; -import com.google.common.collect.Lists; import java.util.Collections; +import java.util.List; import 
java.util.Set; import lombok.Cleanup; import org.apache.pulsar.broker.BrokerTestUtil; @@ -100,7 +100,7 @@ public void testIncrementPartitionsOfTopic() throws Exception { assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, 20); assertEquals(admin.topics().getSubscriptions( - TopicName.get(partitionedTopicName).getPartition(15).toString()), Lists.newArrayList("sub-1")); + TopicName.get(partitionedTopicName).getPartition(15).toString()), List.of("sub-1")); consumer.close(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/MaxUnackedMessagesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/MaxUnackedMessagesTest.java index 87e4f703bbfa5..921aab3de0700 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/MaxUnackedMessagesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/MaxUnackedMessagesTest.java @@ -47,7 +47,6 @@ import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import org.testng.collections.Lists; @Test(groups = "broker-admin") public class MaxUnackedMessagesTest extends ProducerConsumerBase { @@ -103,7 +102,7 @@ public void testMaxUnackedMessagesOnSubscription() throws Exception { Consumer consumer1 = consumerBuilder.subscribe(); Consumer consumer2 = consumerBuilder.subscribe(); Consumer consumer3 = consumerBuilder.subscribe(); - List> consumers = Lists.newArrayList(consumer1, consumer2, consumer3); + List> consumers = List.of(consumer1, consumer2, consumer3); waitCacheInit(topicName); admin.topics().setMaxUnackedMessagesOnSubscription(topicName, unackMsgAllowed); Awaitility.await().untilAsserted(() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java index 6c3fba38642d1..6fd874154fad9 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java @@ -33,19 +33,18 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.Lists; import java.lang.reflect.Field; import java.net.URI; import java.net.URL; import java.time.Duration; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; @@ -171,7 +170,7 @@ public void setup() throws Exception { doReturn("test").when(namespaces).clientAppId(); doReturn(null).when(namespaces).originalPrincipal(); doReturn(null).when(namespaces).clientAuthData(); - doReturn(new TreeSet<>(Lists.newArrayList("use", "usw", "usc", "global"))).when(namespaces).clusters(); + doReturn(Set.of("use", "usw", "usc", "global")).when(namespaces).clusters(); admin.clusters().createCluster("use", ClusterData.builder().serviceUrl("http://broker-use.com:8080").build()); admin.clusters().createCluster("usw", ClusterData.builder().serviceUrl("http://broker-usw.com:8080").build()); @@ -269,13 +268,16 @@ public void testCreateNamespaces() throws Exception { @Test public void testGetNamespaces() throws Exception { - List expectedList = Lists.newArrayList(this.testLocalNamespaces.get(0).toString(), + List expectedList = Arrays.asList(this.testLocalNamespaces.get(0).toString(), this.testLocalNamespaces.get(1).toString()); expectedList.sort(null); assertEquals(namespaces.getNamespacesForCluster(this.testTenant, this.testLocalCluster), expectedList); - expectedList = Lists.newArrayList(this.testLocalNamespaces.get(0).toString(), - 
this.testLocalNamespaces.get(1).toString(), this.testLocalNamespaces.get(2).toString(), - this.testGlobalNamespaces.get(0).toString()); + expectedList = Arrays.asList( + this.testLocalNamespaces.get(0).toString(), + this.testLocalNamespaces.get(1).toString(), + this.testLocalNamespaces.get(2).toString(), + this.testGlobalNamespaces.get(0).toString() + ); expectedList.sort(null); AsyncResponse response = mock(AsyncResponse.class); namespaces.getTenantNamespaces(response, this.testTenant); @@ -510,18 +512,18 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testGlobalNamespaces.get(0).getTenant(), this.testGlobalNamespaces.get(0).getCluster(), this.testGlobalNamespaces.get(0).getLocalName(), - Lists.newArrayList("use", "usw"))); + List.of("use", "usw"))); repCluster = (Set) asyncRequests(rsp -> namespaces.getNamespaceReplicationClusters(rsp, this.testGlobalNamespaces.get(0).getTenant(), this.testGlobalNamespaces.get(0).getCluster(), this.testGlobalNamespaces.get(0).getLocalName())); - assertEquals(repCluster, Lists.newArrayList("use", "usw")); + assertEquals(repCluster, List.of("use", "usw")); try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testGlobalNamespaces.get(0).getTenant(), this.testGlobalNamespaces.get(0).getCluster(), this.testGlobalNamespaces.get(0).getLocalName(), - Lists.newArrayList("use", "invalid-cluster"))); + List.of("use", "invalid-cluster"))); fail("should have failed"); } catch (RestException e) { assertEquals(e.getResponse().getStatus(), Status.FORBIDDEN.getStatusCode()); @@ -531,7 +533,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testGlobalNamespaces.get(0).getTenant(), this.testGlobalNamespaces.get(0).getCluster(), this.testGlobalNamespaces.get(0).getLocalName(), - Lists.newArrayList("use", 
"global"))); + List.of("use", "global"))); fail("should have failed"); } catch (RestException e) { // Ok, global should not be allowed in the list of replication clusters @@ -541,7 +543,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testTenant, "global", this.testGlobalNamespaces.get(0).getLocalName(), - Lists.newArrayList("use", "invalid-cluster"))); + List.of("use", "invalid-cluster"))); fail("should have failed"); } catch (RestException e) { // Ok, invalid-cluster is an invalid cluster id @@ -553,7 +555,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testTenant, "global", - this.testGlobalNamespaces.get(0).getLocalName(), Lists.newArrayList("use", "usw"))); + this.testGlobalNamespaces.get(0).getLocalName(), List.of("use", "usw"))); fail("should have failed"); } catch (RestException e) { // Ok, usw was not configured in the list of allowed clusters @@ -566,7 +568,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testTenant, "global", - this.testGlobalNamespaces.get(0).getLocalName(), Lists.newArrayList("use"))); + this.testGlobalNamespaces.get(0).getLocalName(), List.of("use"))); fail("should have failed"); } catch (RestException e) { assertEquals(e.getResponse().getStatus(), Status.INTERNAL_SERVER_ERROR.getStatusCode()); @@ -588,7 +590,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { store.invalidateAll(); try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testTenant, "global", - this.testGlobalNamespaces.get(0).getLocalName(), Lists.newArrayList("use"))); + this.testGlobalNamespaces.get(0).getLocalName(), List.of("use"))); fail("should have failed"); } catch 
(RestException e) { assertEquals(e.getResponse().getStatus(), 500); @@ -604,7 +606,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testTenant, - "global", "non-existing-ns", Lists.newArrayList("use"))); + "global", "non-existing-ns", List.of("use"))); fail("should have failed"); } catch (RestException e) { assertEquals(e.getResponse().getStatus(), Status.NOT_FOUND.getStatusCode()); @@ -637,7 +639,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { try { asyncRequests(rsp -> namespaces.setNamespaceReplicationClusters(rsp, this.testTenant, this.testLocalCluster, - this.testLocalNamespaces.get(0).getLocalName(), Lists.newArrayList("use"))); + this.testLocalNamespaces.get(0).getLocalName(), List.of("use"))); fail("should have failed"); } catch (RestException e) { assertEquals(e.getResponse().getStatus(), Status.PRECONDITION_FAILED.getStatusCode()); @@ -647,7 +649,7 @@ public void testGlobalNamespaceReplicationConfiguration() throws Exception { @Test public void testGetBundles() throws Exception { - List boundaries = Lists.newArrayList("0x00000000", "0x80000000", "0xffffffff"); + List boundaries = List.of("0x00000000", "0x80000000", "0xffffffff"); BundlesData bundle = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) @@ -783,8 +785,10 @@ public void testDeleteNamespaces() throws Exception { responseCaptor = ArgumentCaptor.forClass(Response.class); verify(response, timeout(5000).times(1)).resume(responseCaptor.capture()); assertEquals(responseCaptor.getValue().getStatus(), Status.NO_CONTENT.getStatusCode()); - List nsList = Lists.newArrayList(this.testLocalNamespaces.get(1).toString(), - this.testLocalNamespaces.get(2).toString()); + List nsList = Arrays.asList( + this.testLocalNamespaces.get(1).toString(), + this.testLocalNamespaces.get(2).toString() + ); nsList.sort(null); 
assertEquals(asyncRequests(ctx -> namespaces.getTenantNamespaces(ctx, this.testTenant)), nsList); @@ -803,7 +807,7 @@ public void testDeleteNamespaces() throws Exception { public void testDeleteNamespaceWithBundles() throws Exception { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); String bundledNsLocal = "test-delete-namespace-with-bundles"; - List boundaries = Lists.newArrayList("0x00000000", "0x80000000", "0xffffffff"); + List boundaries = List.of("0x00000000", "0x80000000", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) @@ -901,7 +905,7 @@ public void testUnloadNamespaces() throws Exception { public void testSplitBundles() throws Exception { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); String bundledNsLocal = "test-bundled-namespace-1"; - List boundaries = Lists.newArrayList("0x00000000", "0xffffffff"); + List boundaries = List.of("0x00000000", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) @@ -940,7 +944,7 @@ public void testSplitBundles() throws Exception { public void testSplitBundleWithUnDividedRange() throws Exception { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); String bundledNsLocal = "test-bundled-namespace-1"; - List boundaries = Lists.newArrayList("0x00000000", "0x08375b1a", "0x08375b1b", "0xffffffff"); + List boundaries = List.of("0x00000000", "0x08375b1a", "0x08375b1b", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) @@ -967,7 +971,7 @@ public void testSplitBundleWithUnDividedRange() throws Exception { public void testUnloadNamespaceWithBundles() throws Exception { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); String bundledNsLocal = "test-bundled-namespace-1"; - List boundaries = Lists.newArrayList("0x00000000", "0x80000000", 
"0xffffffff"); + List boundaries = List.of("0x00000000", "0x80000000", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) @@ -1030,7 +1034,7 @@ public void testRetention() throws Exception { try { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); String bundledNsLocal = "test-bundled-namespace-1"; - List boundaries = Lists.newArrayList("0x00000000", "0xffffffff"); + List boundaries = List.of("0x00000000", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) @@ -1101,7 +1105,7 @@ public void testPersistenceUnauthorized() throws Exception { public void testValidateTopicOwnership() throws Exception { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); String bundledNsLocal = "test-bundled-namespace-1"; - List boundaries = Lists.newArrayList("0x00000000", "0xffffffff"); + List boundaries = List.of("0x00000000", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) .numBundles(boundaries.size() - 1) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesV2Test.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesV2Test.java index 556f9f3ac0544..91b0fdbf079a4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesV2Test.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesV2Test.java @@ -25,13 +25,11 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.Lists; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.TreeSet; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; import org.apache.pulsar.broker.admin.v2.Namespaces; @@ -97,7 +95,7 @@ public 
void setup() throws Exception { doReturn("test").when(namespaces).clientAppId(); doReturn(null).when(namespaces).originalPrincipal(); doReturn(null).when(namespaces).clientAuthData(); - doReturn(new TreeSet<>(Lists.newArrayList("use", "usw", "usc", "global"))).when(namespaces).clusters(); + doReturn(Set.of("use", "usw", "usc", "global")).when(namespaces).clusters(); admin.clusters().createCluster("use", ClusterData.builder().serviceUrl("http://broker-use.com:8080").build()); admin.clusters().createCluster("usw", ClusterData.builder().serviceUrl("http://broker-usw.com:8080").build()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java index 3a9bd21245bf0..5aef3644f2db4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java @@ -30,7 +30,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import com.google.common.collect.Lists; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.HashMap; @@ -212,7 +211,7 @@ public void testGetSubscriptions() { response = mock(AsyncResponse.class); persistentTopics.getSubscriptions(response, testTenant, testNamespace, testLocalTopicName + "-partition-0", true); - verify(response, timeout(5000).times(1)).resume(Lists.newArrayList("test")); + verify(response, timeout(5000).times(1)).resume(List.of("test")); // 6) Delete the subscription response = mock(AsyncResponse.class); @@ -240,7 +239,7 @@ public void testGetSubscriptions() { response = mock(AsyncResponse.class); persistentTopics.getSubscriptions(response, testTenant, testNamespace, testLocalTopicName + "-partition-1", true); - verify(response, timeout(5000).times(1)).resume(Lists.newArrayList("test")); + 
verify(response, timeout(5000).times(1)).resume(List.of("test")); // response = mock(AsyncResponse.class); persistentTopics.getSubscriptions(response, testTenant, testNamespace, testLocalTopicName + "-partition-0", @@ -249,7 +248,7 @@ public void testGetSubscriptions() { // response = mock(AsyncResponse.class); persistentTopics.getSubscriptions(response, testTenant, testNamespace, testLocalTopicName, true); - verify(response, timeout(5000).times(1)).resume(Lists.newArrayList("test")); + verify(response, timeout(5000).times(1)).resume(List.of("test")); // 9) Delete the partitioned topic response = mock(AsyncResponse.class); @@ -603,7 +602,7 @@ public void testCreatePartitionedTopicHavingNonPartitionTopicWithPartitionSuffix when(pulsar.getPulsarResources().getTopicResources() .listPersistentTopicsAsync(NamespaceName.get("my-tenant/my-namespace"))) - .thenReturn(CompletableFuture.completedFuture(Lists.newArrayList( + .thenReturn(CompletableFuture.completedFuture(List.of( "persistent://my-tenant/my-namespace/" + nonPartitionTopicName1, "persistent://my-tenant/my-namespace/" + nonPartitionTopicName2 ))); @@ -1188,7 +1187,7 @@ public void testExamineMessageMetadata() throws Exception { producer.newMessage() .keyBytes("partition123".getBytes()) .orderingKey(new byte[]{0}) - .replicationClusters(Lists.newArrayList("a", "b")) + .replicationClusters(List.of("a", "b")) .sequenceId(112233) .value("data") .send(); @@ -1204,7 +1203,7 @@ public void testExamineMessageMetadata() throws Exception { Assert.assertEquals("partition123".getBytes(), message.getKeyBytes()); Assert.assertTrue(message.hasBase64EncodedKey()); //test arrays - Assert.assertEquals(Lists.newArrayList("a", "b"), message.getReplicateTo()); + Assert.assertEquals(List.of("a", "b"), message.getReplicateTo()); //test string Assert.assertEquals(producer.getProducerName(), message.getProducerName()); //test enum diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java index 0cf5ed79ea743..22670babcc6f6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java @@ -88,7 +88,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import org.testng.collections.Lists; @Slf4j @Test(groups = "broker-admin") @@ -3072,22 +3071,22 @@ public void testShadowTopics() throws Exception { //shadow topic must exist Assert.expectThrows(PulsarAdminException.PreconditionFailedException.class, ()-> - admin.topics().setShadowTopics(sourceTopic, Lists.newArrayList(shadowTopic1))); + admin.topics().setShadowTopics(sourceTopic, List.of(shadowTopic1))); //shadow topic must be persistent topic Assert.expectThrows(PulsarAdminException.PreconditionFailedException.class, ()-> admin.topics().setShadowTopics(sourceTopic, - Lists.newArrayList("non-persistent://" + myNamespace + "/shadow-test1-" + UUID.randomUUID()))); + List.of("non-persistent://" + myNamespace + "/shadow-test1-" + UUID.randomUUID()))); pulsarClient.newProducer().topic(shadowTopic1).create().close(); pulsarClient.newProducer().topic(shadowTopic2).create().close(); - admin.topics().setShadowTopics(sourceTopic, Lists.newArrayList(shadowTopic1)); + admin.topics().setShadowTopics(sourceTopic, List.of(shadowTopic1)); Awaitility.await().untilAsserted(() -> Assert.assertEquals(admin.topics().getShadowTopics(sourceTopic), - Lists.newArrayList(shadowTopic1))); - admin.topics().setShadowTopics(sourceTopic, Lists.newArrayList(shadowTopic1, shadowTopic2)); + List.of(shadowTopic1))); + admin.topics().setShadowTopics(sourceTopic, List.of(shadowTopic1, shadowTopic2)); Awaitility.await().untilAsserted(() -> Assert.assertEquals(admin.topics().getShadowTopics(sourceTopic), - Lists.newArrayList(shadowTopic1, shadowTopic2))); + 
List.of(shadowTopic1, shadowTopic2))); admin.topics().removeShadowTopics(sourceTopic); Awaitility.await().untilAsserted(() -> assertNull(admin.topics().getShadowTopics(sourceTopic))); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java index f339416c2b9d7..7dce82a3691d0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java @@ -25,7 +25,6 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; import java.net.URL; import java.util.HashMap; @@ -33,7 +32,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.TimeUnit; import lombok.Cleanup; import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; @@ -151,7 +149,7 @@ public void testIncrementPartitionsOfTopic() throws Exception { PulsarClient client = PulsarClient.builder().serviceUrl(pulsarUrl.toString()).build(); Consumer consumer1 = client.newConsumer().topic(partitionedTopicName).subscriptionName(subName1) .subscriptionType(SubscriptionType.Shared).subscribe(); - assertEquals(admin.topics().getSubscriptions(partitionedTopicName), Lists.newArrayList(subName1)); + assertEquals(admin.topics().getSubscriptions(partitionedTopicName), List.of(subName1)); Consumer consumer2 = client.newConsumer().topic(partitionedTopicName).subscriptionName(subName2) .subscriptionType(SubscriptionType.Shared).subscribe(); assertEquals(new HashSet<>(admin.topics().getSubscriptions(partitionedTopicName)), @@ -189,7 +187,7 @@ public void testIncrementPartitionsOfTopic() throws Exception { // test cumulative stats for partitioned topic PartitionedTopicStats 
topicStats = admin.topics().getPartitionedStats(partitionedTopicName, false); - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList(subName1, subName2))); + assertEquals(topicStats.getSubscriptions().keySet(), Set.of(subName1, subName2)); assertEquals(topicStats.getSubscriptions().get(subName2).getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get(subName2).getMsgBacklog(), totalMessages); assertEquals(topicStats.getPublishers().size(), 1); @@ -242,12 +240,12 @@ public void nonPersistentTopics() throws Exception { publishMessagesOnTopic("non-persistent://prop-xyz/use/ns1/" + topicName, 10, 0); NonPersistentTopicStats topicStats = admin.nonPersistentTopics().getStats(persistentTopicName); - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(topicStats.getSubscriptions().keySet(), Set.of("my-sub")); assertEquals(topicStats.getSubscriptions().get("my-sub").getConsumers().size(), 1); assertEquals(topicStats.getPublishers().size(), 0); PersistentTopicInternalStats internalStats = admin.nonPersistentTopics().getInternalStats(persistentTopicName); - assertEquals(internalStats.cursors.keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(internalStats.cursors.keySet(), Set.of("my-sub")); consumer.close(); client.close(); @@ -435,7 +433,7 @@ public void testResetCursorOnPosition(String namespaceName) throws Exception { Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub") .subscriptionType(SubscriptionType.Shared).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, totalProducedMessages, 0); @@ -555,21 +553,21 @@ public void testPeerCluster() throws Exception { admin.clusters().createCluster("us-east2", 
ClusterData.builder().serviceUrl("http://broker.messaging.east2.example.com:8080").build()); - admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet(Lists.newArrayList("us-west2"))); - assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), Lists.newArrayList("us-west2")); + admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet(List.of("us-west2"))); + assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), List.of("us-west2")); assertNull(admin.clusters().getCluster("us-west2").getPeerClusterNames()); // update cluster with duplicate peer-clusters in the list admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet( - Lists.newArrayList("us-west2", "us-east1", "us-west2", "us-east1", "us-west2", "us-east1"))); + List.of("us-west2", "us-east1", "us-west2", "us-east1", "us-west2", "us-east1"))); assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), - Lists.newArrayList("us-west2", "us-east1")); + List.of("us-west2", "us-east1")); admin.clusters().updatePeerClusterNames("us-west1", null); assertNull(admin.clusters().getCluster("us-west1").getPeerClusterNames()); // Check name validation try { admin.clusters().updatePeerClusterNames("us-west1", - Sets.newLinkedHashSet(Lists.newArrayList("invalid-cluster"))); + Sets.newLinkedHashSet(List.of("invalid-cluster"))); fail("should have failed"); } catch (PulsarAdminException e) { assertTrue(e instanceof PreconditionFailedException); @@ -577,7 +575,7 @@ public void testPeerCluster() throws Exception { // Cluster itselft can't be part of peer-list try { - admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet(Lists.newArrayList("us-west1"))); + admin.clusters().updatePeerClusterNames("us-west1", Sets.newLinkedHashSet(List.of("us-west1"))); fail("should have failed"); } catch (PulsarAdminException e) { assertTrue(e instanceof PreconditionFailedException); @@ -615,9 +613,9 @@ public void 
testReplicationPeerCluster() throws Exception { admin.namespaces().createNamespace(namespace); admin.clusters().updatePeerClusterNames("us-west1", - Sets.newLinkedHashSet(Lists.newArrayList("us-west2", "us-west3"))); + Sets.newLinkedHashSet(List.of("us-west2", "us-west3"))); assertEquals(admin.clusters().getCluster("us-west1").getPeerClusterNames(), - Lists.newArrayList("us-west2", "us-west3")); + List.of("us-west2", "us-west3")); // (1) no conflicting peer Set clusterIds = Set.of("us-east1", "us-east2"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java index 9d90933ee9605..c0863cd73337a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java @@ -203,14 +203,14 @@ public void clusters() throws Exception { ClusterData.builder().serviceUrl("http://broker.messaging.use.example.com:8080").build()); // "test" cluster is part of config-default cluster and it's znode gets created when PulsarService creates // failure-domain znode of this default cluster - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("use", "usw")); + assertEquals(admin.clusters().getClusters(), List.of("use", "usw")); assertEquals(admin.clusters().getCluster("use"), ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); admin.clusters().updateCluster("usw", ClusterData.builder().serviceUrl("http://new-broker.messaging.use.example.com:8080").build()); - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("use", "usw")); + assertEquals(admin.clusters().getClusters(), List.of("use", "usw")); assertEquals(admin.clusters().getCluster("usw"), ClusterData.builder().serviceUrl("http://new-broker.messaging.use.example.com:8080").build()); @@ -219,7 +219,7 @@ public void clusters() throws Exception { 
.serviceUrl("http://new-broker.messaging.usw.example.com:8080") .serviceUrlTls("https://new-broker.messaging.usw.example.com:4443") .build()); - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("use", "usw")); + assertEquals(admin.clusters().getClusters(), List.of("use", "usw")); assertEquals(admin.clusters().getCluster("usw"), ClusterData.builder() .serviceUrl("http://new-broker.messaging.usw.example.com:8080") @@ -229,7 +229,7 @@ public void clusters() throws Exception { admin.clusters().deleteCluster("usw"); Thread.sleep(300); - assertEquals(admin.clusters().getClusters(), Lists.newArrayList("use")); + assertEquals(admin.clusters().getClusters(), List.of("use")); admin.namespaces().deleteNamespace("prop-xyz/use/ns1"); admin.clusters().deleteCluster("use"); @@ -588,7 +588,7 @@ public void properties() throws PulsarAdminException { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), allowedClusters); admin.tenants().updateTenant("prop-xyz", tenantInfo); - assertEquals(admin.tenants().getTenants(), Lists.newArrayList("prop-xyz")); + assertEquals(admin.tenants().getTenants(), List.of("prop-xyz")); assertEquals(admin.tenants().getTenantInfo("prop-xyz"), tenantInfo); @@ -635,9 +635,9 @@ public void namespaces() throws Exception { } assertEquals(admin.namespaces().getNamespaces("prop-xyz"), - Lists.newArrayList("prop-xyz/use/ns1", "prop-xyz/use/ns2")); + List.of("prop-xyz/use/ns1", "prop-xyz/use/ns2")); assertEquals(admin.namespaces().getNamespaces("prop-xyz", "use"), - Lists.newArrayList("prop-xyz/use/ns1", "prop-xyz/use/ns2")); + List.of("prop-xyz/use/ns1", "prop-xyz/use/ns2")); try { admin.namespaces().createNamespace("prop-xyz/usc/ns1"); @@ -693,7 +693,7 @@ public void namespaces() throws Exception { assertTrue(i < 10); admin.namespaces().deleteNamespace("prop-xyz/use/ns1"); - assertEquals(admin.namespaces().getNamespaces("prop-xyz", "use"), Lists.newArrayList("prop-xyz/use/ns2")); + 
assertEquals(admin.namespaces().getNamespaces("prop-xyz", "use"), List.of("prop-xyz/use/ns2")); try { admin.namespaces().unload("prop-xyz/use/ns1"); @@ -719,7 +719,7 @@ public void persistentTopics(String topicName) throws Exception { // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/use/ns1/" + topicName, 0); assertEquals(admin.topics().getList("prop-xyz/use/ns1"), - Lists.newArrayList("persistent://prop-xyz/use/ns1/" + topicName)); + List.of("persistent://prop-xyz/use/ns1/" + topicName)); // create consumer and subscription @Cleanup @@ -730,18 +730,18 @@ public void persistentTopics(String topicName) throws Exception { Consumer consumer = client.newConsumer().topic(persistentTopicName).subscriptionName("my-sub") .subscriptionType(SubscriptionType.Exclusive).subscribe(); - assertEquals(admin.topics().getSubscriptions(persistentTopicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(persistentTopicName), List.of("my-sub")); publishMessagesOnPersistentTopic("persistent://prop-xyz/use/ns1/" + topicName, 10); TopicStats topicStats = admin.topics().getStats(persistentTopicName); - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(List.of("my-sub"))); assertEquals(topicStats.getSubscriptions().get("my-sub").getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get("my-sub").getMsgBacklog(), 10); assertEquals(topicStats.getPublishers().size(), 0); PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(persistentTopicName, false); - assertEquals(internalStats.cursors.keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(internalStats.cursors.keySet(), new TreeSet<>(List.of("my-sub"))); List> messages = admin.topics().peekMessages(persistentTopicName, "my-sub", 3); assertEquals(messages.size(), 3); @@ -797,7 +797,7 @@ public void 
partitionedTopics(String topicName) throws Exception { final String partitionedTopicName = "persistent://prop-xyz/use/ns1/" + topicName; admin.topics().createPartitionedTopic(partitionedTopicName, 4); assertEquals(admin.topics().getPartitionedTopicList("prop-xyz/use/ns1"), - Lists.newArrayList(partitionedTopicName)); + List.of(partitionedTopicName)); assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, 4); @@ -819,7 +819,7 @@ public void partitionedTopics(String topicName) throws Exception { Consumer consumer = client.newConsumer().topic(partitionedTopicName).subscriptionName("my-sub") .subscriptionType(SubscriptionType.Exclusive).subscribe(); - assertEquals(admin.topics().getSubscriptions(partitionedTopicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(partitionedTopicName), List.of("my-sub")); try { admin.topics().deleteSubscription(partitionedTopicName, "my-sub"); @@ -841,7 +841,7 @@ public void partitionedTopics(String topicName) throws Exception { consumer1.close(); admin.topics().deleteSubscription(partitionedTopicName, "my-sub-1"); - assertEquals(admin.topics().getSubscriptions(partitionedTopicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(partitionedTopicName), List.of("my-sub")); Producer producer = client.newProducer(Schema.BYTES) .topic(partitionedTopicName) @@ -860,7 +860,7 @@ public void partitionedTopics(String topicName) throws Exception { // test cumulative stats for partitioned topic PartitionedTopicStats topicStats = admin.topics().getPartitionedStats(partitionedTopicName, false); - assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(Lists.newArrayList("my-sub"))); + assertEquals(topicStats.getSubscriptions().keySet(), new TreeSet<>(List.of("my-sub"))); assertEquals(topicStats.getSubscriptions().get("my-sub").getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get("my-sub").getMsgBacklog(), 10); 
assertEquals(topicStats.getPublishers().size(), 1); @@ -974,7 +974,7 @@ public void testNamespaceSplitBundle() throws Exception { .create(); producer.send("message".getBytes()); publishMessagesOnPersistentTopic(topicName, 0); - assertEquals(admin.topics().getList(namespace), Lists.newArrayList(topicName)); + assertEquals(admin.topics().getList(namespace), List.of(topicName)); try { admin.namespaces().splitNamespaceBundle(namespace, "0x00000000_0xffffffff", true, null); @@ -1004,7 +1004,7 @@ public void testNamespaceSplitBundleConcurrent() throws Exception { .create(); producer.send("message".getBytes()); publishMessagesOnPersistentTopic(topicName, 0); - assertEquals(admin.topics().getList(namespace), Lists.newArrayList(topicName)); + assertEquals(admin.topics().getList(namespace), List.of(topicName)); try { admin.namespaces().splitNamespaceBundle(namespace, "0x00000000_0xffffffff", false, null); @@ -1113,13 +1113,13 @@ public void testNamespaceUnloadBundle() throws Exception { // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/use/ns1/ds2", 0); assertEquals(admin.topics().getList("prop-xyz/use/ns1"), - Lists.newArrayList("persistent://prop-xyz/use/ns1/ds2")); + List.of("persistent://prop-xyz/use/ns1/ds2")); // create consumer and subscription Consumer consumer = pulsarClient.newConsumer().topic("persistent://prop-xyz/use/ns1/ds2") .subscriptionName("my-sub").subscribe(); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/use/ns1/ds2"), - Lists.newArrayList("my-sub")); + List.of("my-sub")); // Create producer Producer producer = pulsarClient.newProducer(Schema.BYTES) @@ -1174,13 +1174,13 @@ public void testNamespaceBundleUnload(Integer numBundles) throws Exception { // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/use/ns1-bundles/ds2", 0); assertEquals(admin.topics().getList("prop-xyz/use/ns1-bundles"), - Lists.newArrayList("persistent://prop-xyz/use/ns1-bundles/ds2")); + 
List.of("persistent://prop-xyz/use/ns1-bundles/ds2")); // create consumer and subscription Consumer consumer = pulsarClient.newConsumer().topic("persistent://prop-xyz/use/ns1-bundles/ds2") .subscriptionName("my-sub").subscribe(); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/use/ns1-bundles/ds2"), - Lists.newArrayList("my-sub")); + List.of("my-sub")); // Create producer Producer producer = pulsarClient.newProducer(Schema.BYTES) @@ -1326,9 +1326,9 @@ public void testUnsubscribeOnNamespace(Integer numBundles) throws Exception { admin.namespaces().unsubscribeNamespace("prop-xyz/use/ns1-bundles", "my-sub"); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/use/ns1-bundles/ds2"), - Lists.newArrayList("my-sub-1", "my-sub-2")); + List.of("my-sub-1", "my-sub-2")); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/use/ns1-bundles/ds1"), - Lists.newArrayList("my-sub-1")); + List.of("my-sub-1")); consumer2.close(); consumer5.close(); @@ -1336,7 +1336,7 @@ public void testUnsubscribeOnNamespace(Integer numBundles) throws Exception { admin.namespaces().unsubscribeNamespace("prop-xyz/use/ns1-bundles", "my-sub-1"); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/use/ns1-bundles/ds2"), - Lists.newArrayList("my-sub-2")); + List.of("my-sub-2")); assertEquals(admin.topics().getSubscriptions("persistent://prop-xyz/use/ns1-bundles/ds1"), new ArrayList<>()); } @@ -1456,9 +1456,9 @@ public void testJacksonWithTypeDifferencies() throws Exception { @Test public void testBackwardCompatiblity() throws Exception { - assertEquals(admin.tenants().getTenants(), Lists.newArrayList("prop-xyz")); + assertEquals(admin.tenants().getTenants(), List.of("prop-xyz")); assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAdminRoles(), - Lists.newArrayList("role1", "role2")); + List.of("role1", "role2")); assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), Set.of("use")); // Try to deserialize 
property JSON with IncompatiblePropertyAdmin format @@ -1490,7 +1490,7 @@ public void persistentTopicsCursorReset(String topicName) throws Exception { .subscriptionType(SubscriptionType.Exclusive) .acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 5, 0); @@ -1543,7 +1543,7 @@ public void persistentTopicsCursorResetAfterReset(String topicName) throws Excep .subscriptionType(SubscriptionType.Exclusive) .acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 5, 0); @@ -1619,7 +1619,7 @@ public void partitionedTopicsCursorReset(String topicName) throws Exception { List topics = admin.topics().getList("prop-xyz/use/ns1"); assertEquals(topics.size(), 4); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 5, 0); Thread.sleep(1); @@ -1661,7 +1661,7 @@ public void persistentTopicsInvalidCursorReset() throws Exception { String topicName = "persistent://prop-xyz/use/ns1/invalidcursorreset"; // Force to create a topic publishMessagesOnPersistentTopic(topicName, 0); - assertEquals(admin.topics().getList("prop-xyz/use/ns1"), Lists.newArrayList(topicName)); + assertEquals(admin.topics().getList("prop-xyz/use/ns1"), List.of(topicName)); // create consumer and subscription @Cleanup @@ -1672,7 +1672,7 @@ public void persistentTopicsInvalidCursorReset() throws Exception { Consumer consumer = client.newConsumer().topic(topicName).subscriptionName("my-sub") 
.subscriptionType(SubscriptionType.Exclusive).subscribe(); - assertEquals(admin.topics().getSubscriptions(topicName), Lists.newArrayList("my-sub")); + assertEquals(admin.topics().getSubscriptions(topicName), List.of("my-sub")); publishMessagesOnPersistentTopic(topicName, 10); @@ -1746,7 +1746,7 @@ public void testPersistentTopicsExpireMessages() throws Exception { // Force to create a topic publishMessagesOnPersistentTopic("persistent://prop-xyz/use/ns1/ds2", 0); assertEquals(admin.topics().getList("prop-xyz/use/ns1"), - Lists.newArrayList("persistent://prop-xyz/use/ns1/ds2")); + List.of("persistent://prop-xyz/use/ns1/ds2")); // create consumer and subscription @Cleanup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java index 2945049e3f546..d0243ecdef1dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java @@ -19,26 +19,22 @@ package org.apache.pulsar.broker.auth; import static org.testng.Assert.fail; - -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.testng.annotations.AfterClass; - -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - +import com.google.common.collect.Sets; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException.NotAuthorizedException; import org.apache.pulsar.client.admin.PulsarAdminException.ServerSideErrorException; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException.AuthenticationException; import org.apache.pulsar.client.api.PulsarClientException.AuthorizationException; -import 
org.apache.pulsar.client.api.Producer; -import com.google.common.collect.Sets; - +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; /** * This test doesn't test much in and off itself. diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthenticationServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthenticationServiceTest.java index 15feac95380a6..24beb8f426a97 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthenticationServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthenticationServiceTest.java @@ -21,13 +21,11 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; - +import com.google.common.collect.Sets; import java.io.IOException; import java.util.Set; - import javax.naming.AuthenticationException; import javax.servlet.http.HttpServletRequest; - import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationProvider; @@ -35,8 +33,6 @@ import org.testng.Assert; import org.testng.annotations.Test; -import com.google.common.collect.Sets; - public class AuthenticationServiceTest { private static final String s_authentication_success = "authenticated"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java index 39a91f72dc742..b533f8ce8b221 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java @@ -22,6 +22,7 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import com.google.common.collect.Sets; import java.util.EnumSet; import org.apache.pulsar.broker.authorization.AuthorizationService; import org.apache.pulsar.client.admin.PulsarAdmin; @@ -30,13 +31,12 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.SubscriptionAuthMode; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import com.google.common.collect.Sets; @Test(groups = "flaky") public class AuthorizationTest extends MockedPulsarServiceBaseTest { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthenticationProvider.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthenticationProvider.java index a15a433f26eca..3ef9b5ad2d63f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthenticationProvider.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthenticationProvider.java @@ -19,13 +19,10 @@ package org.apache.pulsar.broker.auth; import java.io.IOException; - import javax.naming.AuthenticationException; - import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationProvider; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedderTest.java index 84b24fc3121e3..e1edb8a9462b4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedderTest.java @@ -21,17 +21,15 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; - -import com.google.common.collect.Lists; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; - +import java.util.List; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.LoadData; -import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; -import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.BrokerData; import org.apache.pulsar.policies.data.loadbalancer.BundleData; +import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; +import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.TimeAverageMessageData; import org.testng.annotations.Test; @@ -148,7 +146,7 @@ public void testBrokerWithMultipleBundles() { Multimap bundlesToUnload = os.findBundlesForUnloading(loadData, conf); assertFalse(bundlesToUnload.isEmpty()); - assertEquals(bundlesToUnload.get("broker-1"), Lists.newArrayList("bundle-10", "bundle-9")); + assertEquals(bundlesToUnload.get("broker-1"), List.of("bundle-10", "bundle-9")); } @Test @@ -187,7 +185,7 @@ public void testFilterRecentlyUnloaded() { Multimap bundlesToUnload = os.findBundlesForUnloading(loadData, conf); assertFalse(bundlesToUnload.isEmpty()); - assertEquals(bundlesToUnload.get("broker-1"), Lists.newArrayList("bundle-8", "bundle-7")); + 
assertEquals(bundlesToUnload.get("broker-1"), List.of("bundle-8", "bundle-7")); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedderTest.java index 8461f8ce74c43..adc7d2f9c7370 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedderTest.java @@ -21,16 +21,16 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; -import com.google.common.collect.Lists; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; +import java.util.List; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.LoadData; -import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; -import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.BrokerData; import org.apache.pulsar.policies.data.loadbalancer.BundleData; +import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; +import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.TimeAverageMessageData; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -161,7 +161,7 @@ public void testBrokerWithMultipleBundles() { Multimap bundlesToUnload = thresholdShedder.findBundlesForUnloading(loadData, conf); assertFalse(bundlesToUnload.isEmpty()); assertEquals(bundlesToUnload.get("broker-1"), - Lists.newArrayList("bundle-10", "bundle-9", "bundle-8")); + List.of("bundle-10", "bundle-9", "bundle-8")); } @Test @@ -209,7 +209,7 @@ public void testFilterRecentlyUnloaded() { Multimap bundlesToUnload = 
thresholdShedder.findBundlesForUnloading(loadData, conf); assertFalse(bundlesToUnload.isEmpty()); assertEquals(bundlesToUnload.get("broker-1"), - Lists.newArrayList("bundle-8", "bundle-7", "bundle-6", "bundle-5")); + List.of("bundle-8", "bundle-7", "bundle-6", "bundle-5")); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java index cb806d3ccfe5d..ca8408c468d9d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java @@ -31,11 +31,14 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - +import com.github.benmanes.caffeine.cache.AsyncLoadingCache; +import com.google.common.hash.Hashing; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.net.URI; +import java.util.ArrayList; import java.util.EnumSet; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -45,7 +48,6 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; - import lombok.Cleanup; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.commons.collections4.CollectionUtils; @@ -64,13 +66,12 @@ import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.PulsarClient; - -import org.apache.pulsar.common.naming.ServiceUnitId; -import org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; -import org.apache.pulsar.common.naming.NamespaceBundleFactory; import org.apache.pulsar.common.naming.NamespaceBundle; +import org.apache.pulsar.common.naming.NamespaceBundleFactory; +import 
org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; import org.apache.pulsar.common.naming.NamespaceBundles; import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.ServiceUnitId; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.BundlesData; import org.apache.pulsar.common.policies.data.LocalPolicies; @@ -82,10 +83,10 @@ import org.apache.pulsar.metadata.api.Notification; import org.apache.pulsar.metadata.api.extended.CreateOption; import org.apache.pulsar.policies.data.loadbalancer.AdvertisedListener; +import org.apache.pulsar.policies.data.loadbalancer.BundleData; import org.apache.pulsar.policies.data.loadbalancer.LoadReport; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; -import org.apache.pulsar.policies.data.loadbalancer.BundleData; import org.awaitility.Awaitility; import org.mockito.stubbing.Answer; import org.slf4j.Logger; @@ -95,11 +96,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import com.github.benmanes.caffeine.cache.AsyncLoadingCache; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.hash.Hashing; - @Test(groups = "flaky") public class NamespaceServiceTest extends BrokerTestBase { @@ -186,7 +182,7 @@ public void testSplitMapWithRefreshedStatMap() throws Exception { OwnershipCache MockOwnershipCache = spy(pulsar.getNamespaceService().getOwnershipCache()); ManagedLedger ledger = mock(ManagedLedger.class); - when(ledger.getCursors()).thenReturn(Lists.newArrayList()); + when(ledger.getCursors()).thenReturn(new ArrayList<>()); doReturn(CompletableFuture.completedFuture(null)).when(MockOwnershipCache).disableOwnership(any(NamespaceBundle.class)); Field ownership = NamespaceService.class.getDeclaredField("ownershipCache"); @@ -240,7 +236,7 @@ public void 
testIsServiceUnitDisabled() throws Exception { OwnershipCache MockOwnershipCache = spy(pulsar.getNamespaceService().getOwnershipCache()); ManagedLedger ledger = mock(ManagedLedger.class); - when(ledger.getCursors()).thenReturn(Lists.newArrayList()); + when(ledger.getCursors()).thenReturn(new ArrayList<>()); doReturn(CompletableFuture.completedFuture(null)).when(MockOwnershipCache).disableOwnership(any(NamespaceBundle.class)); Field ownership = NamespaceService.class.getDeclaredField("ownershipCache"); @@ -263,7 +259,7 @@ public void testRemoveOwnershipNamespaceBundle() throws Exception { OwnershipCache ownershipCache = spy(pulsar.getNamespaceService().getOwnershipCache()); ManagedLedger ledger = mock(ManagedLedger.class); - when(ledger.getCursors()).thenReturn(Lists.newArrayList()); + when(ledger.getCursors()).thenReturn(new ArrayList<>()); doReturn(CompletableFuture.completedFuture(null)).when(ownershipCache).disableOwnership(any(NamespaceBundle.class)); Field ownership = NamespaceService.class.getDeclaredField("ownershipCache"); @@ -386,7 +382,7 @@ public void testCreateLookupResult() throws Exception { final String listenerUrl = "pulsar://localhost:7000"; final String listenerUrlTls = "pulsar://localhost:8000"; final String listener = "listenerName"; - Map advertisedListeners = Maps.newHashMap(); + Map advertisedListeners = new HashMap<>(); advertisedListeners.put(listener, AdvertisedListener.builder().brokerServiceUrl(new URI(listenerUrl)).brokerServiceUrlTls(new URI(listenerUrlTls)).build()); LocalBrokerData ld = new LocalBrokerData(null, null, candidateBroker, null, advertisedListeners); URI uri = new URI(candidateBroker); @@ -570,7 +566,7 @@ public void testSplitLargestBundle() throws Exception { NamespaceName nsname = NamespaceName.get(namespace); NamespaceBundles bundles = namespaceService.getNamespaceBundleFactory().getBundles(nsname); - Map topicCount = Maps.newHashMap(); + Map topicCount = new HashMap<>(); int maxTopics = 0; String maxBundle = null; for 
(int i = 0; i < totalTopics; i++) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java index 6168c61bb20b4..db29d16c9fff3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.namespace; -import static com.google.common.base.Preconditions.checkNotNull; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; @@ -34,6 +33,7 @@ import com.google.common.collect.Range; import com.google.common.hash.Hashing; import java.util.EnumSet; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -250,7 +250,7 @@ public void testGetOwnedServiceUnit() throws Exception { assertFalse(cache.getOwnerAsync(testBundle).get().isPresent()); try { - checkNotNull(cache.getOwnedBundle(testBundle)); + Objects.requireNonNull(cache.getOwnedBundle(testBundle)); fail("Should have failed"); } catch (NullPointerException npe) { // OK for not owned namespace @@ -264,7 +264,7 @@ public void testGetOwnedServiceUnit() throws Exception { "https://localhost:4443", false)), Optional.of(-1L), EnumSet.of(CreateOption.Ephemeral)).join(); try { - checkNotNull(cache.getOwnedBundle(testBundle)); + Objects.requireNonNull(cache.getOwnedBundle(testBundle)); fail("Should have failed"); } catch (NullPointerException npe) { // OK for not owned namespace @@ -284,7 +284,7 @@ public void testGetOwnedServiceUnit() throws Exception { assertEquals(data1.getNativeUrlTls(), "pulsar://otherhost:8884"); assertFalse(data1.isDisabled()); try { - checkNotNull(cache.getOwnedBundle(testBundle)); + 
Objects.requireNonNull(cache.getOwnedBundle(testBundle)); fail("Should have failed"); } catch (NullPointerException npe) { // OK for not owned namespace diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java index cc1558fed2745..b129995a8cc47 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java @@ -24,11 +24,11 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import com.google.common.collect.ImmutableMap; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.impl.EntryImpl; @@ -87,7 +87,7 @@ public void testFilterEntriesForConsumerOfEntryFilter() throws Exception { EntryFilterWithClassLoader mockFilter = mock(EntryFilterWithClassLoader.class); when(mockFilter.filterEntry(any(Entry.class), any(FilterContext.class))).thenReturn( EntryFilter.FilterResult.REJECT); - ImmutableMap entryFilters = ImmutableMap.of("key", mockFilter); + Map entryFilters = Map.of("key", mockFilter); when(mockTopic.getEntryFilters()).thenReturn(entryFilters); this.helper = new AbstractBaseDispatcherTestHelper(this.subscriptionMock, this.svcConfig); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java index b100c3a977ab2..fccc7abc66854 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java @@ -22,11 +22,11 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.beust.jcommander.internal.Maps; import com.google.common.collect.Sets; import java.net.URL; import java.time.Duration; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Optional; import java.util.UUID; @@ -183,7 +183,7 @@ private void rolloverStats() { @Test public void testBacklogQuotaWithReader() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -268,7 +268,7 @@ private TopicStats getTopicStats(String topic1) throws PulsarAdminException { @Test public void testTriggerBacklogQuotaSizeWithReader() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -340,7 +340,7 @@ public void testTriggerBacklogQuotaSizeWithReader() throws Exception { @Test public void testTriggerBacklogTimeQuotaWithReader() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -407,7 +407,7 @@ public void testTriggerBacklogTimeQuotaWithReader() throws Exception { @Test public void testConsumerBacklogEvictionSizeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -442,7 +442,7 @@ public void 
testConsumerBacklogEvictionSizeQuota() throws Exception { @Test public void testConsumerBacklogEvictionTimeQuotaPrecise() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) @@ -484,7 +484,7 @@ public void testConsumerBacklogEvictionTimeQuotaPrecise() throws Exception { @Test(timeOut = 60000) public void testConsumerBacklogEvictionTimeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) @@ -530,7 +530,7 @@ public void testConsumerBacklogEvictionTimeQuota() throws Exception { @Test public void testConsumerBacklogEvictionTimeQuotaWithEmptyLedger() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) @@ -577,7 +577,7 @@ public void testConsumerBacklogEvictionTimeQuotaWithEmptyLedger() throws Excepti @Test public void testConsumerBacklogEvictionWithAckSizeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -612,7 +612,7 @@ public void testConsumerBacklogEvictionWithAckSizeQuota() throws Exception { @Test public void testConsumerBacklogEvictionWithAckTimeQuotaPrecise() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) @@ 
-675,7 +675,7 @@ private Producer createProducer(PulsarClient client, String topic) @Test public void testConsumerBacklogEvictionWithAckTimeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); @Cleanup PulsarClient client = PulsarClient.builder().serviceUrl(adminUrl.toString()).build(); @@ -745,7 +745,7 @@ public void testConsumerBacklogEvictionWithAckTimeQuota() throws Exception { @Test public void testConcurrentAckAndEviction() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -816,7 +816,7 @@ public void testConcurrentAckAndEviction() throws Exception { @Test public void testNoEviction() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(10 * 1024) @@ -880,7 +880,7 @@ public void testNoEviction() throws Exception { @Test public void testEvictionMulti() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/ns-quota", BacklogQuota.builder() .limitSize(15 * 1024) @@ -982,7 +982,7 @@ public void testEvictionMulti() throws Exception { @Test public void testAheadProducerOnHold() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", BacklogQuota.builder() .limitSize(10 * 1024) @@ -1024,7 +1024,7 @@ public void testAheadProducerOnHold() throws Exception { @Test public void testAheadProducerOnHoldTimeout() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), - 
Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", BacklogQuota.builder() .limitSize(10 * 1024) @@ -1062,7 +1062,7 @@ public void testAheadProducerOnHoldTimeout() throws Exception { @Test public void testProducerException() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", BacklogQuota.builder() .limitSize(10 * 1024) @@ -1102,7 +1102,7 @@ public void testProducerException() throws Exception { @Test public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", BacklogQuota.builder() .limitSize(10 * 1024) @@ -1166,7 +1166,7 @@ public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { @Test public void testProducerExceptionAndThenUnblockTimeQuotaPrecise() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", BacklogQuota.builder() .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) @@ -1232,7 +1232,7 @@ public void testProducerExceptionAndThenUnblockTimeQuotaPrecise() throws Excepti @Test public void testProducerExceptionAndThenUnblockTimeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), - Maps.newHashMap()); + new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", BacklogQuota.builder() .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java index e9c5032063f11..fe2551287c8c6 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java @@ -22,11 +22,8 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; - -import com.google.common.collect.Lists; - -import java.util.Arrays; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Random; @@ -39,7 +36,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import lombok.Cleanup; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; @@ -130,7 +126,7 @@ public void testSimpleBatchProducerWithFixedBatchSize(CompressionType compressio .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -179,7 +175,7 @@ public void testSimpleBatchProducerWithFixedBatchBytes(CompressionType compressi .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -223,7 +219,7 @@ public void testSimpleBatchProducerWithFixedBatchTime(CompressionType compressio .create(); Random random = new Random(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { // put a random sleep from 0 to 3 ms Thread.sleep(random.nextInt(4)); @@ -259,7 +255,7 @@ public void 
testSimpleBatchProducerWithFixedBatchSizeAndTime(CompressionType com .compressionType(compressionType).enableBatching(true).create(); Random random = new Random(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { // put a random sleep from 0 to 3 ms Thread.sleep(random.nextInt(4)); @@ -298,7 +294,7 @@ public void testBatchProducerWithLargeMessage(CompressionType compressionType, B .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { if (i == 25) { // send a large message @@ -361,7 +357,7 @@ public void testSimpleBatchProducerConsumer(CompressionType compressionType, Bat .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("msg-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -509,7 +505,7 @@ public void testSimpleBatchProducerConsumer1kMessages(BatcherBuilder builder) th .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("msg-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -571,7 +567,7 @@ public void testOutOfOrderAcksForBatchMessage() throws Exception { .batchingMaxPublishDelay(5, TimeUnit.SECONDS).batchingMaxMessages(numMsgsInBatch).enableBatching(true) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("msg-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -640,7 +636,7 @@ public void testNonBatchCumulativeAckAfterBatchPublish(BatcherBuilder builder) t // create producer to publish non batch messages Producer noBatchProducer = 
pulsarClient.newProducer().topic(topicName).create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("msg-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -697,7 +693,7 @@ public void testBatchAndNonBatchCumulativeAcks(BatcherBuilder builder) throws Ex .messageRoutingMode(MessageRoutingMode.SinglePartition) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs / 2; i++) { byte[] message = ("msg-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -760,7 +756,7 @@ public void testConcurrentBatchMessageAck(BatcherBuilder builder) throws Excepti .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -814,7 +810,7 @@ public void testOrderingOfKeyBasedBatchMessageContainer() throws PulsarClientExc .subscriptionName(subscriptionName) .subscriptionType(SubscriptionType.Key_Shared) .subscribe(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); String[] keys = new String[]{"key-1", "key-2", "key-3"}; for (int i = 0; i < 10; i++) { byte[] message = ("my-message-" + i).getBytes(); @@ -912,7 +908,7 @@ public void testRetrieveSequenceIdGenerated(BatcherBuilder builder) throws Excep .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); @@ -944,7 +940,7 @@ public void testRetrieveSequenceIdSpecify(BatcherBuilder builder) throws Excepti .batcherBuilder(builder) .create(); - List> sendFutureList = Lists.newArrayList(); + 
List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.newMessage().sequenceId(i + 100).value(message).sendAsync()); @@ -1008,7 +1004,7 @@ public void testBatchMessageDispatchingAccordingToPermits() throws Exception { Producer producer = pulsarClient.newProducer().topic(topicName).batchingMaxMessages(batchMessages) .batchingMaxPublishDelay(500, TimeUnit.MILLISECONDS).enableBatching(true).create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.newMessage().value(message).sendAsync()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java index 731c65dd33d33..c15c93c0e9a52 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java @@ -18,7 +18,13 @@ */ package org.apache.pulsar.broker.service; -import com.google.common.collect.Lists; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.SneakyThrows; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; @@ -33,13 +39,6 @@ import org.awaitility.Awaitility; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import 
java.util.concurrent.TimeUnit; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNull; @Test(groups = "broker") public class BatchMessageWithBatchIndexLevelTest extends BatchMessageTest { @@ -78,7 +77,7 @@ public void testBatchMessageAck() { .enableBatching(true) .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("batch-message-" + i).getBytes(); sendFutureList.add(producer.newMessage().value(message).sendAsync()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java index e575154a743e5..62469fdac9bfb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java @@ -23,9 +23,10 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.fail; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Maps; import com.google.common.collect.Sets; import java.lang.reflect.Method; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -390,7 +391,7 @@ public void testSetRackInfoAndAffinityGroupDuringProduce() throws Exception { long ledgerId = lInfo.getLedgerId(); CompletableFuture> ledgerMetaFuture = ledgerManager.readLedgerMetadata(ledgerId); LedgerMetadata ledgerMetadata = ledgerMetaFuture.get().getValue(); - Set ledgerBookies = Sets.newHashSet(); + Set ledgerBookies = new HashSet<>(); ledgerBookies.addAll(ledgerMetadata.getAllEnsembles().values().iterator().next()); assertEquals(ledgerBookies.size(), isolatedBookies.size()); ledgerBookies.removeAll(isolatedBookies); @@ -736,7 +737,7 @@ public void testDeleteIsolationGroup() 
throws Exception { setDefaultIsolationGroup(brokerBookkeeperClientIsolationGroups, zkClient, defaultBookies); // primary group empty - setDefaultIsolationGroup(tenantNamespaceIsolationGroupsPrimary, zkClient, Sets.newHashSet()); + setDefaultIsolationGroup(tenantNamespaceIsolationGroupsPrimary, zkClient, new HashSet<>()); setDefaultIsolationGroup(tenantNamespaceIsolationGroupsSecondary, zkClient, isolatedBookies); ServiceConfiguration config = new ServiceConfiguration(); @@ -811,7 +812,7 @@ private void assertAffinityBookies(LedgerManager ledgerManager, List long ledgerId = lInfo.getLedgerId(); CompletableFuture> ledgerMetaFuture = ledgerManager.readLedgerMetadata(ledgerId); LedgerMetadata ledgerMetadata = ledgerMetaFuture.get().getValue(); - Set ledgerBookies = Sets.newHashSet(); + Set ledgerBookies = new HashSet<>(); ledgerBookies.addAll(ledgerMetadata.getAllEnsembles().values().iterator().next()); assertEquals(ledgerBookies.size(), defaultBookies.size()); ledgerBookies.removeAll(defaultBookies); @@ -854,7 +855,7 @@ private void setDefaultIsolationGroup(String brokerBookkeeperClientIsolationGrou bookies = new BookiesRackConfiguration(); } - Map bookieInfoMap = Maps.newHashMap(); + Map bookieInfoMap = new HashMap<>(); for (BookieId bkSocket : bookieAddresses) { BookieInfo info = BookieInfo.builder().rack("use").hostname(bkSocket.toString()).build(); bookieInfoMap.put(bkSocket.toString(), info); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java index 389e9420c025c..f011ef9b84209 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java @@ -29,7 +29,6 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import 
com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.gson.Gson; import com.google.gson.JsonArray; @@ -579,8 +578,8 @@ public void testBrokerServiceNamespaceStats() throws Exception { final String ns1 = "prop/stats1"; final String ns2 = "prop/stats2"; - List nsList = Lists.newArrayList(ns1, ns2); - List> producerList = Lists.newArrayList(); + List nsList = List.of(ns1, ns2); + List> producerList = new ArrayList<>(); BrokerStats brokerStatsClient = admin.brokerStats(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceThrottlingTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceThrottlingTest.java index 64cde26847fbe..5b1589a7c9ba6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceThrottlingTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceThrottlingTest.java @@ -22,8 +22,6 @@ import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - -import com.google.common.collect.Lists; import io.netty.buffer.ByteBuf; import io.netty.channel.EventLoopGroup; import io.netty.util.concurrent.DefaultThreadFactory; @@ -259,7 +257,7 @@ public void testLookupThrottlingForClientByBrokerInternalRetry() throws Exceptio .statsInterval(0, TimeUnit.SECONDS) .ioThreads(20).connectionsPerBroker(20).build(); upsertLookupPermits(100); - List> consumers = Collections.synchronizedList(Lists.newArrayList()); + List> consumers = Collections.synchronizedList(new ArrayList<>()); @Cleanup("shutdownNow") ExecutorService executor = Executors.newFixedThreadPool(10); final int totalConsumers = 8; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/DistributedIdGeneratorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/DistributedIdGeneratorTest.java index 75334d60beac3..6aa9a53b33674 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/DistributedIdGeneratorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/DistributedIdGeneratorTest.java @@ -19,8 +19,8 @@ package org.apache.pulsar.broker.service; import static org.testng.Assert.assertEquals; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Optional; @@ -86,7 +86,7 @@ public void concurrent() throws Exception { @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); - List results = Collections.synchronizedList(Lists.newArrayList()); + List results = Collections.synchronizedList(new ArrayList<>()); for (int i = 0; i < Threads; i++) { executor.execute(() -> { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/HashRangeExclusiveStickyKeyConsumerSelectorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/HashRangeExclusiveStickyKeyConsumerSelectorTest.java index dde0a586f03f2..ff9cb56b7811c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/HashRangeExclusiveStickyKeyConsumerSelectorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/HashRangeExclusiveStickyKeyConsumerSelectorTest.java @@ -20,16 +20,12 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - -import com.google.common.collect.Lists; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.pulsar.client.api.Range; import org.apache.pulsar.common.api.proto.IntRange; import org.apache.pulsar.common.api.proto.KeySharedMeta; @@ -228,12 +224,12 @@ public void testMultipleRangeConflict() throws BrokerServiceException.ConsumerAs Assert.assertEquals(selector.getRangeConsumer().size(),2); final List> testRanges = new 
ArrayList<>(); - testRanges.add(Lists.newArrayList( + testRanges.add(List.of( new IntRange().setStart(2).setEnd(2), new IntRange().setStart(3).setEnd(3), new IntRange().setStart(4).setEnd(5)) ); - testRanges.add(Lists.newArrayList( + testRanges.add(List.of( new IntRange().setStart(0).setEnd(0), new IntRange().setStart(1).setEnd(2)) ); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageTTLTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageTTLTest.java index 76f09377edcb5..38bb301ae38ca 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageTTLTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageTTLTest.java @@ -23,11 +23,10 @@ import static org.mockito.Mockito.verify; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; -import com.google.common.collect.Lists; +import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - import lombok.Cleanup; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -81,7 +80,7 @@ public void testMessageExpiryAfterTopicUnload() throws Exception { .enableBatching(false) // this makes the test easier and predictable .create(); - List> sendFutureList = Lists.newArrayList(); + List> sendFutureList = new ArrayList<>(); for (int i = 0; i < numMsgs; i++) { byte[] message = ("my-message-" + i).getBytes(); sendFutureList.add(producer.sendAsync(message)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PeerReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PeerReplicatorTest.java index 8381ad599f10f..5f145e258cf75 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PeerReplicatorTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PeerReplicatorTest.java @@ -24,10 +24,10 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.fail; - +import com.google.common.collect.Sets; import java.util.LinkedHashSet; +import java.util.List; import java.util.concurrent.TimeUnit; - import lombok.Cleanup; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -43,9 +43,6 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import org.testng.collections.Lists; - -import com.google.common.collect.Sets; @Test(groups = "broker") public class PeerReplicatorTest extends ReplicatorTestBase { @@ -130,7 +127,7 @@ public void testPeerClusterTopicLookup(String protocol) throws Exception { } // set peer-clusters : r3->r1 - admin1.clusters().updatePeerClusterNames("r3", Sets.newLinkedHashSet(Lists.newArrayList("r1"))); + admin1.clusters().updatePeerClusterNames("r3", Sets.newLinkedHashSet(List.of("r1"))); Producer producer = client3.newProducer().topic(topic1).create(); PersistentTopic topic = (PersistentTopic) pulsar1.getBrokerService().getOrCreateTopic(topic1).get(); assertNotNull(topic); @@ -145,7 +142,7 @@ public void testPeerClusterTopicLookup(String protocol) throws Exception { producer.close(); // set peer-clusters : r3->r2 - admin2.clusters().updatePeerClusterNames("r3", Sets.newLinkedHashSet(Lists.newArrayList("r2"))); + admin2.clusters().updatePeerClusterNames("r3", Sets.newLinkedHashSet(List.of("r2"))); producer = client3.newProducer().topic(topic2).create(); topic = (PersistentTopic) pulsar2.getBrokerService().getOrCreateTopic(topic2).get(); assertNotNull(topic); @@ -172,7 +169,7 @@ public void testGetPeerClusters() throws Exception { final String mainClusterName = "r1"; assertNull(admin1.clusters().getPeerClusterNames(mainClusterName)); - 
LinkedHashSet peerClusters = Sets.newLinkedHashSet(Lists.newArrayList("r2", "r3")); + LinkedHashSet peerClusters = Sets.newLinkedHashSet(List.of("r2", "r3")); admin1.clusters().updatePeerClusterNames(mainClusterName, peerClusters); retryStrategically((test) -> { try { @@ -214,8 +211,8 @@ public void testPeerClusterInReplicationClusterListChange() throws Exception { @Cleanup PulsarClient client3 = PulsarClient.builder().serviceUrl(serviceUrl).statsInterval(0, TimeUnit.SECONDS).build(); // set peer-clusters : r3->r1 - admin1.clusters().updatePeerClusterNames("r3", Sets.newLinkedHashSet(Lists.newArrayList("r1"))); - admin1.clusters().updatePeerClusterNames("r1", Sets.newLinkedHashSet(Lists.newArrayList("r3"))); + admin1.clusters().updatePeerClusterNames("r3", Sets.newLinkedHashSet(List.of("r1"))); + admin1.clusters().updatePeerClusterNames("r1", Sets.newLinkedHashSet(List.of("r3"))); Producer producer = client3.newProducer().topic(topic1).create(); PersistentTopic topic = (PersistentTopic) pulsar1.getBrokerService().getOrCreateTopic(topic1).get(); assertNotNull(topic); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentFailoverE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentFailoverE2ETest.java index 9fe83f7b11d25..fa382ef3c5e58 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentFailoverE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentFailoverE2ETest.java @@ -23,17 +23,14 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - -import com.google.common.collect.Lists; import com.google.common.collect.Sets; - +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; - import 
org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherSingleActiveConsumer; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; @@ -123,8 +120,8 @@ private void verifyConsumerInactive(TestConsumerStateEventListener listener, int private static class ActiveInactiveListenerEvent implements ConsumerEventListener { - private final Set activePtns = Sets.newHashSet(); - private final Set inactivePtns = Sets.newHashSet(); + private final Set activePtns = new HashSet<>(); + private final Set inactivePtns = new HashSet<>(); @Override public synchronized void becameActive(Consumer consumer, int partitionId) { @@ -171,7 +168,7 @@ public void testSimpleConsumerEventsWithoutPartition() throws Exception { assertTrue(subRef.getDispatcher().isConsumerConnected()); assertEquals(subRef.getDispatcher().getType(), SubType.Failover); - List> futures = Lists.newArrayListWithCapacity(numMsgs); + List> futures = new ArrayList<>(numMsgs); Producer producer = pulsarClient.newProducer().topic(topicName) .enableBatching(false) .messageRoutingMode(MessageRoutingMode.SinglePartition) @@ -332,7 +329,7 @@ public void testSimpleConsumerEventsWithPartition() throws Exception { // equal distribution between both consumers int totalMessages = 0; Message msg = null; - Set receivedPtns = Sets.newHashSet(); + Set receivedPtns = new HashSet<>(); while (true) { msg = consumer1.receive(1, TimeUnit.SECONDS); if (msg == null) { @@ -349,7 +346,7 @@ public void testSimpleConsumerEventsWithPartition() throws Exception { Assert.assertEquals(totalMessages, numMsgs / 2); - receivedPtns = Sets.newHashSet(); + receivedPtns = new HashSet<>(); while (true) { msg = consumer2.receive(1, TimeUnit.SECONDS); if (msg == null) { @@ -469,7 +466,7 @@ public void testActiveConsumerFailoverWithDelay() throws Exception { final String topicName = "persistent://prop/use/ns-abc/failover-topic3"; final String subName = "sub1"; final int numMsgs = 100; - 
List> receivedMessages = Lists.newArrayList(); + List> receivedMessages = new ArrayList<>(); ConsumerBuilder consumerBuilder = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscriptionType(SubscriptionType.Failover).messageListener((consumer, msg) -> { @@ -496,7 +493,7 @@ public void testActiveConsumerFailoverWithDelay() throws Exception { PersistentSubscription subRef = topicRef.getSubscription(subName); // enqueue messages - List> futures = Lists.newArrayListWithCapacity(numMsgs); + List> futures = new ArrayList<>(numMsgs); Producer producer = pulsarClient.newProducer().topic(topicName) .enableBatching(false) .messageRoutingMode(MessageRoutingMode.SinglePartition) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentQueueE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentQueueE2ETest.java index 9564449ffcc89..6d111a9806ef5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentQueueE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentQueueE2ETest.java @@ -23,7 +23,7 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -31,7 +31,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.commons.collections4.CollectionUtils; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -48,8 +47,8 @@ import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.policies.data.ConsumerStats; -import org.apache.pulsar.common.policies.data.TopicStats; import 
org.apache.pulsar.common.policies.data.SubscriptionStats; +import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; import org.eclipse.jetty.util.BlockingArrayQueue; @@ -59,8 +58,6 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import com.google.common.collect.Lists; - @Test(groups = "broker") public class PersistentQueueE2ETest extends BrokerTestBase { @@ -110,7 +107,7 @@ public void testSimpleConsumerEvents() throws Exception { assertTrue(subRef.getDispatcher().isConsumerConnected()); assertEquals(subRef.getDispatcher().getType(), SubType.Shared); - List> futures = Lists.newArrayListWithCapacity(numMsgs * 2); + List> futures = new ArrayList<>(numMsgs * 2); Producer producer = pulsarClient.newProducer().topic(topicName) .enableBatching(false) .messageRoutingMode(MessageRoutingMode.SinglePartition) @@ -193,7 +190,7 @@ public void testReplayOnConsumerDisconnect() throws Exception { final String subName = "sub3"; final int numMsgs = 100; - final List messagesProduced = Lists.newArrayListWithCapacity(numMsgs); + final List messagesProduced = new ArrayList<>(numMsgs); final List messagesConsumed = new BlockingArrayQueue<>(numMsgs); Consumer consumer1 = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) @@ -213,7 +210,7 @@ public void testReplayOnConsumerDisconnect() throws Exception { // do nothing }).subscribe(); - List> futures = Lists.newArrayListWithCapacity(numMsgs * 2); + List> futures = new ArrayList<>(numMsgs * 2); Producer producer = pulsarClient.newProducer().topic(topicName).create(); for (int i = 0; i < numMsgs; i++) { String message = "msg-" + i; @@ -286,7 +283,7 @@ public void testRoundRobinBatchDistribution() throws Exception { } }).subscribe(); - List> futures = Lists.newArrayListWithCapacity(numMsgs); + List> futures = new ArrayList<>(numMsgs); Producer producer = 
pulsarClient.newProducer().topic(topicName).create(); for (int i = 0; i < numMsgs * 3; i++) { String message = "msg-" + i; @@ -302,8 +299,8 @@ public void testRoundRobinBatchDistribution() throws Exception { * i.e. each consumer will get 130 messages. In the 14th round, the balance is 411 - 130*3 = 21. Two consumers * will get another batch of 10 messages (Total: 140) and the 3rd one will get the last one (Total: 131) */ - assertTrue(CollectionUtils.subtract(Lists.newArrayList(140, 140, 131), - Lists.newArrayList(counter1.get(), counter2.get(), counter3.get())).isEmpty()); + assertTrue(CollectionUtils.subtract(List.of(140, 140, 131), + List.of(counter1.get(), counter2.get(), counter3.get())).isEmpty()); consumer1.close(); consumer2.close(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java index cc987a2ddc0b7..29a8834b4baa6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java @@ -24,10 +24,10 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.testng.Assert.assertFalse; -import com.google.common.collect.Lists; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; import java.lang.reflect.Method; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Optional; @@ -108,7 +108,7 @@ public void setup(Method m) throws Exception { doReturn(true).when(nsSvc).isServiceUnitActive(any(TopicName.class)); doReturn(CompletableFuture.completedFuture(true)).when(nsSvc).checkTopicOwnership(any(TopicName.class)); - final List addedEntries = Lists.newArrayList(); + final List addedEntries = new ArrayList<>(); for (int i = 0; i < 100; i++) { Position 
pos = ledger.addEntry("entry".getBytes()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 2d8ff60fc27d9..970bfd763a4e5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -44,8 +44,6 @@ import static org.testng.Assert.assertSame; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; @@ -777,7 +775,7 @@ public void testChangeSubscriptionType() throws Exception { consumer.close(); SubType previousSubType = SubType.Exclusive; - for (SubType subType : Lists.newArrayList(SubType.Shared, SubType.Failover, SubType.Key_Shared, + for (SubType subType : List.of(SubType.Shared, SubType.Failover, SubType.Key_Shared, SubType.Exclusive)) { Dispatcher previousDispatcher = sub.getDispatcher(); @@ -1875,7 +1873,7 @@ public void testCompactorSubscription() throws Exception { PositionImpl position = new PositionImpl(1, 1); long ledgerId = 0xc0bfefeL; sub.acknowledgeMessage(Collections.singletonList(position), AckType.Cumulative, - ImmutableMap.of(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY, ledgerId)); + Map.of(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY, ledgerId)); verify(compactedTopic, Mockito.times(1)).newCompactedLedger(position, ledgerId); } @@ -1883,7 +1881,7 @@ public void testCompactorSubscription() throws Exception { @Test public void testCompactorSubscriptionUpdatedOnInit() throws Exception { long ledgerId = 0xc0bfefeL; - Map properties = ImmutableMap.of(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY, ledgerId); + Map properties = Map.of(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY, 
ledgerId); PositionImpl position = new PositionImpl(1, 1); doAnswer((invokactionOnMock) -> properties).when(cursorMock).getProperties(); @@ -1935,7 +1933,7 @@ public void testCompactionTriggeredAfterThresholdSecondInvocation() throws Excep doReturn(compactPromise).when(compactor).compact(anyString()); ManagedCursor subCursor = mock(ManagedCursor.class); - doReturn(Lists.newArrayList(subCursor)).when(ledgerMock).getCursors(); + doReturn(List.of(subCursor)).when(ledgerMock).getCursors(); doReturn(Compactor.COMPACTION_SUBSCRIPTION).when(subCursor).getName(); Policies policies = new Policies(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/RackAwareTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/RackAwareTest.java index 25ae0e2f1fd2f..6de41a548cca1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/RackAwareTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/RackAwareTest.java @@ -40,10 +40,9 @@ import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.NetworkTopologyImpl; import org.apache.bookkeeper.test.ServerTester; +import org.apache.pulsar.bookie.rackawareness.BookieRackAffinityMapping; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.common.policies.data.BookieInfo; -import org.apache.pulsar.bookie.rackawareness.BookieRackAffinityMapping; -import org.assertj.core.util.Lists; import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -132,7 +131,7 @@ public void testPlacement() throws Exception { .map(Map::values) .flatMap(bookieId -> bookieId.stream().map(rackInfo -> rackInfo.get("rack"))) .collect(Collectors.toSet()); - assertTrue(racks.containsAll(Lists.newArrayList("rack-1", "rack-2"))); + assertTrue(racks.containsAll(List.of("rack-1", "rack-2"))); }); BookKeeper bkc = this.pulsar.getBookKeeperClient(); @@ -284,7 +283,7 @@ public void testRackUpdate() throws Exception { 
.flatMap(bookieId -> bookieId.stream().map(rackInfo -> rackInfo.get("rack"))) .collect(Collectors.toSet()); assertEquals(racks.size(), 2); - assertTrue(racks.containsAll(Lists.newArrayList("rack-0", "rack-1"))); + assertTrue(racks.containsAll(List.of("rack-0", "rack-1"))); }); Awaitility.await().untilAsserted(() -> { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java index 9b87b06012e24..09c7e042f58b7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java @@ -28,6 +28,7 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.scurrilous.circe.checksum.Crc32cIntChecksum; import io.netty.buffer.ByteBuf; @@ -51,7 +52,6 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import lombok.Cleanup; - import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCursorCallback; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -107,7 +107,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import org.testng.collections.Lists; /** * Starts 3 brokers that are in 3 different clusters @@ -148,7 +147,7 @@ public void testConfigChange() throws Exception { // This test is to verify that the config change on global namespace is successfully applied in broker during // runtime. 
// Run a set of producer tasks to create the topics - List> results = Lists.newArrayList(); + List> results = new ArrayList<>(); for (int i = 0; i < 10; i++) { final TopicName dest = TopicName.get(BrokerTestUtil.newUniqueName("persistent://pulsar/ns/topic-" + i)); @@ -526,7 +525,7 @@ public void testReplicationOverrides() throws Exception { assertTrue(consumer3.drained()); // Produce a message not replicated to r2 - producer1.produce(1, producer1.newMessage().replicationClusters(Lists.newArrayList("r1", "r3"))); + producer1.produce(1, producer1.newMessage().replicationClusters(List.of("r1", "r3"))); consumer1.receive(1); assertTrue(consumer2.drained()); consumer3.receive(1); @@ -823,7 +822,7 @@ public void testReplicatorProducerNameWithUserDefinedReplicatorPrefix() throws E @Test(timeOut = 60000, priority = -1) public void testResumptionAfterBacklogRelaxed() throws Exception { - List policies = Lists.newArrayList(); + List policies = new ArrayList<>(); policies.add(RetentionPolicy.producer_exception); policies.add(RetentionPolicy.producer_request_hold); @@ -1098,7 +1097,7 @@ public void testReplicatedCluster() throws Exception { byte[] value = "test".getBytes(); // publish message local only - TypedMessageBuilder msg = producer1.newMessage().replicationClusters(Lists.newArrayList("r1")).value(value); + TypedMessageBuilder msg = producer1.newMessage().replicationClusters(List.of("r1")).value(value); msg.send(); assertEquals(consumer1.receive().getValue(), value); @@ -1498,7 +1497,7 @@ public void testWhenUpdateReplicationCluster() throws Exception { assertTrue(topic.getReplicators().containsKey("r2")); }); - admin1.topics().setReplicationClusters(dest.toString(), Lists.newArrayList("r1")); + admin1.topics().setReplicationClusters(dest.toString(), List.of("r1")); Awaitility.await().untilAsserted(() -> { Set replicationClusters = admin1.topics().getReplicationClusters(dest.toString(), false); diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTlsTest.java index 7c28f183c9907..d9bde8d4d5ac0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTlsTest.java @@ -20,14 +20,13 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; - +import java.util.List; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import org.testng.collections.Lists; @Test(groups = "broker") public class ReplicatorTlsTest extends ReplicatorTestBase { @@ -50,7 +49,7 @@ public void cleanup() throws Exception { @Test public void testReplicationClient() throws Exception { log.info("--- Starting ReplicatorTlsTest::testReplicationClient ---"); - for (BrokerService ns : Lists.newArrayList(ns1, ns2, ns3)) { + for (BrokerService ns : List.of(ns1, ns2, ns3)) { ns.getReplicationClients().forEach((cluster, client) -> { assertTrue(((PulsarClientImpl) client).getConfiguration().isUseTls()); assertEquals(((PulsarClientImpl) client).getConfiguration().getTlsTrustCertsFilePath(), diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java index 78e994568e881..afc4d5a7ef766 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java @@ -37,7 +37,6 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; -import com.google.common.collect.Maps; import 
io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandler; @@ -52,6 +51,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -123,8 +123,8 @@ import org.apache.pulsar.common.protocol.PulsarHandler; import org.apache.pulsar.common.topics.TopicList; import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; import org.apache.pulsar.common.util.GracefulExecutorServicesShutdown; +import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; import org.apache.pulsar.common.util.netty.EventLoopUtil; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; import org.apache.pulsar.metadata.impl.ZKMetadataStore; @@ -1422,17 +1422,17 @@ public void testProducerSuccessOnEncryptionRequiredTopic() throws Exception { // Set encryption_required to true Policies policies = mock(Policies.class); policies.encryption_required = true; - policies.topicDispatchRate = Maps.newHashMap(); - policies.clusterSubscribeRate = Maps.newHashMap(); + policies.topicDispatchRate = new HashMap<>(); + policies.clusterSubscribeRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.getPoliciesDispatchRate` - policies.clusterDispatchRate = Maps.newHashMap(); + policies.clusterDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate` - policies.subscriptionDispatchRate = Maps.newHashMap(); + policies.subscriptionDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate` - policies.replicatorDispatchRate = 
Maps.newHashMap(); + policies.replicatorDispatchRate = new HashMap<>(); doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(namespaceResources) .getPoliciesAsync(TopicName.get(encryptionRequiredTopicName).getNamespaceObject()); @@ -1458,17 +1458,17 @@ public void testProducerFailureOnEncryptionRequiredTopic() throws Exception { // Set encryption_required to true Policies policies = mock(Policies.class); policies.encryption_required = true; - policies.topicDispatchRate = Maps.newHashMap(); - policies.clusterSubscribeRate = Maps.newHashMap(); + policies.topicDispatchRate = new HashMap<>(); + policies.clusterSubscribeRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.getPoliciesDispatchRate` - policies.clusterDispatchRate = Maps.newHashMap(); + policies.clusterDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate` - policies.subscriptionDispatchRate = Maps.newHashMap(); + policies.subscriptionDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate` - policies.replicatorDispatchRate = Maps.newHashMap(); + policies.replicatorDispatchRate = new HashMap<>(); doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(namespaceResources) .getPoliciesAsync(TopicName.get(encryptionRequiredTopicName).getNamespaceObject()); @@ -1499,16 +1499,16 @@ public void testProducerFailureOnEncryptionRequiredOnBroker() throws Exception { Policies policies = mock(Policies.class); // Namespace policy doesn't require encryption policies.encryption_required = false; - policies.topicDispatchRate = Maps.newHashMap(); - policies.clusterSubscribeRate = Maps.newHashMap(); + policies.topicDispatchRate = new HashMap<>(); 
+ policies.clusterSubscribeRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE - policies.clusterDispatchRate = Maps.newHashMap(); + policies.clusterDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate` - policies.subscriptionDispatchRate = Maps.newHashMap(); + policies.subscriptionDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate` - policies.replicatorDispatchRate = Maps.newHashMap(); + policies.replicatorDispatchRate = new HashMap<>(); doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(namespaceResources) .getPoliciesAsync(TopicName.get(encryptionRequiredTopicName).getNamespaceObject()); @@ -1536,17 +1536,17 @@ public void testSendSuccessOnEncryptionRequiredTopic() throws Exception { // Set encryption_required to true Policies policies = mock(Policies.class); policies.encryption_required = true; - policies.topicDispatchRate = Maps.newHashMap(); - policies.clusterSubscribeRate = Maps.newHashMap(); + policies.topicDispatchRate = new HashMap<>(); + policies.clusterSubscribeRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.getPoliciesDispatchRate` - policies.clusterDispatchRate = Maps.newHashMap(); + policies.clusterDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate` - policies.subscriptionDispatchRate = Maps.newHashMap(); + policies.subscriptionDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate` - 
policies.replicatorDispatchRate = Maps.newHashMap(); + policies.replicatorDispatchRate = new HashMap<>(); doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(namespaceResources) .getPoliciesAsync(TopicName.get(encryptionRequiredTopicName).getNamespaceObject()); @@ -1580,17 +1580,17 @@ public void testSendFailureOnEncryptionRequiredTopic() throws Exception { // Set encryption_required to true Policies policies = mock(Policies.class); policies.encryption_required = true; - policies.topicDispatchRate = Maps.newHashMap(); - policies.clusterSubscribeRate = Maps.newHashMap(); + policies.topicDispatchRate = new HashMap<>(); + policies.clusterSubscribeRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.getPoliciesDispatchRate` - policies.clusterDispatchRate = Maps.newHashMap(); + policies.clusterDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate` - policies.subscriptionDispatchRate = Maps.newHashMap(); + policies.subscriptionDispatchRate = new HashMap<>(); // add `clusterDispatchRate` otherwise there will be a NPE // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate` - policies.replicatorDispatchRate = Maps.newHashMap(); + policies.replicatorDispatchRate = new HashMap<>(); doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(namespaceResources) .getPoliciesAsync(TopicName.get(encryptionRequiredTopicName).getNamespaceObject()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java index 66e6c7d1ff948..0e73db5291e00 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java @@ -24,7 +24,6 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -367,7 +366,7 @@ public void testConcurrentResetCursor() throws Exception { messageIds.add(msgId); } - List exceptions = Lists.newLinkedList(); + List exceptions = new ArrayList<>(); class ResetCursorThread extends Thread { public void run() { try { @@ -378,7 +377,7 @@ public void run() { } } - List resetCursorThreads = Lists.newLinkedList(); + List resetCursorThreads = new ArrayList<>(); for (int i = 0; i < 4; i ++) { ResetCursorThread thread = new ResetCursorThread(); resetCursorThreads.add(thread); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java index 6334eb3a74f34..d33d5341e0b40 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java @@ -26,11 +26,12 @@ import static org.testng.AssertJUnit.assertNotNull; import static org.testng.AssertJUnit.assertNull; import static org.testng.AssertJUnit.assertTrue; -import com.google.common.collect.Sets; import java.lang.reflect.Field; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -281,7 +282,7 @@ public void testListenerCleanupByPartition() throws Exception { private void prepareData() throws PulsarAdminException { 
admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); admin.tenants().createTenant("system-topic", - new TenantInfoImpl(Sets.newHashSet(), Sets.newHashSet("test"))); + new TenantInfoImpl(new HashSet<>(), Set.of("test"))); admin.namespaces().createNamespace(NAMESPACE1); admin.namespaces().createNamespace(NAMESPACE2); admin.namespaces().createNamespace(NAMESPACE3); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java index 3cd6fc23de744..10b1e4961ccfb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java @@ -24,8 +24,6 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; - -import com.google.common.collect.Sets; import java.lang.reflect.Field; import java.util.Set; import java.util.TreeSet; @@ -165,18 +163,18 @@ public void testContainsStickyKeyHashes(boolean allowOutOfOrderDelivery) throws controller.add(2, 1, 104); if (allowOutOfOrderDelivery) { - assertFalse(controller.containsStickyKeyHashes(Sets.newHashSet(100))); - assertFalse(controller.containsStickyKeyHashes(Sets.newHashSet(101, 102, 103))); - assertFalse(controller.containsStickyKeyHashes(Sets.newHashSet(104, 105))); + assertFalse(controller.containsStickyKeyHashes(Set.of(100))); + assertFalse(controller.containsStickyKeyHashes(Set.of(101, 102, 103))); + assertFalse(controller.containsStickyKeyHashes(Set.of(104, 105))); } else { - assertTrue(controller.containsStickyKeyHashes(Sets.newHashSet(100))); - assertTrue(controller.containsStickyKeyHashes(Sets.newHashSet(101, 102, 103))); - 
assertTrue(controller.containsStickyKeyHashes(Sets.newHashSet(104, 105))); + assertTrue(controller.containsStickyKeyHashes(Set.of(100))); + assertTrue(controller.containsStickyKeyHashes(Set.of(101, 102, 103))); + assertTrue(controller.containsStickyKeyHashes(Set.of(104, 105))); } - assertFalse(controller.containsStickyKeyHashes(Sets.newHashSet())); - assertFalse(controller.containsStickyKeyHashes(Sets.newHashSet(99))); - assertFalse(controller.containsStickyKeyHashes(Sets.newHashSet(105, 106))); + assertFalse(controller.containsStickyKeyHashes(Set.of())); + assertFalse(controller.containsStickyKeyHashes(Set.of(99))); + assertFalse(controller.containsStickyKeyHashes(Set.of(105, 106))); } @Test(dataProvider = "allowOutOfOrderDelivery", timeOut = 10000) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java index c10ab392da052..bd8017390d807 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java @@ -28,10 +28,9 @@ import static org.mockito.Mockito.when; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertNotNull; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import java.lang.reflect.Field; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.UUID; @@ -93,7 +92,7 @@ public void testOverride() throws Exception { EntryFilterWithClassLoader mockFilter = mock(EntryFilterWithClassLoader.class); when(mockFilter.filterEntry(any(Entry.class), any(FilterContext.class))).thenReturn( EntryFilter.FilterResult.REJECT); - ImmutableMap entryFilters = ImmutableMap.of("key", mockFilter); + Map entryFilters = Map.of("key", mockFilter); Field field = 
topicRef.getClass().getSuperclass().getDeclaredField("entryFilters"); field.setAccessible(true); @@ -102,7 +101,7 @@ public void testOverride() throws Exception { EntryFilterWithClassLoader mockFilter1 = mock(EntryFilterWithClassLoader.class); when(mockFilter1.filterEntry(any(Entry.class), any(FilterContext.class))).thenReturn( EntryFilter.FilterResult.ACCEPT); - ImmutableMap entryFilters1 = ImmutableMap.of("key2", mockFilter1); + Map entryFilters1 = Map.of("key2", mockFilter1); Field field2 = pulsar.getBrokerService().getClass().getDeclaredField("entryFilters"); field2.setAccessible(true); field2.set(pulsar.getBrokerService(), entryFilters1); @@ -168,7 +167,7 @@ public void testFilter() throws Exception { EntryFilterWithClassLoader loader1 = spyWithClassAndConstructorArgsRecordingInvocations(EntryFilterWithClassLoader.class, filter1, narClassLoader); EntryFilter filter2 = new EntryFilter2Test(); EntryFilterWithClassLoader loader2 = spyWithClassAndConstructorArgsRecordingInvocations(EntryFilterWithClassLoader.class, filter2, narClassLoader); - field.set(dispatcher, ImmutableList.of(loader1, loader2)); + field.set(dispatcher, List.of(loader1, loader2)); Producer producer = pulsarClient.newProducer(Schema.STRING) .enableBatching(false).topic(topic).create(); @@ -263,7 +262,7 @@ public void testFilter() throws Exception { .getTopicReference(topic).get(); Field field1 = topicRef.getClass().getSuperclass().getDeclaredField("entryFilters"); field1.setAccessible(true); - field1.set(topicRef, ImmutableMap.of("1", loader1, "2", loader2)); + field1.set(topicRef, Map.of("1", loader1, "2", loader2)); cleanup(); verify(loader1, times(1)).close(); @@ -293,7 +292,7 @@ public void testFilteredMsgCount() throws Throwable { EntryFilterWithClassLoader loader1 = spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter1, narClassLoader); EntryFilter filter2 = new EntryFilter2Test(); EntryFilterWithClassLoader loader2 = 
spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter2, narClassLoader); - field.set(dispatcher, ImmutableList.of(loader1, loader2)); + field.set(dispatcher, List.of(loader1, loader2)); for (int i = 0; i < 10; i++) { producer.send("test"); @@ -368,7 +367,7 @@ public void testEntryFilterRescheduleMessageDependingOnConsumerSharedSubscriptio EntryFilter filter2 = new EntryFilterTest(); EntryFilterWithClassLoader loader2 = spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter2, narClassLoader); - field.set(dispatcher, ImmutableList.of(loader1, loader2)); + field.set(dispatcher, List.of(loader1, loader2)); for (int i = 0; i < numMessages; i++) { if (i % 2 == 0) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/KeyValueSchemaCompatibilityCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/KeyValueSchemaCompatibilityCheckTest.java index 78cb9af48cbf2..a2e3bed7bbf52 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/KeyValueSchemaCompatibilityCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/KeyValueSchemaCompatibilityCheckTest.java @@ -18,13 +18,14 @@ */ package org.apache.pulsar.broker.service.schema; -import com.google.common.collect.Maps; +import java.util.HashMap; +import java.util.Map; import lombok.Data; import org.apache.pulsar.client.api.schema.SchemaDefinition; import org.apache.pulsar.client.impl.schema.AvroSchema; import org.apache.pulsar.client.impl.schema.JSONSchema; -import org.apache.pulsar.client.impl.schema.StringSchema; import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; +import org.apache.pulsar.client.impl.schema.StringSchema; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.protocol.schema.SchemaData; import org.apache.pulsar.common.schema.SchemaType; @@ -32,12 +33,10 @@ import org.testng.annotations.BeforeClass; 
import org.testng.annotations.Test; -import java.util.Map; - @Test(groups = "broker") public class KeyValueSchemaCompatibilityCheckTest { - private final Map checkers = Maps.newHashMap(); + private final Map checkers = new HashMap<>(); @Data private static class Foo { @@ -63,7 +62,7 @@ protected void setup() { public void testCheckKeyValueAvroCompatibilityFull() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -77,7 +76,7 @@ public void testCheckKeyValueAvroCompatibilityFull() { public void testCheckKeyValueAvroInCompatibilityFull() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -91,7 +90,7 @@ public void testCheckKeyValueAvroInCompatibilityFull() { public void testCheckKeyValueAvroCompatibilityBackward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData 
fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -105,7 +104,7 @@ public void testCheckKeyValueAvroCompatibilityBackward() { public void testCheckKeyValueAvroInCompatibilityBackward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -119,7 +118,7 @@ public void testCheckKeyValueAvroInCompatibilityBackward() { public void testCheckKeyValueAvroCompatibilityForward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -133,7 +132,7 @@ public void testCheckKeyValueAvroCompatibilityForward() { public void testCheckKeyValueAvroInCompatibilityForward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -147,7 +146,7 @@ public void testCheckKeyValueAvroInCompatibilityForward() { public 
void testCheckKeyValueJsonCompatibilityFull() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -161,7 +160,7 @@ public void testCheckKeyValueJsonCompatibilityFull() { public void testCheckKeyValueJsonInCompatibilityFull() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -175,7 +174,7 @@ public void testCheckKeyValueJsonInCompatibilityFull() { public void testCheckKeyValueJsonCompatibilityBackward() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -189,7 +188,7 @@ public void testCheckKeyValueJsonCompatibilityBackward() { public void testCheckKeyValueJsonInCompatibilityBackWard() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema 
= JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -203,7 +202,7 @@ public void testCheckKeyValueJsonInCompatibilityBackWard() { public void testCheckKeyValueJsonCompatibilityForward() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -217,7 +216,7 @@ public void testCheckKeyValueJsonCompatibilityForward() { public void testCheckKeyValueJsonInCompatibilityForward() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -231,7 +230,7 @@ public void testCheckKeyValueJsonInCompatibilityForward() { public void testCheckKeyAvroValueJsonCompatibilityFull() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); 
properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -245,7 +244,7 @@ public void testCheckKeyAvroValueJsonCompatibilityFull() { public void testCheckKeyAvroValueJsonInCompatibilityFull() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -259,7 +258,7 @@ public void testCheckKeyAvroValueJsonInCompatibilityFull() { public void testCheckKeyAvroValueJsonCompatibilityBackward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -273,7 +272,7 @@ public void testCheckKeyAvroValueJsonCompatibilityBackward() { public void testCheckKeyAvroValueJsonInCompatibilityBackward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); 
SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -287,7 +286,7 @@ public void testCheckKeyAvroValueJsonInCompatibilityBackward() { public void testCheckKeyAvroValueJsonCompatibilityForward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -301,7 +300,7 @@ public void testCheckKeyAvroValueJsonCompatibilityForward() { public void testCheckKeyAvroValueJsonInCompatibilityForward() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); JSONSchema barSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); properties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -315,7 +314,7 @@ public void testCheckKeyAvroValueJsonInCompatibilityForward() { public void testCheckKeyJsonValueAvroCompatibilityFull() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -329,7 +328,7 @@ public void 
testCheckKeyJsonValueAvroCompatibilityFull() { public void testCheckKeyJsonValueAvroInCompatibilityFull() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -343,7 +342,7 @@ public void testCheckKeyJsonValueAvroInCompatibilityFull() { public void testCheckKeyJsonValueAvroCompatibilityBackward() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -357,7 +356,7 @@ public void testCheckKeyJsonValueAvroCompatibilityBackward() { public void testCheckKeyJsonValueAvroInCompatibilityBackward() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -372,7 +371,7 @@ public void testCheckKeyJsonValueAvroInCompatibilityBackward() { public void testCheckKeyJsonValueAvroCompatibilityForward() { JSONSchema fooSchema = 
JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -386,7 +385,7 @@ public void testCheckKeyJsonValueAvroCompatibilityForward() { public void testCheckKeyJsonValueAvroInCompatibilityForward() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map properties = Maps.newHashMap(); + Map properties = new HashMap<>(); properties.put("key.schema.type", String.valueOf(SchemaType.JSON)); properties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -400,10 +399,10 @@ public void testCheckKeyJsonValueAvroInCompatibilityForward() { public void testCheckKeyJsonValueAvroKeyTypeInCompatibility() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map fromProperties = Maps.newHashMap(); + Map fromProperties = new HashMap<>(); fromProperties.put("key.schema.type", String.valueOf(SchemaType.JSON)); fromProperties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); - Map toProperties = Maps.newHashMap(); + Map toProperties = new HashMap<>(); toProperties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); toProperties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -417,10 +416,10 @@ public void 
testCheckKeyJsonValueAvroKeyTypeInCompatibility() { public void testCheckKeyJsonValueAvroValueTypeInCompatibility() { JSONSchema fooSchema = JSONSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map fromProperties = Maps.newHashMap(); + Map fromProperties = new HashMap<>(); fromProperties.put("key.schema.type", String.valueOf(SchemaType.JSON)); fromProperties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); - Map toProperties = Maps.newHashMap(); + Map toProperties = new HashMap<>(); toProperties.put("key.schema.type", String.valueOf(SchemaType.JSON)); toProperties.put("value.schema.type", String.valueOf(SchemaType.JSON)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -434,10 +433,10 @@ public void testCheckKeyJsonValueAvroValueTypeInCompatibility() { public void testCheckPropertiesNullTypeCompatibility() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map fromProperties = Maps.newHashMap(); + Map fromProperties = new HashMap<>(); fromProperties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); fromProperties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); - Map toProperties = Maps.newHashMap(); + Map toProperties = new HashMap<>(); toProperties.put("key.schema.type", String.valueOf(SchemaType.AVRO)); toProperties.put("value.schema.type", String.valueOf(SchemaType.AVRO)); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) @@ -451,8 +450,8 @@ public void testCheckPropertiesNullTypeCompatibility() { public void testCheckSchemaTypeNullCompatibility() { AvroSchema fooSchema = AvroSchema.of(SchemaDefinition.builder().withPojo(Foo.class).build()); AvroSchema barSchema = 
AvroSchema.of(SchemaDefinition.builder().withPojo(Bar.class).build()); - Map fromProperties = Maps.newHashMap(); - Map toProperties = Maps.newHashMap(); + Map fromProperties = new HashMap<>(); + Map toProperties = new HashMap<>(); SchemaData fromSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) .data(KeyValueSchemaImpl.of(fooSchema, barSchema).getSchemaInfo().getSchema()).props(fromProperties).build(); SchemaData toSchemaData = SchemaData.builder().type(SchemaType.KEY_VALUE) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java index 52a684bbf27dc..0a63e7a2b3ed5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java @@ -18,9 +18,28 @@ */ package org.apache.pulsar.broker.service.streamingdispatch; -import com.google.common.base.Charsets; +import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Stack; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import 
java.util.concurrent.atomic.AtomicInteger; import org.apache.bookkeeper.common.util.OrderedExecutor; import org.apache.bookkeeper.common.util.OrderedScheduler; import org.apache.bookkeeper.mledger.AsyncCallbacks; @@ -40,34 +59,13 @@ import org.mockito.stubbing.Answer; import org.testng.annotations.Test; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Stack; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static org.testng.Assert.assertEquals; - /** * Tests for {@link StreamingEntryReader} */ @Test(groups = "flaky") public class StreamingEntryReaderTests extends MockedBookKeeperTestCase { - private static final Charset Encoding = Charsets.UTF_8; + private static final Charset Encoding = StandardCharsets.UTF_8; private PersistentTopic mockTopic; private StreamingDispatcher mockDispatcher; private BrokerService mockBrokerService; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java index 801a55ad419be..7eb96e2bd5880 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java @@ -25,7 +25,6 @@ import static org.testng.AssertJUnit.assertEquals; import com.fasterxml.jackson.databind.JsonNode; import 
com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; import java.io.ByteArrayOutputStream; @@ -409,7 +408,7 @@ public void testAvgMessagesPerEntry() throws Exception { EntryFilterWithClassLoader loader = spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter, narClassLoader); - ImmutableMap entryFilters = ImmutableMap.of("filter", loader); + Map entryFilters = Map.of("filter", loader); PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService() .getTopicReference(topic).get(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java index b1b865727a055..67c60585d2f5b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java @@ -20,21 +20,29 @@ import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.mock; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Multimap; import java.io.ByteArrayOutputStream; import java.lang.reflect.Field; import java.util.Collection; +import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.service.Dispatcher; import org.apache.pulsar.broker.service.EntryFilterSupport; -import org.apache.pulsar.broker.service.plugin.*; +import org.apache.pulsar.broker.service.plugin.EntryFilter; +import org.apache.pulsar.broker.service.plugin.EntryFilterTest; +import org.apache.pulsar.broker.service.plugin.EntryFilterWithClassLoader; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import 
org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.api.*; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.nar.NarClassLoader; import org.apache.pulsar.common.policies.data.SubscriptionStats; @@ -187,7 +195,7 @@ public void testSubscriptionStats(final String topic, final String subName, bool EntryFilter filter1 = new EntryFilterTest(); EntryFilterWithClassLoader loader1 = spyWithClassAndConstructorArgs(EntryFilterWithClassLoader.class, filter1, narClassLoader); - field.set(dispatcher, ImmutableList.of(loader1)); + field.set(dispatcher, List.of(loader1)); } for (int i = 0; i < 100; i++) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionCoordinatorClientTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionCoordinatorClientTest.java index a277e91f13a10..7421a1b69f9b1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionCoordinatorClientTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionCoordinatorClientTest.java @@ -20,8 +20,8 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; -import com.google.common.collect.Lists; import java.lang.reflect.Field; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import org.apache.pulsar.broker.PulsarService; @@ -83,7 +83,7 @@ public void testNewTxn() throws 
TransactionCoordinatorClientException { @Test public void testCommitAndAbort() throws TransactionCoordinatorClientException { TxnID txnID = transactionCoordinatorClient.newTransaction(); - transactionCoordinatorClient.addPublishPartitionToTxn(txnID, Lists.newArrayList("persistent://public/default/testCommitAndAbort")); + transactionCoordinatorClient.addPublishPartitionToTxn(txnID, List.of("persistent://public/default/testCommitAndAbort")); transactionCoordinatorClient.commit(txnID); try { transactionCoordinatorClient.abort(txnID); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/ExceptionHandlerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/ExceptionHandlerTest.java index ed708695dc142..35d6ac0ce9b2f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/ExceptionHandlerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/ExceptionHandlerTest.java @@ -18,6 +18,9 @@ */ package org.apache.pulsar.broker.web; +import static org.eclipse.jetty.http.HttpStatus.INTERNAL_SERVER_ERROR_500; +import static org.eclipse.jetty.http.HttpStatus.PRECONDITION_FAILED_412; +import javax.servlet.http.HttpServletResponse; import lombok.SneakyThrows; import org.apache.pulsar.common.intercept.InterceptException; import org.eclipse.jetty.server.HttpChannel; @@ -25,11 +28,6 @@ import org.mockito.Mockito; import org.testng.annotations.Test; -import javax.servlet.http.HttpServletResponse; - -import static org.eclipse.jetty.http.HttpStatus.INTERNAL_SERVER_ERROR_500; -import static org.eclipse.jetty.http.HttpStatus.PRECONDITION_FAILED_412; - /** * Unit test for ExceptionHandler. 
*/ diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/PulsarWebResourceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/PulsarWebResourceTest.java index 18e3e9613708c..06f80afda95a4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/PulsarWebResourceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/PulsarWebResourceTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.web; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; import javax.servlet.ServletContext; import javax.ws.rs.core.Context; import javax.ws.rs.core.Feature; @@ -36,9 +38,6 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; - /** * A base class for testing subclasses of {@link PulsarWebResource}. */ diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java index c1938adfc5f86..8e6df74d5a0e7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java @@ -19,7 +19,6 @@ package org.apache.pulsar.broker.web; import static org.testng.Assert.assertEquals; - import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response.Status; import org.apache.pulsar.common.policies.data.ErrorData; diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplitManager.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplitManager.java index 42c29fd7ab65e..2f623bfdf1c8d 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplitManager.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplitManager.java @@ 
-25,7 +25,6 @@ import static org.apache.pulsar.sql.presto.PulsarConnectorUtils.restoreNamespaceDelimiterIfNeeded; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Predicate; import io.airlift.log.Logger; import io.trino.spi.TrinoException; import io.trino.spi.block.Block; @@ -48,7 +47,6 @@ import java.util.List; import javax.inject.Inject; import lombok.Data; -import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.ManagedLedgerFactory; @@ -442,19 +440,18 @@ public static PredicatePushdownInfo getPredicatePushdownInfo(String connectorId, private static PositionImpl findPosition(ReadOnlyCursor readOnlyCursor, long timestamp) throws ManagedLedgerException, InterruptedException { - return (PositionImpl) readOnlyCursor.findNewestMatching(SearchAllAvailableEntries, new Predicate() { - @Override - public boolean apply(Entry entry) { - try { - long entryTimestamp = Commands.getEntryTimestamp(entry.getDataBuffer()); - return entryTimestamp <= timestamp; - } catch (Exception e) { - log.error(e, "Failed To deserialize message when finding position with error: %s", e); - } finally { - entry.release(); - } - return false; - } - }); + return (PositionImpl) readOnlyCursor.findNewestMatching( + SearchAllAvailableEntries, + entry -> { + try { + long entryTimestamp = Commands.getEntryTimestamp(entry.getDataBuffer()); + return entryTimestamp <= timestamp; + } catch (Exception e) { + log.error(e, "Failed To deserialize message when finding position with error: %s", e); + } finally { + entry.release(); + } + return false; + }); } } diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java index 
ad1c0f4b03dca..3668921138a3a 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java @@ -18,6 +18,17 @@ */ package org.apache.pulsar.sql.presto; +import static org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertNotNull; import com.fasterxml.jackson.databind.ObjectMapper; import io.airlift.log.Logger; import io.netty.buffer.ByteBuf; @@ -26,6 +37,21 @@ import io.trino.spi.predicate.TupleDomain; import io.trino.testing.TestingConnectorContext; import java.math.BigDecimal; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneId; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import javax.ws.rs.ClientErrorException; +import javax.ws.rs.core.Response; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; @@ -62,33 +88,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; -import javax.ws.rs.ClientErrorException; -import javax.ws.rs.core.Response; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneId; -import 
java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -import static org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static org.testng.Assert.assertNotNull; - public abstract class TestPulsarConnector { protected static final long currentTimeMicros = 1534806330000000L; @@ -664,8 +663,8 @@ public Boolean answer(InvocationOnMock invocationOnMock) throws Throwable { @Override public Position answer(InvocationOnMock invocationOnMock) throws Throwable { Object[] args = invocationOnMock.getArguments(); - com.google.common.base.Predicate predicate - = (com.google.common.base.Predicate) args[1]; + Predicate predicate + = (Predicate) args[1]; String schemaName = TopicName.get( TopicName.get( @@ -676,7 +675,7 @@ public Position answer(InvocationOnMock invocationOnMock) throws Throwable { Integer target = null; for (int i=entries.size() - 1; i >= 0; i--) { Entry entry = entries.get(i); - if (predicate.apply(entry)) { + if (predicate.test(entry)) { target = i; break; } From dfd4882d0c18ffc09945ad5beb26dae136933441 Mon Sep 17 00:00:00 2001 From: Lari Hotari Date: Wed, 28 Sep 2022 11:32:07 +0300 Subject: [PATCH 15/59] [improve][broker] Add a message to a NullPointerException created in ManagedLedgerImpl (#17293) - a NPE with no description is confusing --- .../org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index 71bc8cad6f0b3..a03d38096e184 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -3475,7 +3475,7 @@ public PositionImpl getNextValidPositionInternal(final PositionImpl position) { while (!isValidPosition(nextPosition)) { Long nextLedgerId = ledgers.ceilingKey(nextPosition.getLedgerId() + 1); if (nextLedgerId == null) { - throw new NullPointerException(); + throw new NullPointerException("nextLedgerId is null. No valid next position after " + position); } nextPosition = PositionImpl.get(nextLedgerId, 0); } From 7648a119b10098c0b14f15d5a54a30b0aba7716d Mon Sep 17 00:00:00 2001 From: fengyubiao Date: Wed, 28 Sep 2022 17:56:00 +0800 Subject: [PATCH 16/59] [improve][test] Improve TransactionEndToEndTest to reduce the execution time (#17790) Fixes - https://github.com/apache/pulsar/issues/17623 - https://github.com/apache/pulsar/issues/17637 ### Motivation Manually release resources, including `consumer`, `producer`, `pulsar client`, `transaction`, and `topic`. This saves `setup` and `cleanup` time before and after each method. 
### Modifications - Manually release resources instead of calling `cleanup` & `setup` each method - remove useless method `markDeletePositionCheck` - `Integer.valueOf(int)` instead of `new Integer(int)`, because `new Integer(int)` is deprecated ### Matching PR in forked repository PR in forked repository: - https://github.com/poorbarcode/pulsar/pull/10 --- .../client/impl/TransactionEndToEndTest.java | 155 ++++++++++++++---- ...ctionEndToEndWithoutBatchIndexAckTest.java | 4 +- 2 files changed, 128 insertions(+), 31 deletions(-) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java index aea77bec13678..e3fc05ae0424e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java @@ -80,8 +80,8 @@ import org.apache.pulsar.transaction.coordinator.TransactionSubscription; import org.awaitility.Awaitility; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -96,14 +96,21 @@ public class TransactionEndToEndTest extends TransactionTestBase { protected static final String TOPIC_OUTPUT = NAMESPACE1 + "/output"; protected static final String TOPIC_MESSAGE_ACK_TEST = NAMESPACE1 + "/message-ack-test"; protected static final int NUM_PARTITIONS = 16; - @BeforeMethod + @BeforeClass protected void setup() throws Exception { conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); setUpBase(1, NUM_PARTITIONS, TOPIC_OUTPUT, TOPIC_PARTITION); admin.topics().createPartitionedTopic(TOPIC_MESSAGE_ACK_TEST, 1); } - @AfterMethod(alwaysRun = true) + protected void resetTopicOutput() throws Exception { + 
admin.topics().deletePartitionedTopic(TOPIC_OUTPUT, true); + admin.topics().createPartitionedTopic(TOPIC_OUTPUT, TOPIC_PARTITION); + admin.topics().deletePartitionedTopic(TOPIC_MESSAGE_ACK_TEST, true); + admin.topics().createPartitionedTopic(TOPIC_MESSAGE_ACK_TEST, 1); + } + + @AfterClass(alwaysRun = true) protected void cleanup() { super.internalCleanup(); } @@ -167,6 +174,10 @@ private void produceCommitTest(boolean enableBatch) throws Exception { message = consumer.receive(5, TimeUnit.SECONDS); Assert.assertNull(message); + // cleanup. + producer.close(); + consumer.close(); + resetTopicOutput(); log.info("message commit test enableBatch {}", enableBatch); } @@ -175,7 +186,6 @@ public void produceAbortTest() throws Exception { Transaction txn = getTxn(); String subName = "test"; - @Cleanup Producer producer = pulsarClient .newProducer() .topic(TOPIC_OUTPUT) @@ -188,7 +198,6 @@ public void produceAbortTest() throws Exception { producer.newMessage(txn).value(("Hello Txn - " + i).getBytes(UTF_8)).send(); } - @Cleanup Consumer consumer = pulsarClient .newConsumer() .topic(TOPIC_OUTPUT) @@ -253,6 +262,10 @@ public void produceAbortTest() throws Exception { return flag; }); + // cleanup. + producer.close(); + consumer.close(); + resetTopicOutput(); log.info("finished test partitionAbortTest"); } @@ -311,6 +324,11 @@ private void testAckWithTransactionReduceUnAckMessageCount(boolean enableBatch) } } assertTrue(flag); + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(topicName, true); } @Test @@ -406,6 +424,11 @@ protected void txnAckTest(boolean batchEnable, int maxBatchSize, Assert.assertTrue(reCommitError.getCause() instanceof TransactionNotFoundException); } } + + // cleanup. 
+ producer.close(); + consumer.close(); + admin.topics().delete(normalTopic, true); } @Test @@ -423,6 +446,11 @@ public void testAfterDeleteTopicOtherTopicCanRecover() throws Exception { String content = "test"; producer.send(content); assertEquals(consumer.receive().getValue(), content); + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(topicTwo, true); } @Test @@ -536,6 +564,10 @@ public void txnMessageAckTest() throws Exception { } assertTrue(exist); + // cleanup. + producer.close(); + consumer.close(); + resetTopicOutput(); log.info("receive transaction messages count: {}", receiveCnt); } @@ -638,6 +670,11 @@ private void txnCumulativeAckTest(boolean batchEnable, int maxBatchSize, Subscri message = consumer.receive(1, TimeUnit.SECONDS); Assert.assertNull(message); } + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(normalTopic, true); } private Transaction getTxn() throws Exception { @@ -648,25 +685,6 @@ private Transaction getTxn() throws Exception { .get(); } - private void markDeletePositionCheck(String topic, String subName, boolean equalsWithLastConfirm) throws Exception { - for (int i = 0; i < TOPIC_PARTITION; i++) { - PersistentTopicInternalStats stats = null; - String checkTopic = TopicName.get(topic).getPartition(i).toString(); - for (int j = 0; j < 10; j++) { - stats = admin.topics().getInternalStats(checkTopic, false); - if (stats.lastConfirmedEntry.equals(stats.cursors.get(subName).markDeletePosition)) { - break; - } - Thread.sleep(200); - } - if (equalsWithLastConfirm) { - Assert.assertEquals(stats.cursors.get(subName).markDeletePosition, stats.lastConfirmedEntry); - } else { - Assert.assertNotEquals(stats.cursors.get(subName).markDeletePosition, stats.lastConfirmedEntry); - } - } - } - @Test public void txnMetadataHandlerRecoverTest() throws Exception { String topic = NAMESPACE1 + "/tc-metadata-handler-recover"; @@ -714,6 +732,12 @@ public void txnMetadataHandlerRecoverTest() throws 
Exception { Message message = consumer.receive(); Assert.assertNotNull(message); } + + // cleanup. + producer.close(); + consumer.close(); + recoverPulsarClient.close(); + admin.topics().delete(topic, true); } @Test @@ -748,9 +772,14 @@ public void produceTxnMessageOrderTest() throws Exception { for (int i = 0; i < 1000; i++) { Message message = consumer.receive(5, TimeUnit.SECONDS); Assert.assertNotNull(message); - Assert.assertEquals(Integer.valueOf(new String(message.getData())), new Integer(i)); + Assert.assertEquals(Integer.valueOf(new String(message.getData())), Integer.valueOf(i)); } } + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(topic, true); } @Test @@ -855,10 +884,20 @@ public void produceAndConsumeCloseStateTxnTest() throws Exception { field.setAccessible(true); TransactionImpl.State state = (TransactionImpl.State) field.get(timeoutTxnSkipClientTimeout); assertEquals(state, TransactionImpl.State.ERROR); + + // cleanup. + timeoutTxn.abort(); + producer.close(); + consumer.close(); + admin.topics().delete(topic, true); } @Test public void testTxnTimeoutAtTransactionMetadataStore() throws Exception{ + Collection transactionMetadataStores = + getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores().values(); + long timeoutCountOriginal = transactionMetadataStores.stream() + .mapToLong(store -> store.getMetadataStoreStats().timeoutCount).sum(); TxnID txnID = pulsarServiceList.get(0).getTransactionMetadataStoreService() .newTransaction(new TransactionCoordinatorID(0), 1).get(); Awaitility.await().until(() -> { @@ -869,11 +908,9 @@ public void testTxnTimeoutAtTransactionMetadataStore() throws Exception{ return true; } }); - Collection transactionMetadataStores = - getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores().values(); long timeoutCount = transactionMetadataStores.stream() .mapToLong(store -> store.getMetadataStoreStats().timeoutCount).sum(); - 
Assert.assertEquals(timeoutCount, 1); + Assert.assertEquals(timeoutCount, timeoutCountOriginal + 1); } @Test @@ -914,6 +951,11 @@ public void transactionTimeoutTest() throws Exception { assertEquals(reReceiveMessage.getMessageId(), message.getMessageId()); + // cleanup. + consumeTimeoutTxn.abort(); + producer.close(); + consumer.close(); + admin.topics().delete(topic, true); } @DataProvider(name = "ackType") @@ -979,6 +1021,11 @@ public void txnTransactionRedeliverNullDispatcher(CommandAck.AckType ackType) th } txn.abort().get(); assertTrue(exist); + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(topic, true); } @Test @@ -1036,6 +1083,13 @@ public void oneTransactionOneTopicWithMultiSubTest() throws Exception { } } assertTrue(flag); + + // cleanup. + txn.abort().get(); + producer.close(); + consumer1.close(); + consumer2.close(); + admin.topics().delete(topic, true); } @Test @@ -1070,6 +1124,11 @@ public void testTxnTimeOutInClient() throws Exception{ Assert.assertTrue(e.getCause() instanceof TransactionCoordinatorClientException .InvalidTxnStatusException); } + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(topic, true); } @Test @@ -1138,6 +1197,11 @@ public void testCumulativeAckRedeliverMessages() throws Exception { // then redeliver will not receive any message message = consumer.receive(3, TimeUnit.SECONDS); assertNull(message); + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(topic, true); } @Test @@ -1172,6 +1236,11 @@ public void testSendTxnMessageTimeout() throws Exception { } catch (PulsarClientException ex) { assertTrue(ex instanceof PulsarClientException.TimeoutException); } + + // cleanup. 
+ transaction.abort().get(); + producer.close(); + admin.topics().delete(topic, true); } @Test @@ -1218,6 +1287,12 @@ public void testAckWithTransactionReduceUnackCountNotInPendingAcks() throws Exce // ack one message, the unack count is 4 assertEquals(getPulsarServiceList().get(0).getBrokerService().getTopic(topic, false) .get().get().getSubscription(subName).getConsumers().get(0).getUnackedMessages(), 4); + + // cleanup. + txn.abort().get(); + consumer.close(); + producer.close(); + admin.topics().delete(topic, true); } @Test @@ -1274,6 +1349,14 @@ public void testSendTxnAckMessageToDLQ() throws Exception { assertEquals(((ConsumerImpl) consumer).getAvailablePermits(), 3); assertEquals(value, new String(deadLetterConsumer.receive(3, TimeUnit.SECONDS).getValue())); + + // cleanup. + consumer.close(); + deadLetterConsumer.close(); + producer.close(); + admin.topics().delete(String.format("%s-%s" + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX, + topic, subName), true); + admin.topics().delete(topic, true); } @Test @@ -1341,6 +1424,14 @@ public void testSendTxnAckBatchMessageToDLQ() throws Exception { assertEquals(value1, new String(deadLetterConsumer.receive(3, TimeUnit.SECONDS).getValue())); assertEquals(value2, new String(deadLetterConsumer.receive(3, TimeUnit.SECONDS).getValue())); + + // cleanup. + consumer.close(); + deadLetterConsumer.close(); + producer.close(); + admin.topics().delete(String.format("%s-%s" + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX, + topic, subName), true); + admin.topics().delete(topic, true); } @Test @@ -1400,5 +1491,11 @@ public void testDelayedTransactionMessages() throws Exception { for (int i = 0; i < 10; i++) { assertTrue(receivedMsgs.contains("msg-" + i)); } + + // cleanup. 
+ sharedConsumer.close(); + failoverConsumer.close(); + producer.close(); + admin.topics().delete(topic, true); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java index 1ef3998c3467d..0b50be807be42 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java @@ -20,7 +20,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.client.api.SubscriptionType; -import org.testng.annotations.BeforeMethod; +import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; /** @@ -30,7 +30,7 @@ @Test(groups = "flaky") public class TransactionEndToEndWithoutBatchIndexAckTest extends TransactionEndToEndTest { - @BeforeMethod + @BeforeClass protected void setup() throws Exception { conf.setAcknowledgmentAtBatchIndexLevelEnabled(false); setUpBase(1, NUM_PARTITIONS, TOPIC_OUTPUT, TOPIC_PARTITION); From 31203c3952dfb82057ac1fbee4efc10682a5570b Mon Sep 17 00:00:00 2001 From: Cong Zhao Date: Wed, 28 Sep 2022 18:00:40 +0800 Subject: [PATCH 17/59] [fix][flask-test] Fix and improve LookupRetryTest (#17848) Fixes #17785 ### Motivation The `failureMap` need to be clear after run per unit test. ### Modifications Clear `failureMap` after run per unit test, and only run once `setup()`/`cleanup()` to reduce execution time. 
### Matching PR in forked repository PR in forked repository: https://github.com/coderzc/pulsar/pull/6 --- .../pulsar/client/impl/LookupRetryTest.java | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/LookupRetryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/LookupRetryTest.java index 270d838b61cfb..448f099c386d8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/LookupRetryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/LookupRetryTest.java @@ -20,14 +20,12 @@ import static org.apache.pulsar.common.protocol.Commands.newLookupErrorResponse; import static org.apache.pulsar.common.protocol.Commands.newPartitionMetadataResponse; - import com.google.common.collect.Sets; import java.util.Queue; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -44,20 +42,18 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.testng.Assert; -import org.testng.annotations.AfterMethod; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; public class LookupRetryTest extends MockedPulsarServiceBaseTest { - private static final Logger log = LoggerFactory.getLogger(LookupRetryTest.class); private static final String subscription = "reader-sub"; private final AtomicInteger connectionsCreated = new AtomicInteger(0); private final 
ConcurrentHashMap> failureMap = new ConcurrentHashMap<>(); - @BeforeMethod + @BeforeClass @Override protected void setup() throws Exception { conf.setTopicLevelPoliciesEnabled(false); @@ -69,8 +65,6 @@ protected void setup() throws Exception { admin.tenants().createTenant("public", new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("test"))); admin.namespaces().createNamespace("public/default", Sets.newHashSet("test")); - - connectionsCreated.set(0); } @Override @@ -94,12 +88,18 @@ protected ServerCnx newServerCnx(PulsarService pulsar, String listenerName) thro }; } - @AfterMethod(alwaysRun = true) + @AfterClass(alwaysRun = true) @Override protected void cleanup() throws Exception { super.internalCleanup(); } + @BeforeMethod(alwaysRun = true) + public void reset() { + connectionsCreated.set(0); + failureMap.clear(); + } + PulsarClient newClient() throws Exception { return PulsarClient.builder() .serviceUrl(pulsar.getBrokerServiceUrl()) @@ -244,7 +244,6 @@ public void testCloseConnectionOnBrokerTimeout() throws Exception { try (PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl) .maxNumberOfRejectedRequestPerConnection(100) - .maxNumberOfRejectedRequestPerConnection(1) .connectionTimeout(2, TimeUnit.SECONDS) .operationTimeout(1, TimeUnit.SECONDS) .lookupTimeout(10, TimeUnit.SECONDS) From 0678b821765dc1e4e6ce75ed84641efb64c85335 Mon Sep 17 00:00:00 2001 From: Andras Beni Date: Wed, 28 Sep 2022 12:38:54 +0200 Subject: [PATCH 18/59] [improve][test] Add integration test for websocket (#17843) --- build/run_integration_group.sh | 4 +- .../latest-version-image/Dockerfile | 4 +- .../latest-version-image/conf/websocket.conf | 27 ++++ .../scripts/run-websocket.sh | 4 +- .../containers/WebSocketContainer.java | 41 +++++ .../proxy/TestProxyWithWebSocket.java | 128 --------------- .../integration/topologies/PulsarCluster.java | 8 +- .../topologies/PulsarClusterSpec.java | 6 + .../integration/websocket/TestWebSocket.java | 74 +++++++++ 
.../websocket/WebSocketTestSuite.java | 148 ++++++++++++++++++ ...oxy-websocket.xml => pulsar-websocket.xml} | 4 +- 11 files changed, 311 insertions(+), 137 deletions(-) create mode 100644 tests/docker-images/latest-version-image/conf/websocket.conf create mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/WebSocketContainer.java delete mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/proxy/TestProxyWithWebSocket.java create mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/TestWebSocket.java create mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/WebSocketTestSuite.java rename tests/integration/src/test/resources/{pulsar-proxy-websocket.xml => pulsar-websocket.xml} (84%) diff --git a/build/run_integration_group.sh b/build/run_integration_group.sh index 71a816a9b2b73..ab69544308ff3 100755 --- a/build/run_integration_group.sh +++ b/build/run_integration_group.sh @@ -120,8 +120,8 @@ test_group_messaging() { mvn_run_integration_test "$@" -DintegrationTestSuiteFile=pulsar-messaging.xml -DintegrationTests # run integration proxy tests mvn_run_integration_test --skip-build-deps "$@" -DintegrationTestSuiteFile=pulsar-proxy.xml -DintegrationTests - # run integration proxy with WebSocket tests - mvn_run_integration_test --skip-build-deps "$@" -DintegrationTestSuiteFile=pulsar-proxy-websocket.xml -DintegrationTests + # run integration WebSocket tests + mvn_run_integration_test --skip-build-deps "$@" -DintegrationTestSuiteFile=pulsar-websocket.xml -DintegrationTests } test_group_plugin() { diff --git a/tests/docker-images/latest-version-image/Dockerfile b/tests/docker-images/latest-version-image/Dockerfile index 16644a2c8051e..b23e35a2e5d61 100644 --- a/tests/docker-images/latest-version-image/Dockerfile +++ b/tests/docker-images/latest-version-image/Dockerfile @@ -70,7 +70,7 @@ RUN mkdir -p /var/log/pulsar && mkdir -p 
/var/run/supervisor/ && mkdir -p /pulsa COPY conf/supervisord.conf /etc/supervisord.conf COPY conf/global-zk.conf conf/local-zk.conf conf/bookie.conf conf/broker.conf conf/functions_worker.conf \ - conf/proxy.conf conf/presto_worker.conf /etc/supervisord/conf.d/ + conf/proxy.conf conf/presto_worker.conf conf/websocket.conf /etc/supervisord/conf.d/ COPY ssl/ca.cert.pem ssl/broker.key-pk8.pem ssl/broker.cert.pem \ ssl/admin.key-pk8.pem ssl/admin.cert.pem \ @@ -81,7 +81,7 @@ COPY ssl/ca.cert.pem ssl/broker.key-pk8.pem ssl/broker.cert.pem \ COPY scripts/init-cluster.sh scripts/run-global-zk.sh scripts/run-local-zk.sh \ scripts/run-bookie.sh scripts/run-broker.sh scripts/run-functions-worker.sh scripts/run-proxy.sh scripts/run-presto-worker.sh \ - scripts/run-standalone.sh \ + scripts/run-standalone.sh scripts/run-websocket.sh \ /pulsar/bin/ COPY conf/presto/jvm.config /pulsar/trino/conf diff --git a/tests/docker-images/latest-version-image/conf/websocket.conf b/tests/docker-images/latest-version-image/conf/websocket.conf new file mode 100644 index 0000000000000..7a0fc3e009659 --- /dev/null +++ b/tests/docker-images/latest-version-image/conf/websocket.conf @@ -0,0 +1,27 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +[program:websocket] +autostart=false +redirect_stderr=true +stdout_logfile=/var/log/pulsar/pulsar-websocket.log +directory=/pulsar +environment=PULSAR_MEM="-Xmx128M",PULSAR_GC="-XX:+UseZGC" +command=/pulsar/bin/pulsar websocket +user=pulsar diff --git a/tests/docker-images/latest-version-image/scripts/run-websocket.sh b/tests/docker-images/latest-version-image/scripts/run-websocket.sh index 4836a890bda46..a49ee11176868 100755 --- a/tests/docker-images/latest-version-image/scripts/run-websocket.sh +++ b/tests/docker-images/latest-version-image/scripts/run-websocket.sh @@ -18,11 +18,11 @@ # under the License. # -bin/apply-config-from-env.py conf/proxy.conf && \ +bin/apply-config-from-env.py conf/websocket.conf && \ bin/apply-config-from-env.py conf/pulsar_env.sh if [ -z "$NO_AUTOSTART" ]; then - sed -i 's/autostart=.*/autostart=true/' /etc/supervisord/conf.d/proxy.conf + sed -i 's/autostart=.*/autostart=true/' /etc/supervisord/conf.d/websocket.conf fi bin/watch-znode.py -z $zookeeperServers -p /initialized-$clusterName -w diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/WebSocketContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/WebSocketContainer.java new file mode 100644 index 0000000000000..658f6e1016a92 --- /dev/null +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/WebSocketContainer.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.containers; + +import org.apache.pulsar.tests.integration.utils.DockerUtils; + +public class WebSocketContainer extends PulsarContainer { + + public WebSocketContainer(String clusterName, String hostName) { + super(clusterName, hostName, hostName, + "bin/run-websocket.sh", + -1, + BROKER_HTTP_PORT, "/admin/v2/proxy-stats/stats"); + } + + public String getWSUrl() { + return "ws://" + getHost() + ":" + getMappedPort(BROKER_HTTP_PORT); + } + + @Override + protected void afterStart() { + DockerUtils.runCommandAsyncWithLogging(this.dockerClient, this.getContainerId(), + "tail", "-f", "/var/log/pulsar/pulsar-websocket.log"); + } +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/proxy/TestProxyWithWebSocket.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/proxy/TestProxyWithWebSocket.java deleted file mode 100644 index 5a1b310b6c6e7..0000000000000 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/proxy/TestProxyWithWebSocket.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.tests.integration.proxy; - -import lombok.Cleanup; -import org.apache.pulsar.client.admin.PulsarAdmin; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.tests.integration.suites.PulsarTestSuite; -import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec; -import org.awaitility.Awaitility; -import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.websocket.api.Session; -import org.eclipse.jetty.websocket.api.WebSocketListener; -import org.eclipse.jetty.websocket.api.annotations.WebSocket; -import org.eclipse.jetty.websocket.client.WebSocketClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testng.Assert; -import org.testng.annotations.Test; - -import java.net.URI; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.Future; - -/** - * Test cases for proxy. 
- */ -public class TestProxyWithWebSocket extends PulsarTestSuite { - private static final Logger log = LoggerFactory.getLogger(TestProxyWithWebSocket.class); - - @Override - protected PulsarClusterSpec.PulsarClusterSpecBuilder beforeSetupCluster( - String clusterName, - PulsarClusterSpec.PulsarClusterSpecBuilder specBuilder) { - Map envs = new HashMap<>(); - envs.put("webSocketServiceEnabled", "true"); - specBuilder.proxyEnvs(envs); - return super.beforeSetupCluster(clusterName, specBuilder); - } - - @Test - public void testWebSocket() throws Exception { - - final String tenant = "proxy-test-" + randomName(10); - final String namespace = tenant + "/ns1"; - - @Cleanup - PulsarAdmin admin = PulsarAdmin.builder() - .serviceHttpUrl(pulsarCluster.getHttpServiceUrl()) - .build(); - - admin.tenants().createTenant(tenant, - new TenantInfoImpl(Collections.emptySet(), Collections.singleton(pulsarCluster.getClusterName()))); - - admin.namespaces().createNamespace(namespace, Collections.singleton(pulsarCluster.getClusterName())); - - HttpClient httpClient = new HttpClient(); - WebSocketClient webSocketClient = new WebSocketClient(httpClient); - webSocketClient.start(); - MyWebSocket myWebSocket = new MyWebSocket(); - String webSocketUri = pulsarCluster.getProxy().getHttpServiceUrl().replaceFirst("http", "ws") - + "/ws/v2/producer/persistent/" + namespace + "/my-topic"; - Future sessionFuture = webSocketClient.connect(myWebSocket, - URI.create(webSocketUri)); - sessionFuture.get().getRemote().sendString("{\n" + - " \"payload\": \"SGVsbG8gV29ybGQ=\",\n" + - " \"properties\": {\"key1\": \"value1\", \"key2\": \"value2\"},\n" + - " \"context\": \"1\"\n" + - "}"); - - Awaitility.await().untilAsserted(() -> { - String response = myWebSocket.getResponse(); - Assert.assertNotNull(response); - Assert.assertTrue(response.contains("ok")); - }); - } - - @WebSocket - public static class MyWebSocket implements WebSocketListener { - Queue incomingMessages = new ArrayBlockingQueue<>(10); - 
@Override - public void onWebSocketBinary(byte[] bytes, int i, int i1) { - } - - @Override - public void onWebSocketText(String s) { - incomingMessages.add(s); - } - - @Override - public void onWebSocketClose(int i, String s) { - } - - @Override - public void onWebSocketConnect(Session session) { - - } - - @Override - public void onWebSocketError(Throwable throwable) { - - } - - public String getResponse() { - return incomingMessages.poll(); - } - } -} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java index 3b94808360b43..1ef0bba1b6803 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java @@ -95,6 +95,7 @@ public static PulsarCluster forSpec(PulsarClusterSpec spec, CSContainer csContai @Getter private Map sqlFollowWorkerContainers; private Map> externalServices = Collections.emptyMap(); + private Map> externalServiceEnvs; private final boolean enablePrestoWorker; private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean sharedCsContainer) { @@ -280,14 +281,19 @@ public void start() throws Exception { // start external services this.externalServices = spec.externalServices; + this.externalServiceEnvs = spec.externalServiceEnvs; if (null != externalServices) { externalServices.entrySet().parallelStream().forEach(service -> { GenericContainer serviceContainer = service.getValue(); serviceContainer.withNetwork(network); serviceContainer.withNetworkAliases(service.getKey()); + if (null != externalServiceEnvs && null != externalServiceEnvs.get(service.getKey())) { + Map env = externalServiceEnvs.getOrDefault(service.getKey(), Collections.emptyMap()); + serviceContainer.withEnv(env); + } 
PulsarContainer.configureLeaveContainerRunning(serviceContainer); serviceContainer.start(); - log.info("Successfully start external service {}.", service.getKey()); + log.info("Successfully started external service {}.", service.getKey()); }); } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java index 016f8bb4c98cd..6f93ce9f2561a 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java @@ -112,6 +112,12 @@ public class PulsarClusterSpec { @Singular Map> externalServices; + /** + * Specify envs for external services. + */ + @Singular + Map> externalServiceEnvs; + /** * Returns the flag whether to enable/disable container log. * diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/TestWebSocket.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/TestWebSocket.java new file mode 100644 index 0000000000000..312b64b6a0921 --- /dev/null +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/TestWebSocket.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.websocket; + + +import com.google.common.collect.ImmutableMap; +import org.apache.pulsar.tests.integration.containers.BrokerContainer; +import org.apache.pulsar.tests.integration.containers.CSContainer; +import org.apache.pulsar.tests.integration.containers.WebSocketContainer; +import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec; +import org.testng.annotations.Test; +import java.util.Collections; +import java.util.Map; + +/** + * Test cases for websocket. + */ +public class TestWebSocket extends WebSocketTestSuite { + + public static final String WEBSOCKET = "websocket"; + + @Override + protected PulsarClusterSpec.PulsarClusterSpecBuilder beforeSetupCluster( + String clusterName, + PulsarClusterSpec.PulsarClusterSpecBuilder specBuilder) { + + Map enableWebSocket = Collections.singletonMap("webSocketServiceEnabled", "true"); + specBuilder.brokerEnvs(enableWebSocket); + specBuilder.proxyEnvs(enableWebSocket); + + specBuilder.externalService(WEBSOCKET, new WebSocketContainer(clusterName, WEBSOCKET)); + specBuilder.externalServiceEnv(WEBSOCKET, ImmutableMap.builder() + .put("configurationMetadataStoreUrl", CSContainer.NAME + ":" + CSContainer.CS_PORT) + .put("webServicePort", "" + WebSocketContainer.BROKER_HTTP_PORT) + .put("clusterName", clusterName) + .build()); + return super.beforeSetupCluster(clusterName, specBuilder); + } + + @Test + public void testExternalService() throws Exception { + WebSocketContainer service = (WebSocketContainer) 
pulsarCluster.getExternalServices().get(WEBSOCKET); + testWebSocket(service.getWSUrl()); + } + + @Test + public void testBroker() throws Exception { + BrokerContainer broker = pulsarCluster.getAnyBroker(); + String url = "ws://" + broker.getHost() + ":" + broker.getMappedPort(BrokerContainer.BROKER_HTTP_PORT); + testWebSocket(url); + } + + @Test + public void testProxy() throws Exception { + String url = pulsarCluster.getProxy().getHttpServiceUrl().replaceFirst("http", "ws"); + testWebSocket(url); + } +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/WebSocketTestSuite.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/WebSocketTestSuite.java new file mode 100644 index 0000000000000..75f77c6675ae7 --- /dev/null +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/websocket/WebSocketTestSuite.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.tests.integration.websocket; + +import com.fasterxml.jackson.core.type.TypeReference; +import lombok.Cleanup; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.tests.integration.suites.PulsarTestSuite; +import org.eclipse.jetty.client.HttpClient; +import org.eclipse.jetty.websocket.api.WebSocketAdapter; +import org.eclipse.jetty.websocket.api.annotations.WebSocket; +import org.eclipse.jetty.websocket.client.WebSocketClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.Assert; +import java.io.IOException; +import java.net.URI; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; + +public abstract class WebSocketTestSuite extends PulsarTestSuite { + private static final Logger log = LoggerFactory.getLogger(WebSocketTestSuite.class); + + protected void testWebSocket(String url) throws Exception { + + final String tenant = "websocket-test-" + randomName(10); + final String namespace = tenant + "/ns1"; + final String topic = namespace + "/topic-" + randomName(5); + + @Cleanup + PulsarAdmin admin = PulsarAdmin.builder() + .serviceHttpUrl(pulsarCluster.getHttpServiceUrl()) + .build(); + + admin.tenants().createTenant(tenant, + new TenantInfoImpl(Collections.emptySet(), Collections.singleton(pulsarCluster.getClusterName()))); + + admin.namespaces().createNamespace(namespace, Collections.singleton(pulsarCluster.getClusterName())); + + log.debug("Using url {}", url); + + @Cleanup + WebSocketConsumer consumer = new WebSocketConsumer(url, topic); + log.debug("Created ws consumer"); + + @Cleanup + WebSocketPublisher publisher = new WebSocketPublisher(url, topic); + log.debug("Created ws publisher"); + + 
publisher.send("SGVsbG8gV29ybGQ="); + log.debug("Sent message through publisher"); + + Map response = publisher.getResponse(); + Assert.assertEquals(response.get("result"), "ok", "Bad response: " + response); + log.debug("Publisher received response {}", response); + + String received = consumer.getPayloadFromResponse(); + log.debug("Consumer received message {} ", received); + Assert.assertEquals(received, "SGVsbG8gV29ybGQ="); + } + + @WebSocket + public static class Client extends WebSocketAdapter implements AutoCloseable { + final BlockingQueue incomingMessages = new ArrayBlockingQueue<>(10); + private final WebSocketClient client; + + Client(String webSocketUri) throws Exception { + HttpClient httpClient = new HttpClient(); + client = new WebSocketClient(httpClient); + client.start(); + client.connect(this, URI.create(webSocketUri)).get(); + } + + void sendText(String payload) throws IOException { + getSession().getRemote().sendString(payload); + } + + @Override + public void onWebSocketText(String s) { + incomingMessages.add(s); + } + + Map getResponse() throws Exception { + String response = incomingMessages.poll(5, TimeUnit.SECONDS); + if (response == null) { + Assert.fail("Did not get websocket response within timeout"); + } + return ObjectMapperFactory.getThreadLocal().readValue(response, new TypeReference<>() {}); + + } + + @Override + public void close() throws Exception { + client.stop(); + } + } + + @WebSocket + protected static class WebSocketPublisher extends Client { + + WebSocketPublisher(String url, String topic) throws Exception { + super(url + "/ws/v2/producer/persistent/" + topic); + } + + void send(String payload) throws IOException { + sendText("{\n" + + " \"payload\": \"" + payload + "\",\n" + + " \"properties\": {\"key1\": \"value1\", \"key2\": \"value2\"},\n" + + " \"context\": \"1\"\n" + + "}"); + } + } + + @WebSocket + protected static class WebSocketConsumer extends Client { + + WebSocketConsumer(String url, String topic) throws 
Exception { + super(url + "/ws/v2/consumer/persistent/" + topic + "/" + randomName(8)); + } + + String getPayloadFromResponse() throws Exception { + Map response = getResponse(); + return String.valueOf(response.get("payload")); + } + } + +} diff --git a/tests/integration/src/test/resources/pulsar-proxy-websocket.xml b/tests/integration/src/test/resources/pulsar-websocket.xml similarity index 84% rename from tests/integration/src/test/resources/pulsar-proxy-websocket.xml rename to tests/integration/src/test/resources/pulsar-websocket.xml index 6e32ac9f454a0..87bf832d4e40a 100644 --- a/tests/integration/src/test/resources/pulsar-proxy-websocket.xml +++ b/tests/integration/src/test/resources/pulsar-websocket.xml @@ -19,10 +19,10 @@ --> - + - + \ No newline at end of file From 716f5e258ccc1429b96c03157fcd369355fcf9c5 Mon Sep 17 00:00:00 2001 From: Qiang Zhao Date: Wed, 28 Sep 2022 21:55:09 +0800 Subject: [PATCH 19/59] [improve][ML] Print log when delete empty ledger. (#17859) --- .../org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index a03d38096e184..aa98d258bcb5d 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -458,9 +458,7 @@ private synchronized void initializeBookKeeper(final ManagedLedgerInitializeLedg } else { iterator.remove(); bookKeeper.asyncDeleteLedger(li.getLedgerId(), (rc, ctx) -> { - if (log.isDebugEnabled()) { - log.debug("[{}] Deleted empty ledger ledgerId={} rc={}", name, li.getLedgerId(), rc); - } + log.info("[{}] Deleted empty ledger ledgerId={} rc={}", name, li.getLedgerId(), rc); }, null); } } From 62d900f6792b6540bf0b992cd6e868b8bea6231c Mon Sep 17 
00:00:00 2001 From: Jiwei Guo Date: Wed, 28 Sep 2022 21:55:26 +0800 Subject: [PATCH 20/59] Fix NPE when ResourceGroupService execute scheduled task. (#17840) --- .../apache/pulsar/broker/PulsarService.java | 8 ++++ .../resourcegroup/ResourceGroupService.java | 37 ++++++++++++++++--- .../ResourceGroupServiceTest.java | 8 ++++ 3 files changed, 47 insertions(+), 6 deletions(-) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java index c69c6e2418f7c..e658115e1c28b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java @@ -419,6 +419,14 @@ public CompletableFuture closeAsync() { } this.resourceUsageTransportManager = null; } + if (this.resourceGroupServiceManager != null) { + try { + this.resourceGroupServiceManager.close(); + } catch (Exception e) { + LOG.warn("ResourceGroupServiceManager closing failed {}", e.getMessage()); + } + this.resourceGroupServiceManager = null; + } if (this.webService != null) { try { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java index 4bb1bc8ab243a..c74681fdb731a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java @@ -53,7 +53,7 @@ * * @see PulsarService */ -public class ResourceGroupService { +public class ResourceGroupService implements AutoCloseable{ /** * Default constructor. 
*/ @@ -303,6 +303,21 @@ public ResourceGroup getNamespaceResourceGroup(NamespaceName namespaceName) { return this.namespaceToRGsMap.get(namespaceName); } + @Override + public void close() throws Exception { + if (aggregateLocalUsagePeriodicTask != null) { + aggregateLocalUsagePeriodicTask.cancel(true); + } + if (calculateQuotaPeriodicTask != null) { + calculateQuotaPeriodicTask.cancel(true); + } + resourceGroupsMap.clear(); + tenantToRGsMap.clear(); + namespaceToRGsMap.clear(); + topicProduceStats.clear(); + topicConsumeStats.clear(); + } + /** * Increments usage stats for the resource groups associated with the given namespace and tenant. * Expected to be called when a message is produced or consumed on a topic, or when we calculate @@ -565,17 +580,17 @@ protected void aggregateResourceGroupLocalUsages() { ServiceConfiguration config = pulsar.getConfiguration(); long newPeriodInSeconds = config.getResourceUsageTransportPublishIntervalInSecs(); if (newPeriodInSeconds != this.aggregateLocalUsagePeriodInSeconds) { - if (this.aggreagteLocalUsagePeriodicTask == null) { + if (this.aggregateLocalUsagePeriodicTask == null) { log.error("aggregateResourceGroupLocalUsages: Unable to find running task to cancel when " + "publish period changed from {} to {} {}", this.aggregateLocalUsagePeriodInSeconds, newPeriodInSeconds, timeUnitScale); } else { - boolean cancelStatus = this.aggreagteLocalUsagePeriodicTask.cancel(true); + boolean cancelStatus = this.aggregateLocalUsagePeriodicTask.cancel(true); log.info("aggregateResourceGroupLocalUsages: Got status={} in cancel of periodic " + "when publish period changed from {} to {} {}", cancelStatus, this.aggregateLocalUsagePeriodInSeconds, newPeriodInSeconds, timeUnitScale); } - this.aggreagteLocalUsagePeriodicTask = pulsar.getExecutor().scheduleAtFixedRate( + this.aggregateLocalUsagePeriodicTask = pulsar.getExecutor().scheduleAtFixedRate( catchingAndLoggingThrowables(this::aggregateResourceGroupLocalUsages), newPeriodInSeconds, 
newPeriodInSeconds, @@ -680,7 +695,7 @@ private void initialize() { ServiceConfiguration config = this.pulsar.getConfiguration(); long periodInSecs = config.getResourceUsageTransportPublishIntervalInSecs(); this.aggregateLocalUsagePeriodInSeconds = this.resourceUsagePublishPeriodInSeconds = periodInSecs; - this.aggreagteLocalUsagePeriodicTask = this.pulsar.getExecutor().scheduleAtFixedRate( + this.aggregateLocalUsagePeriodicTask = this.pulsar.getExecutor().scheduleAtFixedRate( catchingAndLoggingThrowables(this::aggregateResourceGroupLocalUsages), periodInSecs, periodInSecs, @@ -737,7 +752,7 @@ private void checkRGCreateParams(String rgName, org.apache.pulsar.common.policie // The task that periodically re-calculates the quota budget for local usage. - private ScheduledFuture aggreagteLocalUsagePeriodicTask; + private ScheduledFuture aggregateLocalUsagePeriodicTask; private long aggregateLocalUsagePeriodInSeconds; // The task that periodically re-calculates the quota budget for local usage. 
@@ -840,4 +855,14 @@ ConcurrentHashMap getTopicConsumeStats() { ConcurrentHashMap getTopicProduceStats() { return this.topicProduceStats; } + + @VisibleForTesting + ScheduledFuture getAggregateLocalUsagePeriodicTask() { + return this.aggregateLocalUsagePeriodicTask; + } + + @VisibleForTesting + ScheduledFuture getCalculateQuotaPeriodicTask() { + return this.calculateQuotaPeriodicTask; + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupServiceTest.java index e0e3ec9c16a23..86dff398f9774 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupServiceTest.java @@ -257,6 +257,14 @@ public void testResourceGroupOps() throws PulsarAdminException, InterruptedExcep Assert.assertEquals(rgs.getNumResourceGroups(), 0); } + @Test + public void testClose() throws Exception { + ResourceGroupService service = new ResourceGroupService(pulsar, TimeUnit.MILLISECONDS, null, null); + service.close(); + Assert.assertTrue(service.getAggregateLocalUsagePeriodicTask().isCancelled()); + Assert.assertTrue(service.getCalculateQuotaPeriodicTask().isCancelled()); + } + private ResourceGroupService rgs; int numAnonymousQuotaCalculations; From bde5ac7fa0ec438430b5fb1912a74bcef0d73445 Mon Sep 17 00:00:00 2001 From: Lari Hotari Date: Wed, 28 Sep 2022 18:24:26 +0300 Subject: [PATCH 21/59] [fix][broker] Make timestamp fields thread safe by using volatile (#17252) - fixes issue with stats where timestamps might be inconsistent because of visibility issues - fields should be volatile to ensure visibility of updated values in a consistent manner - in replication, the lastDataMessagePublishedTimestamp field in PersistentTopic might be inconsistent unless volatile is used --- 
.../bookkeeper/mledger/impl/ManagedCursorImpl.java | 4 ++-- .../bookkeeper/mledger/impl/ManagedLedgerImpl.java | 8 ++++---- .../org/apache/pulsar/broker/service/Consumer.java | 6 +++--- .../service/persistent/PersistentSubscription.java | 6 +++--- .../broker/service/persistent/PersistentTopic.java | 2 +- .../persistent/ReplicatedSubscriptionsController.java | 2 +- .../org/apache/pulsar/compaction/CompactionRecord.java | 10 +++++----- .../pulsar/client/impl/auth/AuthenticationAthenz.java | 2 +- .../apache/pulsar/client/impl/AutoClusterFailover.java | 4 ++-- 9 files changed, 22 insertions(+), 22 deletions(-) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java index fe1798a0d7eff..b67c74fdc7956 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java @@ -260,11 +260,11 @@ public void triggerFailed(ManagedLedgerException exception) { AtomicIntegerFieldUpdater.newUpdater(ManagedCursorImpl.class, "pendingMarkDeletedSubmittedCount"); @SuppressWarnings("unused") private volatile int pendingMarkDeletedSubmittedCount = 0; - private long lastLedgerSwitchTimestamp; + private volatile long lastLedgerSwitchTimestamp; private final Clock clock; // The last active time (Unix time, milliseconds) of the cursor - private long lastActive; + private volatile long lastActive; public enum State { Uninitialized, // Cursor is being initialized diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index aa98d258bcb5d..254ee767bc7fc 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -215,13 +215,13 @@ public class ManagedLedgerImpl implements ManagedLedger, CreateCallback { private volatile LedgerHandle currentLedger; private long currentLedgerEntries = 0; private long currentLedgerSize = 0; - private long lastLedgerCreatedTimestamp = 0; - private long lastLedgerCreationFailureTimestamp = 0; + private volatile long lastLedgerCreatedTimestamp = 0; + private volatile long lastLedgerCreationFailureTimestamp = 0; private long lastLedgerCreationInitiationTimestamp = 0; private long lastOffloadLedgerId = 0; - private long lastOffloadSuccessTimestamp = 0; - private long lastOffloadFailureTimestamp = 0; + private volatile long lastOffloadSuccessTimestamp = 0; + private volatile long lastOffloadFailureTimestamp = 0; private int minBacklogCursorsForCaching = 0; private int minBacklogEntriesForCaching = 1000; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java index cc47976e82a31..767c7bb92747d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java @@ -86,9 +86,9 @@ public class Consumer { private final LongAdder bytesOutCounter; private final Rate messageAckRate; - private long lastConsumedTimestamp; - private long lastAckedTimestamp; - private long lastConsumedFlowTimestamp; + private volatile long lastConsumedTimestamp; + private volatile long lastAckedTimestamp; + private volatile long lastConsumedFlowTimestamp; private Rate chunkedMessageRate; // Represents how many messages we can safely send to the consumer without diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java index 
4afc5b6bdf9f8..855bec48527e5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java @@ -107,9 +107,9 @@ public class PersistentSubscription extends AbstractSubscription implements Subs private volatile int isFenced = FALSE; private PersistentMessageExpiryMonitor expiryMonitor; - private long lastExpireTimestamp = 0L; - private long lastConsumedFlowTimestamp = 0L; - private long lastMarkDeleteAdvancedTimestamp = 0L; + private volatile long lastExpireTimestamp = 0L; + private volatile long lastConsumedFlowTimestamp = 0L; + private volatile long lastMarkDeleteAdvancedTimestamp = 0L; // for connected subscriptions, message expiry will be checked if the backlog is greater than this threshold private static final int MINIMUM_BACKLOG_FOR_EXPIRY_CHECK = 1000; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index 33d97970569ed..fdcaaf2ffbdb3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -224,7 +224,7 @@ protected TopicStatsHelper initialValue() { protected final TransactionBuffer transactionBuffer; // Record the last time a data message (ie: not an internal Pulsar marker) is published on the topic - private long lastDataMessagePublishedTimestamp = 0; + private volatile long lastDataMessagePublishedTimestamp = 0; private static class TopicStatsHelper { public double averageMsgSize; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java index 1e1245ed36b6f..cf46fac4a24a8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java @@ -59,7 +59,7 @@ public class ReplicatedSubscriptionsController implements AutoCloseable, Topic.P private final String localCluster; // The timestamp of when the last snapshot was initiated - private long lastCompletedSnapshotStartTime = 0; + private volatile long lastCompletedSnapshotStartTime = 0; private String lastCompletedSnapshotId; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactionRecord.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactionRecord.java index 4a8274389eb62..b6ae55b00b84b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactionRecord.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactionRecord.java @@ -30,16 +30,16 @@ public class CompactionRecord { 200_000, 1000_000 }; @Getter - private long lastCompactionRemovedEventCount = 0L; + private volatile long lastCompactionRemovedEventCount = 0L; @Getter - private long lastCompactionSucceedTimestamp = 0L; + private volatile long lastCompactionSucceedTimestamp = 0L; @Getter - private long lastCompactionFailedTimestamp = 0L; + private volatile long lastCompactionFailedTimestamp = 0L; @Getter - private long lastCompactionDurationTimeInMills = 0L; + private volatile long lastCompactionDurationTimeInMills = 0L; private LongAdder lastCompactionRemovedEventCountOp = new LongAdder(); - private long lastCompactionStartTimeOp; + private volatile long lastCompactionStartTimeOp; private final LongAdder compactionRemovedEventCount = new LongAdder(); private final LongAdder compactionSucceedCount = new LongAdder(); diff --git 
a/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java b/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java index a10bee1711c30..8c719b861b0bc 100644 --- a/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java +++ b/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java @@ -65,7 +65,7 @@ public class AuthenticationAthenz implements Authentication, EncodedAuthenticati // ZTSClient.cancelPrefetch() is called. // cf. https://github.com/AthenZ/athenz/issues/544 private boolean autoPrefetchEnabled = false; - private long cachedRoleTokenTimestamp; + private volatile long cachedRoleTokenTimestamp; private String roleToken; // athenz will only give this token if it's at least valid for 2hrs private static final int minValidity = 2 * 60 * 60; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java index 1f34de7197962..7138c79bd9ecb 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java @@ -58,8 +58,8 @@ public class AutoClusterFailover implements ServiceUrlProvider { private final long failoverDelayNs; private final long switchBackDelayNs; private final ScheduledExecutorService executor; - private long recoverTimestamp; - private long failedTimestamp; + private volatile long recoverTimestamp; + private volatile long failedTimestamp; private final long intervalMs; private static final int TIMEOUT = 30_000; private final PulsarServiceNameResolver resolver; From e1e3baf1eef37567d84eb4a3947a9305c63324e7 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 29 Sep 2022 10:46:55 +0800 Subject: [PATCH 22/59] [improve][doc] Add doc of reader config 
to `pulsar-io-debezium` (#16891) * add reader config doc * update to the versioned doc * Update site2/docs/io-debezium-source.md Co-authored-by: momo-jun <60642177+momo-jun@users.noreply.github.com> * Update site2/docs/io-debezium-source.md Co-authored-by: momo-jun <60642177+momo-jun@users.noreply.github.com> * revert changes to 2.10.1 and 2.9.3 Co-authored-by: momo-jun <60642177+momo-jun@users.noreply.github.com> --- site2/docs/io-debezium-source.md | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/site2/docs/io-debezium-source.md b/site2/docs/io-debezium-source.md index 0f7dd19b5d796..c4ce2c2f63e3f 100644 --- a/site2/docs/io-debezium-source.md +++ b/site2/docs/io-debezium-source.md @@ -26,7 +26,9 @@ The configuration of the Debezium source connector has the following properties. | `database.history.pulsar.topic` | true | null | The name of the database history topic where the connector writes and recovers DDL statements.

**Note: this topic is for internal use only and should not be used by consumers.** | | `database.history.pulsar.service.url` | true | null | Pulsar cluster service URL for history topic. | | `offset.storage.topic` | true | null | Record the last committed offsets that the connector successfully completes. | -| `json-with-envelope` | false | false | Present the message only consist of payload. +| `json-with-envelope` | false | false | Present the message only consist of payload. | +| `database.history.pulsar.reader.config` | false | null | The configs of the reader for the database schema history topic, in the form of a JSON string with key-value pairs. | +| `offset.storage.reader.config` | false | null | The configs of the reader for the kafka connector offsets topic, in the form of a JSON string with key-value pairs. | ### Converter Options @@ -50,7 +52,32 @@ Schema.AUTO_CONSUME(), KeyValueEncodingType.SEPARATED)`, and the message consist | `mongodb.password` | true | null | Password to be used when connecting to MongoDB. This is required only when MongoDB is configured to use authentication. | | `mongodb.task.id` | true | null | The taskId of the MongoDB connector that attempts to use a separate task for each replica set. | +### Customize the Reader config for the metadata topics +The Debezium Connector exposes `database.history.pulsar.reader.config` and `offset.storage.reader.config` to configure the reader of database schema history topic and the Kafka connector offsets topic. For example, it can be used to configure the subscription name and other reader configurations. You can find the available configurations at [ReaderConfigurationData](https://github.com/apache/pulsar/blob/master/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ReaderConfigurationData.java). 
+ +For example, to configure the subscription name for both Readers, you can add the following configuration: + +* JSON + + ```json + { + "configs": { + "database.history.pulsar.reader.config": "{\"subscriptionName\":\"history-reader\"}", + "offset.storage.reader.config": "{\"subscriptionName\":\"offset-reader\"}", + } + } + ``` + +* YAML + + ```yaml + + configs: + database.history.pulsar.reader.config: "{\"subscriptionName\":\"history-reader\"}" + offset.storage.reader.config: "{\"subscriptionName\":\"offset-reader\"}" + + ``` ## Example of MySQL From 048ccae028a4e519cc4400f70f4244c6581679b2 Mon Sep 17 00:00:00 2001 From: momo-jun <60642177+momo-jun@users.noreply.github.com> Date: Thu, 29 Sep 2022 10:58:46 +0800 Subject: [PATCH 23/59] [improve][doc] Add a limitation for key_shared subscription type (#15709) --- site2/docs/concepts-messaging.md | 84 ++++++++++++++++++++++---------- 1 file changed, 58 insertions(+), 26 deletions(-) diff --git a/site2/docs/concepts-messaging.md b/site2/docs/concepts-messaging.md index 368fd45c71f42..6137d5aa0c7cd 100644 --- a/site2/docs/concepts-messaging.md +++ b/site2/docs/concepts-messaging.md @@ -500,9 +500,12 @@ Topic name component | Description `namespace` | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespaces) level. Each tenant has one or more namespaces. `topic` | The final part of the name. Topic names have no special meaning in a Pulsar instance. -> **No need to explicitly create new topics** -> You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically. -> If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. 
You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant. +:::note + +You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically. +If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant. + +::: ## Namespaces @@ -514,11 +517,15 @@ A subscription is a named configuration rule that determines how messages are de ![Subscription types](/assets/pulsar-subscription-types.png) -> **Pub-Sub or Queuing** -> In Pulsar, you can use different subscriptions flexibly. -> * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is exclusive subscription type. -> * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared). -> * If you want to achieve both effects simultaneously, combine exclusive subscription type with other subscription types for consumers. +:::tip + +**Pub-Sub or Queuing** + In Pulsar, you can use different subscriptions flexibly. + * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is an exclusive subscription type. 
+ * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared). + * If you want to achieve both effects simultaneously, combine exclusive subscription types with other subscription types for consumers. + +::: ### Subscription types @@ -530,7 +537,11 @@ In the *Exclusive* type, only a single consumer is allowed to attach to the subs In the diagram below, only **Consumer A-0** is allowed to consume messages. -> Exclusive is the default subscription type. +:::tip + +Exclusive is the default subscription type. + +::: ![Exclusive subscriptions](/assets/pulsar-exclusive-subscriptions.png) @@ -553,10 +564,14 @@ In *shared* or *round robin* type, multiple consumers can attach to the same sub In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscribe to the topic, but **Consumer-C-3** and others could as well. -> **Limitations of Shared type** -> When using Shared type, be aware that: -> * Message ordering is not guaranteed. -> * You cannot use cumulative acknowledgment with Shared type. +:::note + +**Limitations of Shared type** + When using Shared type, be aware that: + * Message ordering is not guaranteed. + * You cannot use cumulative acknowledgment with Shared type. + +::: ![Shared subscriptions](/assets/pulsar-shared-subscriptions.png) @@ -609,10 +624,16 @@ producer = client.create_producer(topic='my-topic', batching_type=pulsar.Batchin ```` -> **Limitations of Key_Shared type** -> When you use Key_Shared type, be aware that: -> * You need to specify a key or orderingKey for messages. -> * You cannot use cumulative acknowledgment with Key_Shared type. +:::note + +**Limitations of Key_Shared type** + +When you use Key_Shared type, be aware that: + * You need to specify a key or ordering key for messages. + * You cannot use cumulative acknowledgment with Key_Shared type. 
+ * When the position of the newest message in a topic is `X`, a key-shared consumer that is newly attached to the same subscription and connected to the topic will **not** receive any messages until all the messages before `X` have been acknowledged. + +::: ### Subscription modes @@ -629,7 +650,7 @@ Subscription mode | Description | Note `Durable` | The cursor is durable, which retains messages and persists the current position.
If a broker restarts from a failure, it can recover the cursor from the persistent storage (BookKeeper), so that messages can continue to be consumed from the last consumed position. | `Durable` is the **default** subscription mode. `NonDurable` | The cursor is non-durable.
Once a broker stops, the cursor is lost and can never be recovered, so that messages **can not** continue to be consumed from the last consumed position. | Reader’s subscription mode is `NonDurable` in nature and it does not prevent data in a topic from being deleted. Reader’s subscription mode **can not** be changed. -A [subscription](#subscriptions) can have one or more consumers. When a consumer subscribes to a topic, it must specify the subscription name. A durable subscription and a non-durable subscription can have the same name, they are independent of each other. If a consumer specifies a subscription which does not exist before, the subscription is automatically created. +A [subscription](#subscriptions) can have one or more consumers. When a consumer subscribes to a topic, it must specify the subscription name. A durable subscription and a non-durable subscription can have the same name, they are independent of each other. If a consumer specifies a subscription that does not exist before, the subscription is automatically created. #### When to use @@ -679,12 +700,20 @@ When a consumer subscribes to a Pulsar topic, by default it subscribes to one sp * On the basis of a [**reg**ular **ex**pression](https://en.wikipedia.org/wiki/Regular_expression) (regex), for example, `persistent://public/default/finance-.*` * By explicitly defining a list of topics -> When subscribing to multiple topics by regex, all topics must be in the same [namespace](#namespaces). +:::note + +When subscribing to multiple topics by regex, all topics must be in the same [namespace](#namespaces). + +::: When subscribing to multiple topics, the Pulsar client automatically makes a call to the Pulsar API to discover the topics that match the regex pattern/list, and then subscribe to all of them. If any of the topics do not exist, the consumer auto-subscribes to them once the topics are created. 
-> **No ordering guarantees across multiple topics** -> When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends message to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same. +:::note + + **No ordering guarantees across multiple topics** + When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends messages to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same. + +::: The following are multi-topic subscription examples for Java. @@ -778,11 +807,11 @@ Non-persistent topics have names of this form (note the `non-persistent` in the non-persistent://tenant/namespace/topic ``` -> For more info on using non-persistent topics, see the [Non-persistent messaging cookbook](cookbooks-non-persistent.md). +For more info on using non-persistent topics, see the [Non-persistent messaging cookbook](cookbooks-non-persistent). In non-persistent topics, brokers immediately deliver messages to all connected subscribers *without persisting them* in [BookKeeper](concepts-architecture-overview.md#persistent-storage). If a subscriber is disconnected, the broker will not be able to deliver those in-transit messages, and subscribers will never be able to receive those messages again. Eliminating the persistent storage step makes messaging on non-persistent topics slightly faster than on persistent topics in some cases, but with the caveat that some core benefits of Pulsar are lost. -> With non-persistent topics, message data lives only in memory, without a specific buffer - which means data *is not* buffered in memory. 
The received messages are immediately transmitted to all *connected consumers*. If a message broker fails or message data can otherwise not be retrieved from memory, your message data may be lost. Use non-persistent topics only if you're *certain* that your use case requires it and can sustain it. +> With non-persistent topics, message data lives only in memory, without a specific buffer - which means data *is not* buffered in memory. The received messages are immediately transmitted to all *connected consumers*. If a message broker fails or message data can otherwise not be retrieved from memory, your message data may be lost. Use non-persistent topics only if you're *certain* that your use case requires it and can sustain it. By default, non-persistent topics are enabled on Pulsar brokers. You can disable them in the broker's [configuration](reference-configuration.md#broker-enableNonPersistentTopics). You can manage non-persistent topics using the `pulsar-admin topics` command. For more information, see [`pulsar-admin`](/tools/pulsar-admin/). @@ -879,7 +908,11 @@ Pulsar has two features, however, that enable you to override this default behav * Message **retention** enables you to store messages that have been acknowledged by a consumer * Message **expiry** enables you to set a time to live (TTL) for messages that have not yet been acknowledged -> All message retention and expiry is managed at the [namespace](#namespaces) level. For a how-to, see the [Message retention and expiry](cookbooks-retention-expiry.md) cookbook. +:::tip + +All message retention and expiry are managed at the [namespace](#namespaces) level. For a how-to, see the [Message retention and expiry](cookbooks-retention-expiry.md) cookbook. + +::: The diagram below illustrates both concepts: @@ -903,7 +936,7 @@ Message deduplication is disabled in the scenario shown at the top. 
Here, a prod In the second scenario at the bottom, the producer publishes message 1, which is received by the broker and persisted, as in the first scenario. When the producer attempts to publish the message again, however, the broker knows that it has already seen message 1 and thus does not persist the message. > Message deduplication is handled at the namespace level or the topic level. For more instructions, see the [message deduplication cookbook](cookbooks-deduplication.md). -> You can read the design of Message Deduplication in [PIP-6](https://github.com/aahmed-se/pulsar-wiki/blob/master/PIP-6:-Guaranteed-Message-Deduplication.md) +> You can read the design of Message Deduplication in [PIP-6](https://github.com/aahmed-se/pulsar-wiki/blob/master/PIP-6:-Guaranteed-Message-Deduplication.md). ### Producer idempotency @@ -913,7 +946,6 @@ The other available approach to message deduplication is to ensure that each mes Message deduplication makes Pulsar an ideal messaging system to be used in conjunction with stream processing engines (SPEs) and other systems seeking to provide effectively-once processing semantics. Messaging systems that do not offer automatic message deduplication require the SPE or other system to guarantee deduplication, which means that strict message ordering comes at the cost of burdening the application with the responsibility of deduplication. With Pulsar, strict ordering guarantees come at no application-level cost. -> You can find more in-depth information in [this post](https://www.splunk.com/en_us/blog/it/exactly-once-is-not-exactly-the-same.html). ## Delayed message delivery Delayed message delivery enables you to consume a message later. In this mechanism, a message is stored in BookKeeper. The `DelayedDeliveryTracker` maintains the time index (time -> messageId) in memory after the message is published to a broker. This message will be delivered to a consumer once the specified delay is over. 
From 537aa545c70aa0b3023ff026c4a919546340656b Mon Sep 17 00:00:00 2001 From: Zixuan Liu Date: Thu, 29 Sep 2022 11:49:21 +0800 Subject: [PATCH 24/59] [improve][cli] Using separate TLS config on the compactor (#17426) Signed-off-by: Zixuan Liu Signed-off-by: Zixuan Liu ### Motivation Improve the compactor tool, using separate TLS config ### Modifications - Add separate TLS config on the compactor, both Keystore and PEM formats are supported - Fix correct use of service URL by `brokerConfig.isBrokerClientTlsEnabled()` value ### Verifying this change Test has been added. ### Documentation Check the box below or label this PR directly. Need to update docs? - [ ] `doc-required` (Your PR needs to update docs and you will update later) - [x] `doc-not-needed` (Please explain why) - [ ] `doc` (Your PR contains doc changes) - [ ] `doc-complete` (Docs have been already added) --- .../pulsar/compaction/CompactorTool.java | 73 +++++++++++-------- .../pulsar/compaction/CompactorToolTest.java | 50 +++++++++++++ 2 files changed, 91 insertions(+), 32 deletions(-) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java index 4cb1fd347df13..91c079b27becc 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java @@ -31,13 +31,13 @@ import java.util.concurrent.ScheduledExecutorService; import lombok.Cleanup; import org.apache.bookkeeper.client.BookKeeper; -import org.apache.bookkeeper.common.util.OrderedScheduler; import org.apache.pulsar.broker.BookKeeperClientFactory; import org.apache.pulsar.broker.BookKeeperClientFactoryImpl; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.ServiceConfigurationUtils; import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; +import 
org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SizeUnit; import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; @@ -65,6 +65,45 @@ private static class Arguments { private boolean generateDocs = false; } + public static PulsarClient createClient(ServiceConfiguration brokerConfig) throws PulsarClientException { + ClientBuilder clientBuilder = PulsarClient.builder() + .memoryLimit(0, SizeUnit.BYTES); + + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. + clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(brokerConfig.getProperties(), "brokerClient_")); + + if (isNotBlank(brokerConfig.getBrokerClientAuthenticationPlugin())) { + clientBuilder.authentication(brokerConfig.getBrokerClientAuthenticationPlugin(), + brokerConfig.getBrokerClientAuthenticationParameters()); + } + + AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar+ssl"); + if (internalListener.getBrokerServiceUrlTls() != null && brokerConfig.isBrokerClientTlsEnabled()) { + clientBuilder.serviceUrl(internalListener.getBrokerServiceUrlTls().toString()) + .allowTlsInsecureConnection(brokerConfig.isTlsAllowInsecureConnection()); + if (brokerConfig.isBrokerClientTlsEnabledWithKeyStore()) { + clientBuilder.useKeyStoreTls(true) + .tlsKeyStoreType(brokerConfig.getBrokerClientTlsKeyStoreType()) + .tlsKeyStorePath(brokerConfig.getBrokerClientTlsKeyStore()) + .tlsKeyStorePassword(brokerConfig.getBrokerClientTlsKeyStorePassword()) + .tlsTrustStoreType(brokerConfig.getBrokerClientTlsTrustStoreType()) + .tlsTrustStorePath(brokerConfig.getBrokerClientTlsTrustStore()) + 
.tlsTrustStorePassword(brokerConfig.getBrokerClientTlsTrustStorePassword()); + } else { + clientBuilder.tlsTrustCertsFilePath(brokerConfig.getBrokerClientTrustCertsFilePath()) + .tlsKeyFilePath(brokerConfig.getBrokerClientKeyFilePath()) + .tlsCertificateFilePath(brokerConfig.getBrokerClientCertificateFilePath()); + } + } else { + internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar"); + clientBuilder.serviceUrl(internalListener.getBrokerServiceUrl().toString()); + } + + return clientBuilder.build(); + } + public static void main(String[] args) throws Exception { Arguments arguments = new Arguments(); JCommander jcommander = new JCommander(arguments); @@ -105,40 +144,10 @@ public static void main(String[] args) throws Exception { ); } - ClientBuilder clientBuilder = PulsarClient.builder() - .memoryLimit(0, SizeUnit.BYTES); - - // Apply all arbitrary configuration. This must be called before setting any fields annotated as - // @Secret on the ClientConfigurationData object because of the way they are serialized. - // See https://github.com/apache/pulsar/issues/8509 for more information. - clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(brokerConfig.getProperties(), "brokerClient_")); - - if (isNotBlank(brokerConfig.getBrokerClientAuthenticationPlugin())) { - clientBuilder.authentication(brokerConfig.getBrokerClientAuthenticationPlugin(), - brokerConfig.getBrokerClientAuthenticationParameters()); - } - - AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar+ssl"); - if (internalListener.getBrokerServiceUrlTls() != null) { - log.info("Found a TLS-based advertised listener in configuration file. 
\n" - + "Will connect pulsar use TLS."); - clientBuilder - .serviceUrl(internalListener.getBrokerServiceUrlTls().toString()) - .allowTlsInsecureConnection(brokerConfig.isTlsAllowInsecureConnection()) - .tlsTrustCertsFilePath(brokerConfig.getTlsCertificateFilePath()); - - } else { - internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar"); - clientBuilder.serviceUrl(internalListener.getBrokerServiceUrl().toString()); - } - @Cleanup(value = "shutdownNow") ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor( new ThreadFactoryBuilder().setNameFormat("compaction-%d").setDaemon(true).build()); - @Cleanup(value = "shutdownNow") - OrderedScheduler executor = OrderedScheduler.newSchedulerBuilder().build(); - @Cleanup MetadataStoreExtended store = MetadataStoreExtended.create(brokerConfig.getMetadataStoreUrl(), MetadataStoreConfig.builder() @@ -157,7 +166,7 @@ public static void main(String[] args) throws Exception { BookKeeper bk = bkClientFactory.create(brokerConfig, store, eventLoopGroup, Optional.empty(), null); @Cleanup - PulsarClient pulsar = clientBuilder.build(); + PulsarClient pulsar = createClient(brokerConfig); Compactor compactor = new TwoPhaseCompactor(brokerConfig, pulsar, bk, scheduler); long ledgerId = compactor.compact(arguments.topic).get(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorToolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorToolTest.java index 795cf2b7f7cd7..85f6f16fdb01a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorToolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorToolTest.java @@ -18,6 +18,9 @@ */ package org.apache.pulsar.compaction; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.testng.Assert.assertTrue; import com.beust.jcommander.Parameter; import 
java.io.ByteArrayOutputStream; @@ -25,6 +28,13 @@ import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.util.Arrays; +import java.util.Optional; +import java.util.Properties; +import lombok.Cleanup; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.util.CmdGenerateDocs; import org.testng.annotations.Test; @@ -72,4 +82,44 @@ public void testGenerateDocs() throws Exception { System.setOut(oldStream); } } + + @Test + public void testUseTlsUrlWithPEM() throws PulsarClientException { + ServiceConfiguration serviceConfiguration = spy(ServiceConfiguration.class); + serviceConfiguration.setBrokerServicePortTls(Optional.of(6651)); + serviceConfiguration.setBrokerClientTlsEnabled(true); + serviceConfiguration.setProperties(new Properties()); + + @Cleanup + PulsarClient ignored = CompactorTool.createClient(serviceConfiguration); + + verify(serviceConfiguration, times(1)).isBrokerClientTlsEnabled(); + verify(serviceConfiguration, times(1)).isTlsAllowInsecureConnection(); + verify(serviceConfiguration, times(1)).getBrokerClientKeyFilePath(); + verify(serviceConfiguration, times(1)).getBrokerClientTrustCertsFilePath(); + verify(serviceConfiguration, times(1)).getBrokerClientCertificateFilePath(); + } + + @Test + public void testUseTlsUrlWithKeystore() throws PulsarClientException { + ServiceConfiguration serviceConfiguration = spy(ServiceConfiguration.class); + serviceConfiguration.setBrokerServicePortTls(Optional.of(6651)); + serviceConfiguration.setBrokerClientTlsEnabled(true); + serviceConfiguration.setBrokerClientTlsEnabledWithKeyStore(true); + serviceConfiguration.setBrokerClientTlsTrustStore(MockedPulsarServiceBaseTest.BROKER_KEYSTORE_FILE_PATH); + + serviceConfiguration.setProperties(new Properties()); + + @Cleanup + PulsarClient 
ignored = CompactorTool.createClient(serviceConfiguration); + + verify(serviceConfiguration, times(1)).isBrokerClientTlsEnabled(); + verify(serviceConfiguration, times(1)).isBrokerClientTlsEnabledWithKeyStore(); + verify(serviceConfiguration, times(1)).getBrokerClientTlsKeyStore(); + verify(serviceConfiguration, times(1)).getBrokerClientTlsKeyStorePassword(); + verify(serviceConfiguration, times(1)).getBrokerClientTlsKeyStoreType(); + verify(serviceConfiguration, times(1)).getBrokerClientTlsTrustStore(); + verify(serviceConfiguration, times(1)).getBrokerClientTlsTrustStorePassword(); + verify(serviceConfiguration, times(1)).getBrokerClientTlsTrustStoreType(); + } } From 73739e105ea86327839149c0406682d656bee233 Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: Thu, 29 Sep 2022 11:56:35 +0800 Subject: [PATCH 25/59] [fix][doc]correct the explanation for invalidateEntries in the EntryCache (#17650) --- .../org/apache/bookkeeper/mledger/impl/cache/EntryCache.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCache.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCache.java index 81c89b37d5543..640a7eeefcebb 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCache.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCache.java @@ -39,7 +39,7 @@ public interface EntryCache extends Comparable { /** * Insert an entry in the cache. * - *
<p>
If the overall limit have been reached, this will triggered the eviction of other entries, possibly from + *
<p>
If the overall limit have been reached, this will trigger the eviction of other entries, possibly from * other EntryCache instances * * @param entry @@ -49,7 +49,7 @@ public interface EntryCache extends Comparable { boolean insert(EntryImpl entry); /** - * Remove from cache all the entries related to a ledger up to lastPosition included. + * Remove from cache all the entries related to a ledger up to lastPosition excluded. * * @param lastPosition * the position of the last entry to be invalidated (non-inclusive) From db26073728bf86fc80deecaece2dc02b50bbb9b5 Mon Sep 17 00:00:00 2001 From: Michael Marshall Date: Wed, 28 Sep 2022 22:02:41 -0700 Subject: [PATCH 26/59] [feat][broker] Add config to count filtered entries towards rate limits (#17686) * [feat][broker] Add config to count filtered entries towards rate limits * Make fixes for checkstyle * Remove * import * Fix incorrect conflict resolution in merge commit ### Motivation Currently, when using entry filters, filtered out messages do not count against the rate limit. Therefore, a subscription that is completely filtered will never be throttled due to rate limiting. When the messages are delivered to the consumer for a filtered subscription, those messages will count against the rate limit, and in that case, the message filtering can be throttled because the check to delay `readMoreEntries()` happens before message filtering. Therefore, the rate limit will essentially be increased as a function of the percent of messages let through the filter (some quick math is that the new rate is likely `dispatchRate * (1 / percentDelivered)`, where percent delivered is a percent as a decimal). It's possible that some use cases prefer this behavior, but in my case, I think it'd be valuable to include these filtered messages in the dispatch throttling because these messages still cost the broker network, memory, and cpu. 
This PR adds a configuration to count filtered out messages towards dispatch rate limits for the broker, the topic, and the subscription. ### Modifications * Add configuration named `dispatchThrottlingForFilteredEntriesEnabled`. Default it to false so we maintain the original behavior. When true, count filtered messages against rate limits. * Refactor the code to `acquirePermitsForDeliveredMessages` so that it is in the `AbstractBaseDispatcher`, which makes it available to the entry filtering logic. ### Verifying this change A new test is added as part of this PR. ### Does this pull request potentially affect one of the following parts: This PR introduces a new config while maintaining the current behavior. ### Documentation - [x] `doc-not-needed` Config docs are auto-generated. --- conf/broker.conf | 6 ++++ .../pulsar/broker/ServiceConfiguration.java | 10 ++++++ .../service/AbstractBaseDispatcher.java | 27 +++++++++++++++ ...PersistentDispatcherMultipleConsumers.java | 21 ++---------- ...sistentDispatcherSingleActiveConsumer.java | 19 ++--------- ...tStickyKeyDispatcherMultipleConsumers.java | 14 +------- .../service/AbstractBaseDispatcherTest.java | 33 +++++++++++++++---- 7 files changed, 75 insertions(+), 55 deletions(-) diff --git a/conf/broker.conf b/conf/broker.conf index 30e79ebc9f090..d117d679c8532 100644 --- a/conf/broker.conf +++ b/conf/broker.conf @@ -453,6 +453,12 @@ entryFilterNames= # The directory for all the entry filter implementations entryFiltersDirectory= +# Whether the broker should count filtered entries in dispatch rate limit calculations. When disabled, +# only messages sent to a consumer count towards a dispatch rate limit at the broker, topic, and +# subscription level. When enabled, messages filtered out due to entry filter logic are counted towards +# each relevant rate limit. +dispatchThrottlingForFilteredEntriesEnabled=false + # Whether allow topic level entry filters policies overrides broker configuration. 
allowOverrideEntryFilters=false diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java index a6e9a556820a6..8c883045e66c5 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java @@ -1050,6 +1050,16 @@ The delayed message index bucket time step(in seconds) in per bucket snapshot se ) private boolean dispatcherDispatchMessagesInSubscriptionThread = true; + @FieldContext( + dynamic = false, + category = CATEGORY_SERVER, + doc = "Whether the broker should count filtered entries in dispatch rate limit calculations. When disabled, " + + "only messages sent to a consumer count towards a dispatch rate limit at the broker, topic, and " + + "subscription level. When enabled, messages filtered out due to entry filter logic are counted towards " + + "each relevant rate limit." + ) + private boolean dispatchThrottlingForFilteredEntriesEnabled = false; + // <-- dispatcher read settings --> @FieldContext( dynamic = true, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java index 29710067a61d4..df02bbd85d470 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java @@ -107,6 +107,9 @@ public int filterEntriesForConsumer(Optional optMetadataArray long totalBytes = 0; int totalChunkedMessages = 0; int totalEntries = 0; + int filteredMessageCount = 0; + int filteredEntryCount = 0; + long filteredBytesCount = 0; final boolean hasFilter = CollectionUtils.isNotEmpty(entryFilters); List entriesToFiltered = hasFilter ? 
new ArrayList<>() : null; List entriesToRedeliver = hasFilter ? new ArrayList<>() : null; @@ -135,6 +138,9 @@ public int filterEntriesForConsumer(Optional optMetadataArray // FilterResult will be always `ACCEPTED` when there is No Filter // dont need to judge whether `hasFilter` is true or not. this.filterRejectedMsgs.add(entryMsgCnt); + filteredEntryCount++; + filteredMessageCount += entryMsgCnt; + filteredBytesCount += metadataAndPayload.readableBytes(); entry.release(); continue; } else if (filterResult == EntryFilter.FilterResult.RESCHEDULE) { @@ -143,6 +149,9 @@ public int filterEntriesForConsumer(Optional optMetadataArray // FilterResult will be always `ACCEPTED` when there is No Filter // dont need to judge whether `hasFilter` is true or not. this.filterRescheduledMsgs.add(entryMsgCnt); + filteredEntryCount++; + filteredMessageCount += entryMsgCnt; + filteredBytesCount += metadataAndPayload.readableBytes(); entry.release(); continue; } @@ -231,6 +240,11 @@ public int filterEntriesForConsumer(Optional optMetadataArray } + if (serviceConfig.isDispatchThrottlingForFilteredEntriesEnabled()) { + acquirePermitsForDeliveredMessages(subscription.getTopic(), cursor, filteredEntryCount, + filteredMessageCount, filteredBytesCount); + } + sendMessageInfo.setTotalMessages(totalMessages); sendMessageInfo.setTotalBytes(totalBytes); sendMessageInfo.setTotalChunkedMessages(totalChunkedMessages); @@ -243,6 +257,19 @@ private void individualAcknowledgeMessageIfNeeded(Position position, Map + rateLimiter.tryDispatchPermit(permits, totalBytesSent)); + topic.getDispatchRateLimiter().ifPresent(rateLimter -> + rateLimter.tryDispatchPermit(permits, totalBytesSent)); + getRateLimiter().ifPresent(rateLimiter -> rateLimiter.tryDispatchPermit(permits, totalBytesSent)); + } + } + /** * Determine whether the number of consumers on the subscription reaches the threshold. 
* @return diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java index 02d2e725379b6..15b42fedd38ab 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java @@ -684,7 +684,7 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis totalBytesSent += sendMessageInfo.getTotalBytes(); } - acquirePermitsForDeliveredMessages(totalEntries, totalMessagesSent, totalBytesSent); + acquirePermitsForDeliveredMessages(topic, cursor, totalEntries, totalMessagesSent, totalBytesSent); if (entriesToDispatch > 0) { if (log.isDebugEnabled()) { @@ -700,23 +700,6 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis return true; } - private void acquirePermitsForDeliveredMessages(long totalEntries, long totalMessagesSent, long totalBytesSent) { - // acquire message-dispatch permits for already delivered messages - long permits = dispatchThrottlingOnBatchMessageEnabled ? 
totalEntries : totalMessagesSent; - if (serviceConfig.isDispatchThrottlingOnNonBacklogConsumerEnabled() || !cursor.isActive()) { - if (topic.getBrokerDispatchRateLimiter().isPresent()) { - topic.getBrokerDispatchRateLimiter().get().tryDispatchPermit(permits, totalBytesSent); - } - if (topic.getDispatchRateLimiter().isPresent()) { - topic.getDispatchRateLimiter().get().tryDispatchPermit(permits, totalBytesSent); - } - - if (dispatchRateLimiter.isPresent()) { - dispatchRateLimiter.get().tryDispatchPermit(permits, totalBytesSent); - } - } - } - private boolean sendChunkedMessagesToConsumers(ReadType readType, List entries, MessageMetadata[] metadataArray) { @@ -775,7 +758,7 @@ private boolean sendChunkedMessagesToConsumers(ReadType readType, totalBytesSent += sendMessageInfo.getTotalBytes(); } - acquirePermitsForDeliveredMessages(totalEntries, totalMessagesSent, totalBytesSent); + acquirePermitsForDeliveredMessages(topic, cursor, totalEntries, totalMessagesSent, totalBytesSent); return numConsumers.get() == 0; // trigger a new readMoreEntries() call } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java index accab20d2daed..3ba7a82aa5e35 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java @@ -221,23 +221,8 @@ protected void dispatchEntriesToConsumer(Consumer currentConsumer, List e redeliveryTracker, epoch) .addListener(future -> { if (future.isSuccess()) { - int permits = dispatchThrottlingOnBatchMessageEnabled ? 
entries.size() - : sendMessageInfo.getTotalMessages(); - // acquire message-dispatch permits for already delivered messages - if (serviceConfig.isDispatchThrottlingOnNonBacklogConsumerEnabled() || !cursor.isActive()) { - if (topic.getBrokerDispatchRateLimiter().isPresent()) { - topic.getBrokerDispatchRateLimiter().get().tryDispatchPermit(permits, - sendMessageInfo.getTotalBytes()); - } - - if (topic.getDispatchRateLimiter().isPresent()) { - topic.getDispatchRateLimiter().get().tryDispatchPermit(permits, - sendMessageInfo.getTotalBytes()); - } - dispatchRateLimiter.ifPresent(rateLimiter -> - rateLimiter.tryDispatchPermit(permits, - sendMessageInfo.getTotalBytes())); - } + acquirePermitsForDeliveredMessages(topic, cursor, entries.size(), + sendMessageInfo.getTotalMessages(), sendMessageInfo.getTotalBytes()); // Schedule a new read batch operation only after the previous batch has been written to the socket. topic.getBrokerService().getTopicOrderedExecutor().executeOrdered(topicName, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java index 024ed8581ef1c..5eb553106e679 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java @@ -296,19 +296,7 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis } // acquire message-dispatch permits for already delivered messages - if (serviceConfig.isDispatchThrottlingOnNonBacklogConsumerEnabled() || !cursor.isActive()) { - long permits = dispatchThrottlingOnBatchMessageEnabled ? 
totalEntries : totalMessagesSent; - if (topic.getBrokerDispatchRateLimiter().isPresent()) { - topic.getBrokerDispatchRateLimiter().get().tryDispatchPermit(permits, totalBytesSent); - } - if (topic.getDispatchRateLimiter().isPresent()) { - topic.getDispatchRateLimiter().get().tryDispatchPermit(permits, totalBytesSent); - } - - if (dispatchRateLimiter.isPresent()) { - dispatchRateLimiter.get().tryDispatchPermit(permits, totalBytesSent); - } - } + acquirePermitsForDeliveredMessages(topic, cursor, totalEntries, totalMessagesSent, totalBytesSent); stuckConsumers.clear(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java index b129995a8cc47..cba15b0631006 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractBaseDispatcherTest.java @@ -22,6 +22,7 @@ import static org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import io.netty.buffer.ByteBuf; @@ -29,11 +30,14 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.Entry; +import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.impl.EntryImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import 
org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.plugin.EntryFilter; @@ -60,8 +64,9 @@ public class AbstractBaseDispatcherTest { @BeforeMethod public void setup() throws Exception { this.svcConfig = mock(ServiceConfiguration.class); + when(svcConfig.isDispatchThrottlingForFilteredEntriesEnabled()).thenReturn(true); this.subscriptionMock = mock(PersistentSubscription.class); - this.helper = new AbstractBaseDispatcherTestHelper(this.subscriptionMock, this.svcConfig); + this.helper = new AbstractBaseDispatcherTestHelper(this.subscriptionMock, this.svcConfig, null); } @Test @@ -89,17 +94,24 @@ public void testFilterEntriesForConsumerOfEntryFilter() throws Exception { EntryFilter.FilterResult.REJECT); Map entryFilters = Map.of("key", mockFilter); when(mockTopic.getEntryFilters()).thenReturn(entryFilters); + DispatchRateLimiter subscriptionDispatchRateLimiter = mock(DispatchRateLimiter.class); - this.helper = new AbstractBaseDispatcherTestHelper(this.subscriptionMock, this.svcConfig); + this.helper = new AbstractBaseDispatcherTestHelper(this.subscriptionMock, this.svcConfig, + subscriptionDispatchRateLimiter); List entries = new ArrayList<>(); - entries.add(EntryImpl.create(1, 2, createMessage("message1", 1))); + Entry e = EntryImpl.create(1, 2, createMessage("message1", 1)); + long expectedBytePermits = e.getLength(); + entries.add(e); SendMessageInfo sendMessageInfo = SendMessageInfo.getThreadLocal(); EntryBatchSizes batchSizes = EntryBatchSizes.get(entries.size()); - // - int size = this.helper.filterEntriesForConsumer(entries, batchSizes, sendMessageInfo, null, null, false, null); + + ManagedCursor cursor = mock(ManagedCursor.class); + + int size = this.helper.filterEntriesForConsumer(entries, batchSizes, sendMessageInfo, null, cursor, false, null); assertEquals(size, 0); + verify(subscriptionDispatchRateLimiter).tryDispatchPermit(1, expectedBytePermits); } @Test @@ -201,9 +213,18 @@ private ByteBuf 
createDelayedMessage(String message, int sequenceId) { private static class AbstractBaseDispatcherTestHelper extends AbstractBaseDispatcher { + private final Optional dispatchRateLimiter; + protected AbstractBaseDispatcherTestHelper(Subscription subscription, - ServiceConfiguration serviceConfig) { + ServiceConfiguration serviceConfig, + DispatchRateLimiter rateLimiter) { super(subscription, serviceConfig); + dispatchRateLimiter = Optional.ofNullable(rateLimiter); + } + + @Override + public Optional getRateLimiter() { + return dispatchRateLimiter; } @Override From dc44ea28ee0ed4c5616e36e7bb5f882e60a706a0 Mon Sep 17 00:00:00 2001 From: fengyubiao Date: Thu, 29 Sep 2022 14:47:59 +0800 Subject: [PATCH 27/59] [fix][flaky-test]PerformanceTransactionTest.testConsumeTxnMessage (#17837) Fixes: #14109 ### Motivation The expected execution flow for this test is: 1. send 505 messages 2. commit 10 transactions, every transaction ack 50 messages 3. receive the last 5 messages in the last transaction, wait for transaction timeout 4. confirm that the last 5 messages can be consumed by new consumer (High light) The default value for transaction TTL is 10 seconds, and the default value for `Awaitility.await` is also 10 seconds, so this test is not stable. Note: This is a guess cause, the problem is not reproduced locally. But after transaction TTL is set to 11s, the probability of the problem occurring is 100%. 
### Modifications Fix flaky test - set transaction TTL to 5s Other changes - define a name for the task thread - acknowledge the last 5 messages ### Matching PR in forked repository PR in forked repository: - https://github.com/poorbarcode/pulsar/pull/13 --- .../apache/pulsar/testclient/PerformanceTransactionTest.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java index 883d53540cbc6..0afd69ed4021c 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java @@ -198,7 +198,7 @@ public void testProduceTxnMessage() throws InterruptedException, PulsarClientExc @Test public void testConsumeTxnMessage() throws Exception { - String argString = "%s -r 10 -u %s -txn -ss %s -st %s -sp %s -ntxn %d"; + String argString = "%s -r 10 -u %s -txn -ss %s -st %s -sp %s -ntxn %d -tto 5"; String subName = "sub"; String topic = testTopic + UUID.randomUUID(); String args = String.format(argString, topic, pulsar.getBrokerServiceUrl(), subName, @@ -222,7 +222,7 @@ public void testConsumeTxnMessage() throws Exception { } catch (Exception e) { e.printStackTrace(); } - }); + }, "Performance_Consumer_Task"); thread.start(); thread.join(); @@ -240,6 +240,7 @@ public void testConsumeTxnMessage() throws Exception { for (int i = 0; i < 5; i++) { Message message = consumer.receive(2, TimeUnit.SECONDS); Assert.assertNotNull(message); + consumer.acknowledge(message); } Message message = consumer.receive(2, TimeUnit.SECONDS); Assert.assertNull(message); From 0f53c790ff2e3051ebc80aa8d2c9253265725aeb Mon Sep 17 00:00:00 2001 From: fengyubiao Date: Thu, 29 Sep 2022 14:49:18 +0800 Subject: [PATCH 28/59] [improve][txn]PIP-160 make txn components supports 
buffered writer metrics (#17701) Master Issue: #15370 ### Modifications - Make transaction `MLTransactionMetadataStoreProvider` & `MLPendingAckStoreProvider` support buffered writer metrics. - Motivation: #15370 ---- - Delete constructor of `TxnLogBufferedWriter` without parameter `metrics`. - Motivation: it is unnecessary. ---- - Add a default `DisabledTxnLogBufferedWriterMetricsStats` implementation. ---- - Previous PR remaining code to optimize: remove the check code `if (metrics != null)`. The motivation see: - Motivation: https://github.com/apache/pulsar/pull/16758#discussion_r945512673 ---- - Make transaction log buffered writer only create by the `MLTransactionMetadataStoreProvider` & `MLPendingAckStoreProvider`. - Motivation: https://github.com/apache/pulsar/pull/16758#discussion_r961254161 ### Documentation - [ ] `doc-required` - [x] `doc-not-needed` - [ ] `doc` - [ ] `doc-complete` ### Matching PR in forked repository PR in forked repository: - https://github.com/poorbarcode/pulsar/pull/3 --- .../apache/pulsar/broker/PulsarService.java | 5 + .../pendingack/impl/MLPendingAckStore.java | 6 +- .../impl/MLPendingAckStoreProvider.java | 30 +- .../stats/ManagedLedgerMetricsTest.java | 3 +- .../TransactionBatchWriterMetricsTest.java | 289 ++++++++++++++++++ .../broker/transaction/TransactionTest.java | 6 +- .../pendingack/PendingAckMetadataTest.java | 3 +- ...abledTxnLogBufferedWriterMetricsStats.java | 57 ++++ .../impl/MLTransactionLogImpl.java | 9 +- .../MLTransactionMetadataStoreProvider.java | 29 +- .../impl/TxnLogBufferedWriter.java | 35 +-- .../TxnLogBufferedWriterMetricsStats.java | 67 ++-- .../MLTransactionMetadataStoreTest.java | 20 +- .../impl/MLTransactionLogImplTest.java | 13 +- .../impl/TxnLogBufferedWriterTest.java | 20 +- 15 files changed, 509 insertions(+), 83 deletions(-) create mode 100644 pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionBatchWriterMetricsTest.java create mode 100644 
pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/DisabledTxnLogBufferedWriterMetricsStats.java diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java index e658115e1c28b..0ade26e661254 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java @@ -112,6 +112,7 @@ import org.apache.pulsar.broker.transaction.buffer.TransactionBufferProvider; import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferClientImpl; import org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; +import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStoreProvider; import org.apache.pulsar.broker.validator.MultipleListenerValidator; import org.apache.pulsar.broker.validator.TransactionBatchedWriteValidator; import org.apache.pulsar.broker.web.WebService; @@ -168,6 +169,7 @@ import org.apache.pulsar.packages.management.core.impl.PackagesManagementImpl; import org.apache.pulsar.policies.data.loadbalancer.AdvertisedListener; import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreProvider; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStoreProvider; import org.apache.pulsar.websocket.WebSocketConsumerServlet; import org.apache.pulsar.websocket.WebSocketPingPongServlet; import org.apache.pulsar.websocket.WebSocketProducerServlet; @@ -830,6 +832,9 @@ public void start() throws PulsarServerException { // Register pulsar system namespaces and start transaction meta store service if (config.isTransactionCoordinatorEnabled()) { + MLTransactionMetadataStoreProvider.initBufferedWriterMetrics(getAdvertisedAddress()); + MLPendingAckStoreProvider.initBufferedWriterMetrics(getAdvertisedAddress()); + this.transactionBufferSnapshotService = new 
SystemTopicBaseTxnBufferSnapshotService(getClient()); this.transactionTimer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-transaction-timer")); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java index 3ff4ed8e76808..8b1e0a4e94123 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java @@ -61,6 +61,7 @@ import org.apache.pulsar.transaction.coordinator.impl.TxnBatchedPositionImpl; import org.apache.pulsar.transaction.coordinator.impl.TxnLogBufferedWriter; import org.apache.pulsar.transaction.coordinator.impl.TxnLogBufferedWriterConfig; +import org.apache.pulsar.transaction.coordinator.impl.TxnLogBufferedWriterMetricsStats; import org.jctools.queues.MessagePassingQueue; import org.jctools.queues.SpscArrayQueue; import org.slf4j.Logger; @@ -119,7 +120,7 @@ public class MLPendingAckStore implements PendingAckStore { public MLPendingAckStore(ManagedLedger managedLedger, ManagedCursor cursor, ManagedCursor subManagedCursor, long transactionPendingAckLogIndexMinLag, TxnLogBufferedWriterConfig bufferedWriterConfig, - Timer timer) { + Timer timer, TxnLogBufferedWriterMetricsStats bufferedWriterMetrics) { this.managedLedger = managedLedger; this.cursor = cursor; this.currentLoadPosition = (PositionImpl) this.cursor.getMarkDeletedPosition(); @@ -132,7 +133,8 @@ public MLPendingAckStore(ManagedLedger managedLedger, ManagedCursor cursor, this.bufferedWriter = new TxnLogBufferedWriter(managedLedger, ((ManagedLedgerImpl) managedLedger).getExecutor(), timer, PendingAckLogSerializer.INSTANCE, bufferedWriterConfig.getBatchedWriteMaxRecords(), bufferedWriterConfig.getBatchedWriteMaxSize(), - 
bufferedWriterConfig.getBatchedWriteMaxDelayInMillis(), bufferedWriterConfig.isBatchEnabled()); + bufferedWriterConfig.getBatchedWriteMaxDelayInMillis(), bufferedWriterConfig.isBatchEnabled(), + bufferedWriterMetrics); this.batchedPendingAckLogsWaitingForHandle = new ArrayList<>(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java index fdff9f59146d4..bf2771abaa65a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.transaction.pendingack.impl; import io.netty.util.Timer; +import io.prometheus.client.CollectorRegistry; import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.AsyncCallbacks; @@ -34,7 +35,9 @@ import org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats; import org.apache.pulsar.transaction.coordinator.impl.TxnLogBufferedWriterConfig; +import org.apache.pulsar.transaction.coordinator.impl.TxnLogBufferedWriterMetricsStats; /** @@ -43,6 +46,21 @@ @Slf4j public class MLPendingAckStoreProvider implements TransactionPendingAckStoreProvider { + private static volatile TxnLogBufferedWriterMetricsStats bufferedWriterMetrics = + DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; + + public static void initBufferedWriterMetrics(String brokerAdvertisedAddress){ + if (bufferedWriterMetrics != 
DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS){ + return; + } + synchronized (MLPendingAckStoreProvider.class){ + if (bufferedWriterMetrics != DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS){ + return; + } + bufferedWriterMetrics = new MLTxnPendingAckLogBufferedWriterMetrics(brokerAdvertisedAddress); + } + } + @Override public CompletableFuture newPendingAckStore(PersistentSubscription subscription) { CompletableFuture pendingAckStoreFuture = new CompletableFuture<>(); @@ -105,7 +123,7 @@ public void openCursorComplete(ManagedCursor cursor, Object ctx) { .getConfiguration() .getTransactionPendingAckLogIndexMinLag(), txnLogBufferedWriterConfig, - brokerClientSharedTimer)); + brokerClientSharedTimer, bufferedWriterMetrics)); if (log.isDebugEnabled()) { log.debug("{},{} open MLPendingAckStore cursor success", originPersistentTopic.getName(), @@ -151,4 +169,14 @@ public CompletableFuture checkInitializedBefore(PersistentSubscription return originPersistentTopic.getBrokerService().getManagedLedgerFactory() .asyncExists(TopicName.get(pendingAckTopicName).getPersistenceNamingEncoding()); } + + private static class MLTxnPendingAckLogBufferedWriterMetrics extends TxnLogBufferedWriterMetricsStats{ + + private MLTxnPendingAckLogBufferedWriterMetrics(String brokerAdvertisedAddress) { + super("pulsar_txn_pending_ack_store", + new String[]{"broker"}, + new String[]{brokerAdvertisedAddress}, + CollectorRegistry.defaultRegistry); + } + } } \ No newline at end of file diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedLedgerMetricsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedLedgerMetricsTest.java index b448f00a8ff16..cd07b7f6e25b9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedLedgerMetricsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedLedgerMetricsTest.java @@ -18,6 +18,7 @@ */ package 
org.apache.pulsar.broker.stats; +import static org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; import io.netty.util.HashedWheelTimer; import io.netty.util.concurrent.DefaultThreadFactory; import java.util.List; @@ -109,7 +110,7 @@ public void testTransactionTopic() throws Exception { managedLedgerConfig.setMaxEntriesPerLedger(2); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(TransactionCoordinatorID.get(0), pulsar.getManagedLedgerFactory(), managedLedgerConfig, txnLogBufferedWriterConfig, - transactionTimer); + transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); ManagedLedgerMetrics metrics = new ManagedLedgerMetrics(pulsar); metrics.generate(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionBatchWriterMetricsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionBatchWriterMetricsTest.java new file mode 100644 index 0000000000000..ceed05884df2e --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionBatchWriterMetricsTest.java @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.stats; + +import static org.apache.pulsar.common.policies.data.PoliciesUtil.getBundles; +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import javax.ws.rs.client.Client; +import javax.ws.rs.client.ClientBuilder; +import javax.ws.rs.client.WebTarget; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.resources.ClusterResources; +import org.apache.pulsar.broker.resources.NamespaceResources; +import org.apache.pulsar.broker.resources.TenantResources; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.SystemTopicNames; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.partition.PartitionedTopicMetadata; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.testng.Assert; +import 
org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +/** + * Test for consuming transaction messages. + */ +@Slf4j +@Test(groups = "broker") +public class TransactionBatchWriterMetricsTest extends MockedPulsarServiceBaseTest { + + private final String clusterName = "test"; + public static final NamespaceName DEFAULT_NAMESPACE = NamespaceName.get("public/default"); + private final String topicNameSuffix = "t-rest-topic"; + private final String topicName = DEFAULT_NAMESPACE.getPersistentTopicName(topicNameSuffix); + + @BeforeClass + public void setup() throws Exception { + super.internalSetup(); + } + + @AfterClass + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + // enable transaction. + conf.setSystemTopicEnabled(true); + conf.setTransactionCoordinatorEnabled(true); + // enabled batch writer. + conf.setTransactionPendingAckBatchedWriteEnabled(true); + conf.setTransactionPendingAckBatchedWriteMaxRecords(10); + conf.setTransactionLogBatchedWriteEnabled(true); + conf.setTransactionLogBatchedWriteMaxRecords(10); + } + + @Override + protected PulsarService startBroker(ServiceConfiguration conf) throws Exception { + PulsarService pulsar = startBrokerWithoutAuthorization(conf); + ensureClusterExists(pulsar, clusterName); + ensureTenantExists(pulsar.getPulsarResources().getTenantResources(), TopicName.PUBLIC_TENANT, clusterName); + ensureNamespaceExists(pulsar.getPulsarResources().getNamespaceResources(), DEFAULT_NAMESPACE, + clusterName); + ensureNamespaceExists(pulsar.getPulsarResources().getNamespaceResources(), NamespaceName.SYSTEM_NAMESPACE, + clusterName); + ensureTopicExists(pulsar.getPulsarResources().getNamespaceResources().getPartitionedTopicResources(), + SystemTopicNames.TRANSACTION_COORDINATOR_ASSIGN, 16); + return pulsar; + } + + @Test + public void testTransactionMetaLogMetrics() 
throws Exception{ + String metricsLabelCluster = clusterName; + String metricsLabelBroker = pulsar.getAdvertisedAddress().split(":")[0]; + admin.topics().createNonPartitionedTopic(topicName); + + sendAndAckSomeMessages(); + + // call metrics + Client client = ClientBuilder.newClient(); + WebTarget target = client.target(brokerUrl + "/metrics/get"); + Response response = target.request(MediaType.APPLICATION_JSON_TYPE).buildGet().invoke(); + Assert.assertTrue(response.getStatus() / 200 == 1); + List metricsLines = parseResponseEntityToList(response); + + metricsLines = metricsLines.stream().filter(s -> !s.startsWith("#") && s.contains("bufferedwriter")).collect( + Collectors.toList()); + + // verify tc. + String metrics_key_txn_tc_record_count_sum = + "pulsar_txn_tc_bufferedwriter_batch_record_count_sum{cluster=\"%s\",broker=\"%s\"} "; + Assert.assertTrue(searchMetricsValue(metricsLines, + String.format(metrics_key_txn_tc_record_count_sum, metricsLabelCluster, metricsLabelBroker)) + > 0); + String metrics_key_txn_tc_max_delay = + "pulsar_txn_tc_bufferedwriter_flush_trigger_max_delay_total{cluster=\"%s\",broker=\"%s\"} "; + Assert.assertTrue(searchMetricsValue(metricsLines, + String.format(metrics_key_txn_tc_max_delay, metricsLabelCluster, metricsLabelBroker)) + > 0); + String metrics_key_txn_tc_bytes_size = + "pulsar_txn_tc_bufferedwriter_batch_size_bytes_sum{cluster=\"%s\",broker=\"%s\"} "; + Assert.assertTrue(searchMetricsValue(metricsLines, + String.format(metrics_key_txn_tc_bytes_size, metricsLabelCluster, metricsLabelBroker)) + > 0); + // verify pending ack. 
+ String metrics_key_txn_pending_ack_record_count_sum = + "pulsar_txn_pending_ack_store_bufferedwriter_batch_record_count_sum{cluster=\"%s\",broker=\"%s\"} "; + Assert.assertTrue(searchMetricsValue(metricsLines, + String.format(metrics_key_txn_pending_ack_record_count_sum, metricsLabelCluster, metricsLabelBroker)) + > 0); + String metrics_key_txn_pending_ack_max_delay = + "pulsar_txn_pending_ack_store_bufferedwriter_flush_trigger_max_delay_total{cluster=\"%s\",broker=\"%s\"} "; + Assert.assertTrue(searchMetricsValue(metricsLines, + String.format(metrics_key_txn_pending_ack_max_delay, metricsLabelCluster, metricsLabelBroker)) + > 0); + String metrics_key_txn_pending_ack_bytes_size = + "pulsar_txn_pending_ack_store_bufferedwriter_batch_size_bytes_sum{cluster=\"%s\",broker=\"%s\"} "; + Assert.assertTrue(searchMetricsValue(metricsLines, + String.format(metrics_key_txn_pending_ack_bytes_size, metricsLabelCluster, metricsLabelBroker)) + > 0); + + // cleanup. + response.close(); + client.close(); + admin.topics().delete(topicName, true); + } + + private static Double searchMetricsValue(List metricsLines, String key){ + for (int i = 0; i < metricsLines.size(); i++){ + String metricsLine = metricsLines.get(i); + if (metricsLine.startsWith("#")){ + continue; + } + if (metricsLine.startsWith(key)){ + return Double.valueOf(metricsLine.split(" ")[1]); + } + } + return null; + } + + private void sendAndAckSomeMessages() throws Exception { + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topicName) + .sendTimeout(0, TimeUnit.SECONDS) + .enableBatching(false) + .batchingMaxMessages(2) + .create(); + Consumer consumer = pulsarClient.newConsumer() + .subscriptionType(SubscriptionType.Shared) + .topic(topicName) + .isAckReceiptEnabled(true) + .acknowledgmentGroupTime(0, TimeUnit.SECONDS) + .subscriptionName("my-subscription") + .subscribe(); + producer.sendAsync("normal message x".getBytes()).get(); + for (int i = 0; i < 100; i++){ + Transaction transaction = + 
pulsarClient.newTransaction().withTransactionTimeout(10, TimeUnit.SECONDS).build().get(); + Message msg = consumer.receive(); + producer.newMessage(transaction).value(("tx msg a-" + i).getBytes(StandardCharsets.UTF_8)).sendAsync(); + producer.newMessage(transaction).value(("tx msg b-" + i).getBytes(StandardCharsets.UTF_8)).sendAsync(); + consumer.acknowledgeAsync(msg.getMessageId(), transaction); + transaction.commit().get(); + } + } + + private static void ensureClusterExists(PulsarService pulsar, String cluster) throws Exception { + ClusterResources clusterResources = pulsar.getPulsarResources().getClusterResources(); + ClusterData clusterData = ClusterData.builder() + .serviceUrl(pulsar.getWebServiceAddress()) + .serviceUrlTls(pulsar.getWebServiceAddress()) + .brokerServiceUrl(pulsar.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar.getBrokerServiceUrl()) + .build(); + if (!clusterResources.clusterExists(cluster)) { + clusterResources.createCluster(cluster, clusterData); + } + } + + private static void ensureTopicExists(NamespaceResources.PartitionedTopicResources partitionedTopicResources, + TopicName topicName, int numPartitions) throws Exception { + Optional getResult = + partitionedTopicResources.getPartitionedTopicMetadataAsync(topicName).get(); + if (!getResult.isPresent()) { + partitionedTopicResources.createPartitionedTopic(topicName, new PartitionedTopicMetadata(numPartitions)); + } else { + PartitionedTopicMetadata existsMeta = getResult.get(); + if (existsMeta.partitions < numPartitions) { + partitionedTopicResources.updatePartitionedTopicAsync(topicName, + __ -> new PartitionedTopicMetadata(numPartitions)).get(); + } + } + } + + private static void ensureNamespaceExists(NamespaceResources namespaceResources, NamespaceName namespaceName, + String cluster) throws Exception { + if (!namespaceResources.namespaceExists(namespaceName)) { + Policies policies = new Policies(); + policies.bundles = getBundles(16); + policies.replication_clusters = 
Collections.singleton(cluster); + namespaceResources.createPolicies(namespaceName, policies); + } else { + namespaceResources.setPolicies(namespaceName, policies -> { + policies.replication_clusters.add(cluster); + return policies; + }); + } + } + + private static void ensureTenantExists(TenantResources tenantResources, String tenant, String cluster) + throws Exception { + if (!tenantResources.tenantExists(tenant)) { + TenantInfoImpl publicTenant = new TenantInfoImpl(Collections.emptySet(), Collections.singleton(cluster)); + tenantResources.createTenant(tenant, publicTenant); + } else { + tenantResources.updateTenantAsync(tenant, ti -> { + ti.getAllowedClusters().add(cluster); + return ti; + }).get(); + } + } + + private List parseResponseEntityToList(Response response) throws Exception { + InputStream inputStream = (InputStream) response.getEntity(); + BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream)); + List list = new ArrayList<>(); + while (true){ + String str = bufferedReader.readLine(); + if (str == null){ + break; + } + list.add(str); + } + return list; + } + + protected PulsarClient newPulsarClient(String url, int intervalInSecs) throws PulsarClientException { + org.apache.pulsar.client.api.ClientBuilder clientBuilder = + PulsarClient.builder() + .serviceUrl(url) + .enableTransaction(true) + .statsInterval(intervalInSecs, TimeUnit.SECONDS); + customizeNewPulsarClientBuilder(clientBuilder); + return createNewPulsarClient(clientBuilder); + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java index c309f69fd566d..6a078fc19a73d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java @@ -21,6 +21,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; 
import static org.apache.pulsar.common.naming.SystemTopicNames.PENDING_ACK_STORE_SUFFIX; import static org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl.TRANSACTION_LOG_PREFIX; +import static org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.doAnswer; @@ -691,7 +692,8 @@ public void testEndTPRecoveringWhenManagerLedgerDisReadable() throws Exception{ TransactionPendingAckStoreProvider pendingAckStoreProvider = mock(TransactionPendingAckStoreProvider.class); doReturn(CompletableFuture.completedFuture( new MLPendingAckStore(persistentTopic.getManagedLedger(), managedCursor, null, - 500, bufferedWriterConfig, transactionTimer))) + 500, bufferedWriterConfig, transactionTimer, + DISABLED_BUFFERED_WRITER_METRICS))) .when(pendingAckStoreProvider).newPendingAckStore(any()); doReturn(CompletableFuture.completedFuture(true)).when(pendingAckStoreProvider).checkInitializedBefore(any()); @@ -757,7 +759,7 @@ public void testEndTCRecoveringWhenManagerLedgerDisReadable() throws Exception{ MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(new TransactionCoordinatorID(1), null, persistentTopic.getManagedLedger().getConfig(), new TxnLogBufferedWriterConfig(), - transactionTimer); + transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); Class mlTransactionLogClass = MLTransactionLogImpl.class; Field field = mlTransactionLogClass.getDeclaredField("cursor"); field.setAccessible(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java index fe5c7fa196977..0b14f6592fa82 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java @@ -41,6 +41,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.State.WriteFailed; +import static org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; import static org.testng.Assert.assertTrue; import static org.testng.AssertJUnit.fail; @@ -81,7 +82,7 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { ManagedCursor subCursor = completableFuture.get().openCursor("test"); MLPendingAckStore pendingAckStore = new MLPendingAckStore(completableFuture.get(), cursor, subCursor, 500, - bufferedWriterConfig, transactionTimer); + bufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); Field field = MLPendingAckStore.class.getDeclaredField("managedLedger"); field.setAccessible(true); diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/DisabledTxnLogBufferedWriterMetricsStats.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/DisabledTxnLogBufferedWriterMetricsStats.java new file mode 100644 index 0000000000000..5a6238a95e491 --- /dev/null +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/DisabledTxnLogBufferedWriterMetricsStats.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.transaction.coordinator.impl; + +import io.prometheus.client.Collector; +import io.prometheus.client.CollectorRegistry; + +public class DisabledTxnLogBufferedWriterMetricsStats extends TxnLogBufferedWriterMetricsStats { + + public static final DisabledTxnLogBufferedWriterMetricsStats DISABLED_BUFFERED_WRITER_METRICS = + new DisabledTxnLogBufferedWriterMetricsStats(); + + private static class DisabledCollectorRegistry extends CollectorRegistry { + + private static final DisabledCollectorRegistry INSTANCE = new DisabledCollectorRegistry(); + + public void register(Collector m) { + } + public void unregister(Collector m) { + } + } + + private DisabledTxnLogBufferedWriterMetricsStats() { + super("disabled", new String[0], new String[0], DisabledCollectorRegistry.INSTANCE); + } + + public void close() { + } + + public void triggerFlushByRecordsCount(int recordCount, long bytesSize, long delayMillis) { + } + + public void triggerFlushByBytesSize(int recordCount, long bytesSize, long delayMillis) { + } + + public void triggerFlushByByMaxDelay(int recordCount, long bytesSize, long delayMillis) { + } + + public void triggerFlushByLargeSingleData(int recordCount, long bytesSize, long delayMillis) { + } +} diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java index 7b4b07ee652b6..67c0b54504bf0 100644 --- 
a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java @@ -82,11 +82,14 @@ public class MLTransactionLogImpl implements TransactionLog { private final TxnLogBufferedWriterConfig txnLogBufferedWriterConfig; + private final TxnLogBufferedWriterMetricsStats bufferedWriterMetrics; + public MLTransactionLogImpl(TransactionCoordinatorID tcID, ManagedLedgerFactory managedLedgerFactory, ManagedLedgerConfig managedLedgerConfig, TxnLogBufferedWriterConfig txnLogBufferedWriterConfig, - Timer timer) { + Timer timer, + TxnLogBufferedWriterMetricsStats bufferedWriterMetrics) { this.topicName = getMLTransactionLogName(tcID); this.tcId = tcID.getId(); this.managedLedgerFactory = managedLedgerFactory; @@ -97,6 +100,7 @@ public MLTransactionLogImpl(TransactionCoordinatorID tcID, this.managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled(true); } this.entryQueue = new SpscArrayQueue<>(2000); + this.bufferedWriterMetrics = bufferedWriterMetrics; } public static TopicName getMLTransactionLogName(TransactionCoordinatorID tcID) { @@ -119,7 +123,8 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { txnLogBufferedWriterConfig.getBatchedWriteMaxRecords(), txnLogBufferedWriterConfig.getBatchedWriteMaxSize(), txnLogBufferedWriterConfig.getBatchedWriteMaxDelayInMillis(), - txnLogBufferedWriterConfig.isBatchEnabled()); + txnLogBufferedWriterConfig.isBatchEnabled(), + bufferedWriterMetrics); managedLedger.asyncOpenCursor(TRANSACTION_SUBSCRIPTION_NAME, CommandSubscribe.InitialPosition.Earliest, new AsyncCallbacks.OpenCursorCallback() { diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java 
b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java index 7e6a7059125d0..c11e422d27a8f 100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java @@ -19,6 +19,7 @@ package org.apache.pulsar.transaction.coordinator.impl; import io.netty.util.Timer; +import io.prometheus.client.CollectorRegistry; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerFactory; @@ -33,6 +34,22 @@ */ public class MLTransactionMetadataStoreProvider implements TransactionMetadataStoreProvider { + + private static volatile TxnLogBufferedWriterMetricsStats bufferedWriterMetrics = + DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; + + public static void initBufferedWriterMetrics(String brokerAdvertisedAddress){ + if (bufferedWriterMetrics != DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS){ + return; + } + synchronized (MLTransactionMetadataStoreProvider.class){ + if (bufferedWriterMetrics != DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS){ + return; + } + bufferedWriterMetrics = new MLTransactionMetadataStoreBufferedWriterMetrics(brokerAdvertisedAddress); + } + } + @Override public CompletableFuture openStore(TransactionCoordinatorID transactionCoordinatorId, ManagedLedgerFactory managedLedgerFactory, @@ -45,11 +62,21 @@ public CompletableFuture openStore(TransactionCoordina MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl txnLog = new 
MLTransactionLogImpl(transactionCoordinatorId, - managedLedgerFactory, managedLedgerConfig, txnLogBufferedWriterConfig, timer); + managedLedgerFactory, managedLedgerConfig, txnLogBufferedWriterConfig, timer, bufferedWriterMetrics); // MLTransactionLogInterceptor will init sequenceId and update the sequenceId to managedLedger properties. return txnLog.initialize().thenCompose(__ -> new MLTransactionMetadataStore(transactionCoordinatorId, txnLog, timeoutTracker, mlTransactionSequenceIdGenerator, maxActiveTransactionsPerCoordinator).init(recoverTracker)); } + + private static class MLTransactionMetadataStoreBufferedWriterMetrics extends TxnLogBufferedWriterMetricsStats { + + private MLTransactionMetadataStoreBufferedWriterMetrics(String brokerAdvertisedAddress) { + super("pulsar_txn_tc", + new String[]{"broker"}, + new String[]{brokerAdvertisedAddress}, + CollectorRegistry.defaultRegistry); + } + } } \ No newline at end of file diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriter.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriter.java index df9cb613e1c32..2346ebb3a8024 100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriter.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriter.java @@ -128,14 +128,6 @@ public class TxnLogBufferedWriter { trigFlushByTimingTask(); }; - public TxnLogBufferedWriter(ManagedLedger managedLedger, OrderedExecutor orderedExecutor, Timer timer, - DataSerializer dataSerializer, - int batchedWriteMaxRecords, int batchedWriteMaxSize, int batchedWriteMaxDelayInMillis, - boolean batchEnabled){ - this(managedLedger, orderedExecutor, timer, dataSerializer, batchedWriteMaxRecords, batchedWriteMaxSize, - batchedWriteMaxDelayInMillis, batchEnabled, null); - } - /** * 
Constructor. * @param dataSerializer The serializer for the object which called by {@link #asyncAddData}. @@ -174,6 +166,9 @@ public TxnLogBufferedWriter(ManagedLedger managedLedger, OrderedExecutor ordered this.flushContext = FlushContext.newInstance(); this.dataArray = new ArrayList<>(); STATE_UPDATER.set(this, State.OPEN); + if (metrics == null){ + throw new IllegalArgumentException("Build TxnLogBufferedWriter error: param metrics can not be null"); + } this.metrics = metrics; this.timer = timer; // scheduler task. @@ -284,10 +279,8 @@ private void trigFlushByTimingTask(){ if (flushContext.asyncAddArgsList.isEmpty()) { return; } - if (metrics != null) { - metrics.triggerFlushByByMaxDelay(flushContext.asyncAddArgsList.size(), bytesSize, - System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); - } + metrics.triggerFlushByByMaxDelay(flushContext.asyncAddArgsList.size(), bytesSize, + System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); doFlush(); } catch (Exception e){ log.error("Trig flush by timing task fail.", e); @@ -303,18 +296,14 @@ private void trigFlushByTimingTask(){ */ private void trigFlushIfReachMaxRecordsOrMaxSize(){ if (flushContext.asyncAddArgsList.size() >= batchedWriteMaxRecords) { - if (metrics != null) { - metrics.triggerFlushByRecordsCount(flushContext.asyncAddArgsList.size(), bytesSize, - System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); - } + metrics.triggerFlushByRecordsCount(flushContext.asyncAddArgsList.size(), bytesSize, + System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); doFlush(); return; } if (bytesSize >= batchedWriteMaxSize) { - if (metrics != null) { - metrics.triggerFlushByBytesSize(flushContext.asyncAddArgsList.size(), bytesSize, - System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); - } + metrics.triggerFlushByBytesSize(flushContext.asyncAddArgsList.size(), bytesSize, + System.currentTimeMillis() - 
flushContext.asyncAddArgsList.get(0).addedTime); doFlush(); } } @@ -323,10 +312,8 @@ private void trigFlushByLargeSingleData(){ if (flushContext.asyncAddArgsList.isEmpty()) { return; } - if (metrics != null) { - metrics.triggerFlushByLargeSingleData(this.flushContext.asyncAddArgsList.size(), this.bytesSize, - System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); - } + metrics.triggerFlushByLargeSingleData(this.flushContext.asyncAddArgsList.size(), this.bytesSize, + System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime); doFlush(); } diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterMetricsStats.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterMetricsStats.java index fd8f53b72ed07..6eafe79638c4a 100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterMetricsStats.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterMetricsStats.java @@ -22,6 +22,7 @@ import io.prometheus.client.Counter; import io.prometheus.client.Histogram; import java.io.Closeable; +import java.util.concurrent.atomic.AtomicBoolean; import lombok.Getter; /*** @@ -46,6 +47,7 @@ public class TxnLogBufferedWriterMetricsStats implements Closeable { static final double[] MAX_DELAY_TIME_BUCKETS = {1, 5, 10}; + private final CollectorRegistry collectorRegistry; @Getter private final String metricsPrefix; @@ -87,12 +89,15 @@ public class TxnLogBufferedWriterMetricsStats implements Closeable { private final Counter batchFlushTriggeredByLargeSingleDataMetric; private final Counter.Child batchFlushTriggeredByLargeSingleDataCounter; + private final AtomicBoolean closed; + /** * Users needs to ensure that the {@link TxnLogBufferedWriterMetricsStats} of the same {@param metricsPrefix} can 
* only create once, otherwise an IllegalArgumentException will be thrown. */ public TxnLogBufferedWriterMetricsStats(String metricsPrefix, String[] labelNames, String[] labelValues, CollectorRegistry registry) { + this.collectorRegistry = registry; this.metricsPrefix = metricsPrefix; this.labelNames = labelNames.clone(); this.labelValues = labelValues.clone(); @@ -101,79 +106,85 @@ public TxnLogBufferedWriterMetricsStats(String metricsPrefix, String[] labelName String.format("%s_bufferedwriter_batch_record_count", metricsPrefix); recordsPerBatchMetric = new Histogram.Builder() .name(recordsPerBatchMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Records per batch histogram") .buckets(RECORD_COUNT_PER_ENTRY_BUCKETS) - .register(registry); - recordsPerBatchHistogram = recordsPerBatchMetric.labels(labelValues); + .register(collectorRegistry); + recordsPerBatchHistogram = recordsPerBatchMetric.labels(this.labelValues); String batchSizeBytesMetricName = String.format("%s_bufferedwriter_batch_size_bytes", metricsPrefix); batchSizeBytesMetric = new Histogram.Builder() .name(batchSizeBytesMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Batch size in bytes histogram") .buckets(BYTES_SIZE_PER_ENTRY_BUCKETS) - .register(registry); - batchSizeBytesHistogram = batchSizeBytesMetric.labels(labelValues); + .register(collectorRegistry); + batchSizeBytesHistogram = batchSizeBytesMetric.labels(this.labelValues); String oldestRecordInBatchDelayTimeSecondsMetricName = String.format("%s_bufferedwriter_batch_oldest_record_delay_time_second", metricsPrefix); oldestRecordInBatchDelayTimeSecondsMetric = new Histogram.Builder() .name(oldestRecordInBatchDelayTimeSecondsMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Max record latency in batch histogram") .buckets(MAX_DELAY_TIME_BUCKETS) - .register(registry); + .register(collectorRegistry); oldestRecordInBatchDelayTimeSecondsHistogram = - 
oldestRecordInBatchDelayTimeSecondsMetric.labels(labelValues); + oldestRecordInBatchDelayTimeSecondsMetric.labels(this.labelValues); String batchFlushTriggeringByMaxRecordsMetricName = String.format("%s_bufferedwriter_flush_trigger_max_records", metricsPrefix); batchFlushTriggeredByMaxRecordsMetric = new Counter.Builder() .name(batchFlushTriggeringByMaxRecordsMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Event count of batch flush triggered by max records count") - .register(registry); - batchFlushTriggeredByMaxRecordsCounter = batchFlushTriggeredByMaxRecordsMetric.labels(labelValues); + .register(collectorRegistry); + batchFlushTriggeredByMaxRecordsCounter = batchFlushTriggeredByMaxRecordsMetric.labels(this.labelValues); String batchFlushTriggeringByMaxSizeMetricName = String.format("%s_bufferedwriter_flush_trigger_max_size", metricsPrefix); batchFlushTriggeredByMaxSizeMetric = new Counter.Builder() .name(batchFlushTriggeringByMaxSizeMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Event count of batch flush triggered by max bytes size") - .register(registry); - batchFlushTriggeredByMaxSizeCounter = batchFlushTriggeredByMaxSizeMetric.labels(labelValues); + .register(collectorRegistry); + batchFlushTriggeredByMaxSizeCounter = batchFlushTriggeredByMaxSizeMetric.labels(this.labelValues); String batchFlushTriggeringByMaxDelayMetricName = String.format("%s_bufferedwriter_flush_trigger_max_delay", metricsPrefix); batchFlushTriggeredByMaxDelayMetric = new Counter.Builder() .name(batchFlushTriggeringByMaxDelayMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Event count of batch flush triggered by max delay time") - .register(registry); + .register(collectorRegistry); batchFlushTriggeredByMaxDelayCounter = - batchFlushTriggeredByMaxDelayMetric.labels(labelValues); + batchFlushTriggeredByMaxDelayMetric.labels(this.labelValues); String batchFlushTriggeringByLargeSingleDataMetricName = 
String.format("%s_bufferedwriter_flush_trigger_large_data", metricsPrefix); batchFlushTriggeredByLargeSingleDataMetric = new Counter.Builder() .name(batchFlushTriggeringByLargeSingleDataMetricName) - .labelNames(labelNames) + .labelNames(this.labelNames) .help("Event count of batch flush triggered by the single large data write") - .register(registry); + .register(collectorRegistry); batchFlushTriggeredByLargeSingleDataCounter = - batchFlushTriggeredByLargeSingleDataMetric.labels(labelValues); + batchFlushTriggeredByLargeSingleDataMetric.labels(this.labelValues); + + this.closed = new AtomicBoolean(false); } public void close() { - recordsPerBatchMetric.remove(labelValues); - batchSizeBytesMetric.remove(labelValues); - oldestRecordInBatchDelayTimeSecondsMetric.remove(labelValues); - batchFlushTriggeredByMaxRecordsMetric.remove(labelValues); - batchFlushTriggeredByMaxSizeMetric.remove(labelValues); - batchFlushTriggeredByMaxDelayMetric.remove(labelValues); - batchFlushTriggeredByLargeSingleDataMetric.remove(labelValues); + // Doing unregister more than once will throw exception, so avoid repeating close. 
+ if (!closed.compareAndSet(false, true)){ + return; + } + collectorRegistry.unregister(recordsPerBatchMetric); + collectorRegistry.unregister(batchSizeBytesMetric); + collectorRegistry.unregister(oldestRecordInBatchDelayTimeSecondsMetric); + collectorRegistry.unregister(batchFlushTriggeredByMaxRecordsMetric); + collectorRegistry.unregister(batchFlushTriggeredByMaxSizeMetric); + collectorRegistry.unregister(batchFlushTriggeredByMaxDelayMetric); + collectorRegistry.unregister(batchFlushTriggeredByLargeSingleDataMetric); } public void triggerFlushByRecordsCount(int recordCount, long bytesSize, long delayMillis) { diff --git a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java index 41a2552d04f1f..1d5d9708a275f 100644 --- a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java +++ b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java @@ -54,6 +54,7 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.State.WriteFailed; +import static org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; @@ -84,7 +85,7 @@ public void testTransactionOperation(TxnLogBufferedWriterConfig txnLogBufferedWr MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, 
factory, - managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, @@ -172,7 +173,7 @@ public void testRecoverSequenceId(boolean isUseManagedLedgerProperties) throws E managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); managedLedgerConfig.setMaxEntriesPerLedger(3); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, disabledBufferedWriter, transactionTimer); + managedLedgerConfig, disabledBufferedWriter, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, @@ -201,7 +202,7 @@ public void testRecoverSequenceId(boolean isUseManagedLedgerProperties) throws E } mlTransactionLog.closeAsync().get(2, TimeUnit.SECONDS); mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, disabledBufferedWriter, transactionTimer); + managedLedgerConfig, disabledBufferedWriter, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, @@ -230,7 +231,7 @@ public void testInitTransactionReader(TxnLogBufferedWriterConfig txnLogBufferedW MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, 
factory, - managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStore = @@ -282,7 +283,8 @@ public void testInitTransactionReader(TxnLogBufferedWriterConfig txnLogBufferedW transactionMetadataStore.closeAsync(); MLTransactionLogImpl txnLog2 = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, + DISABLED_BUFFERED_WRITER_METRICS); txnLog2.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStoreTest = @@ -356,7 +358,7 @@ public void testDeleteLog(TxnLogBufferedWriterConfig txnLogBufferedWriterConfig) MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, @@ -436,7 +438,7 @@ public void testRecoverWhenDeleteFromCursor(TxnLogBufferedWriterConfig txnLogBuf MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, txnLogBufferedWriterConfig, 
transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, @@ -454,7 +456,7 @@ public void testRecoverWhenDeleteFromCursor(TxnLogBufferedWriterConfig txnLogBuf transactionMetadataStore.updateTxnStatus(txnID2, TxnStatus.ABORTED, TxnStatus.ABORTING, false).get(); mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, @@ -476,7 +478,7 @@ public void testManageLedgerWriteFailState(TxnLogBufferedWriterConfig txnLogBuff MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer); + managedLedgerConfig, txnLogBufferedWriterConfig, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLog.initialize().get(2, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, diff --git a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImplTest.java b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImplTest.java index 5c38a8c52df4f..235e8b42514c8 100644 --- 
a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImplTest.java +++ b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImplTest.java @@ -46,6 +46,7 @@ import org.apache.pulsar.transaction.coordinator.proto.TransactionMetadataEntry; import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; import org.apache.pulsar.transaction.coordinator.test.MockedBookKeeperTestCase; +import static org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; import static org.mockito.Mockito.*; import org.awaitility.Awaitility; import org.testng.Assert; @@ -82,7 +83,7 @@ public void testMainProcess(boolean writeWithBatch, boolean readWithBatch) throw bufferedWriterConfigForWrite.setBatchEnabled(writeWithBatch); TransactionCoordinatorID transactionCoordinatorID = TransactionCoordinatorID.get(0); MLTransactionLogImpl mlTransactionLogForWrite = new MLTransactionLogImpl(TransactionCoordinatorID.get(0), factory, - new ManagedLedgerConfig(), bufferedWriterConfigForWrite, transactionTimer); + new ManagedLedgerConfig(), bufferedWriterConfigForWrite, transactionTimer, DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLogForWrite.initialize().get(3, TimeUnit.SECONDS); Map>> expectedMapping = new HashMap<>(); /** @@ -156,8 +157,9 @@ public void testMainProcess(boolean writeWithBatch, boolean readWithBatch) throw bufferedWriterConfigForRecover.setBatchedWriteMaxDelayInMillis(1000 * 3600); bufferedWriterConfigForRecover.setBatchedWriteMaxRecords(3); bufferedWriterConfigForRecover.setBatchEnabled(readWithBatch); - MLTransactionLogImpl mlTransactionLogForRecover = new MLTransactionLogImpl(TransactionCoordinatorID.get(0), factory, - new ManagedLedgerConfig(), bufferedWriterConfigForRecover, transactionTimer); + MLTransactionLogImpl mlTransactionLogForRecover = new 
MLTransactionLogImpl(TransactionCoordinatorID.get(0), + factory, new ManagedLedgerConfig(), bufferedWriterConfigForRecover, transactionTimer, + DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLogForRecover.initialize().get(3, TimeUnit.SECONDS); // Recover and verify the txnID and position mappings. TransactionTimeoutTracker timeoutTracker = mock(TransactionTimeoutTracker.class); @@ -234,8 +236,9 @@ public void testMainProcess(boolean writeWithBatch, boolean readWithBatch) throw * the cursor-batch-indexes is expected. */ // Create another transaction log for recover. - MLTransactionLogImpl mlTransactionLogForDelete = new MLTransactionLogImpl(TransactionCoordinatorID.get(0), factory, - new ManagedLedgerConfig(), bufferedWriterConfigForRecover, transactionTimer); + MLTransactionLogImpl mlTransactionLogForDelete = new MLTransactionLogImpl(TransactionCoordinatorID.get(0), + factory, new ManagedLedgerConfig(), bufferedWriterConfigForRecover, transactionTimer, + DISABLED_BUFFERED_WRITER_METRICS); mlTransactionLogForDelete.initialize().get(3, TimeUnit.SECONDS); MLTransactionMetadataStore transactionMetadataStoreForDelete = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLogForDelete, timeoutTracker, sequenceIdGenerator, Integer.MAX_VALUE); diff --git a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterTest.java b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterTest.java index e2151e7388138..5f254eb45b70d 100644 --- a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterTest.java +++ b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/impl/TxnLogBufferedWriterTest.java @@ -60,6 +60,7 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import static 
org.apache.pulsar.transaction.coordinator.impl.DisabledTxnLogBufferedWriterMetricsStats.DISABLED_BUFFERED_WRITER_METRICS; import static org.testng.Assert.*; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -189,7 +190,7 @@ public void testMainProcess(int batchedWriteMaxRecords, int batchedWriteMaxSize, TxnLogBufferedWriter txnLogBufferedWriter = new TxnLogBufferedWriter( managedLedger, orderedExecutor, transactionTimer, dataSerializer, batchedWriteMaxRecords, batchedWriteMaxSize, - batchedWriteMaxDelayInMillis, batchEnabled); + batchedWriteMaxDelayInMillis, batchEnabled, DISABLED_BUFFERED_WRITER_METRICS); // Store the param-context, param-position, param-exception of callback function and complete-count for verify. List contextArrayOfCallback = Collections.synchronizedList(new ArrayList<>()); List exceptionArrayOfCallback = Collections.synchronizedList(new ArrayList<>()); @@ -394,7 +395,8 @@ public Object answer(InvocationOnMock invocation) throws Throwable { }).when(managedLedger).asyncAddEntry(Mockito.any(ByteBuf.class), Mockito.any(), Mockito.any()); // Test threshold: writeMaxDelayInMillis (use timer). TxnLogBufferedWriter txnLogBufferedWriter1 = new TxnLogBufferedWriter<>(managedLedger, orderedExecutor, - transactionTimer, dataSerializer, 32, 1024 * 4, 100, true); + transactionTimer, dataSerializer, 32, 1024 * 4, + 100, true, DISABLED_BUFFERED_WRITER_METRICS); TxnLogBufferedWriter.AddDataCallback callback = Mockito.mock(TxnLogBufferedWriter.AddDataCallback.class); txnLogBufferedWriter1.asyncAddData(100, callback, 100); Thread.sleep(90); @@ -406,7 +408,8 @@ public Object answer(InvocationOnMock invocation) throws Throwable { // Test threshold: batchedWriteMaxRecords. 
TxnLogBufferedWriter txnLogBufferedWriter2 = new TxnLogBufferedWriter<>(managedLedger, orderedExecutor, - transactionTimer, dataSerializer, 32, 1024 * 4, 10000, true); + transactionTimer, dataSerializer, 32, 1024 * 4, + 10000, true, DISABLED_BUFFERED_WRITER_METRICS); for (int i = 0; i < 32; i++){ txnLogBufferedWriter2.asyncAddData(1, callback, 1); } @@ -416,7 +419,8 @@ public Object answer(InvocationOnMock invocation) throws Throwable { // Test threshold: batchedWriteMaxSize. TxnLogBufferedWriter txnLogBufferedWriter3 = new TxnLogBufferedWriter<>(managedLedger, orderedExecutor, - transactionTimer, dataSerializer, 1024, 64 * 4, 10000, true); + transactionTimer, dataSerializer, 1024, 64 * 4, + 10000, true, DISABLED_BUFFERED_WRITER_METRICS); for (int i = 0; i < 64; i++){ txnLogBufferedWriter3.asyncAddData(1, callback, 1); } @@ -444,7 +448,8 @@ public void testPendingScheduleTriggerTaskCount() throws Exception { // Create components. OrderedExecutor orderedExecutor = Mockito.mock(OrderedExecutor.class); ArrayBlockingQueue workQueue = new ArrayBlockingQueue<>(65536 * 2); - ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS, workQueue); + ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(1, 1, 5, + TimeUnit.SECONDS, workQueue); Mockito.when(orderedExecutor.chooseThread(Mockito.anyString())).thenReturn(threadPoolExecutor); HashedWheelTimer transactionTimer = new HashedWheelTimer(new DefaultThreadFactory("transaction-timer"), 1, TimeUnit.MILLISECONDS); @@ -452,8 +457,9 @@ public void testPendingScheduleTriggerTaskCount() throws Exception { // Mock managed ledger and write counter. MockedManagedLedger mockedManagedLedger = mockManagedLedgerWithWriteCounter(mlName); // Start tests. 
- TxnLogBufferedWriter txnLogBufferedWriter = new TxnLogBufferedWriter<>(mockedManagedLedger.managedLedger, orderedExecutor, - transactionTimer, dataSerializer, 2, 1024 * 4, 1, true); + TxnLogBufferedWriter txnLogBufferedWriter = new TxnLogBufferedWriter<>(mockedManagedLedger.managedLedger, + orderedExecutor, transactionTimer, dataSerializer, 2, 1024 * 4, + 1, true, DISABLED_BUFFERED_WRITER_METRICS); TxnLogBufferedWriter.AddDataCallback callback = Mockito.mock(TxnLogBufferedWriter.AddDataCallback.class); // Append heavier tasks to the Ledger thread. final ExecutorService executorService = orderedExecutor.chooseThread(mlName); From 3de690d44deb7b52fd89805ece9ac55fa104038d Mon Sep 17 00:00:00 2001 From: fengyubiao Date: Thu, 29 Sep 2022 16:42:06 +0800 Subject: [PATCH 29/59] [fix][flaky-test]ProxyConnectionThrottlingTest.testInboundConnection (#17883) --- pulsar-proxy/pom.xml | 6 ++++++ .../server/ProxyConnectionThrottlingTest.java | 14 ++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/pulsar-proxy/pom.xml b/pulsar-proxy/pom.xml index 103e816497f55..0fcc2c5c7aa8a 100644 --- a/pulsar-proxy/pom.xml +++ b/pulsar-proxy/pom.xml @@ -159,6 +159,12 @@ test + + org.awaitility + awaitility + test + + com.beust jcommander diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java index fb4de9a65b0b8..9b18e44ffb142 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java @@ -32,6 +32,7 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; import 
org.testng.annotations.AfterClass; @@ -108,7 +109,9 @@ public void testInboundConnection() throws Exception { } catch (Exception ex) { // OK } - Assert.assertEquals(ConnectionController.DefaultConnectionController.getTotalConnectionNum(), 4); + Awaitility.await().untilAsserted(() ->{ + Assert.assertEquals(ConnectionController.DefaultConnectionController.getTotalConnectionNum(), 4); + }); Assert.assertEquals(ConnectionController.DefaultConnectionController.getConnections().size(), 1); Set keys = ConnectionController.DefaultConnectionController.getConnections().keySet(); for (String key : keys) { @@ -119,7 +122,9 @@ public void testInboundConnection() throws Exception { client1.close(); - Assert.assertEquals(ConnectionController.DefaultConnectionController.getTotalConnectionNum(), 2); + Awaitility.await().untilAsserted(() ->{ + Assert.assertEquals(ConnectionController.DefaultConnectionController.getTotalConnectionNum(), 2); + }); Assert.assertEquals(ConnectionController.DefaultConnectionController.getConnections().size(), 1); keys = ConnectionController.DefaultConnectionController.getConnections().keySet(); for (String key : keys) { @@ -129,8 +134,9 @@ public void testInboundConnection() throws Exception { Assert.assertEquals(ProxyService.ACTIVE_CONNECTIONS.get(), 2.0d); client2.close(); - - Assert.assertEquals(ConnectionController.DefaultConnectionController.getTotalConnectionNum(), 0); + Awaitility.await().untilAsserted(() ->{ + Assert.assertEquals(ConnectionController.DefaultConnectionController.getTotalConnectionNum(), 0); + }); Assert.assertEquals(ConnectionController.DefaultConnectionController.getConnections().size(), 0); Assert.assertEquals(ProxyService.ACTIVE_CONNECTIONS.get(), 0.0d); } From 9026d1954d180cfb4b3a38f52217b14a3b5e3dc0 Mon Sep 17 00:00:00 2001 From: Andrey Yegorov <8622884+dlg99@users.noreply.github.com> Date: Thu, 29 Sep 2022 02:06:31 -0700 Subject: [PATCH 30/59] [Fix][Tiered Storage] Eagerly Delete Offloaded Segments On Topic Deletion (#15914) 
* Truncate topic before deletion to avoid orphaned offloaded ledgers * CR feedback --- .../mledger/ManagedLedgerFactory.java | 20 ++ .../impl/ManagedLedgerFactoryImpl.java | 110 +++++++-- .../mledger/impl/ManagedLedgerImpl.java | 78 +++---- .../mledger/offload/OffloadUtils.java | 28 +++ .../mledger/impl/ManagedLedgerTest.java | 8 +- .../mledger/impl/OffloadPrefixTest.java | 61 +++++ .../pulsar/broker/service/BrokerService.java | 32 +-- .../service/persistent/PersistentTopic.java | 108 +++++---- .../broker/service/PersistentTopicTest.java | 2 + .../integration/offload/TestBaseOffload.java | 210 ++++++++++++++++-- .../offload/TestFileSystemOffload.java | 5 +- .../offload/TestOffloadDeletionFS.java | 144 ++++++++++++ .../integration/offload/TestS3Offload.java | 2 +- .../offload/TestUniversalConfigurations.java | 2 +- .../suites/PulsarTieredStorageTestSuite.java | 4 +- 15 files changed, 666 insertions(+), 148 deletions(-) create mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java index e42c2581ba101..21841544f8102 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java @@ -145,6 +145,16 @@ void asyncOpenReadOnlyCursor(String managedLedgerName, Position startPosition, M */ void delete(String name) throws InterruptedException, ManagedLedgerException; + /** + * Delete a managed ledger. If it's not open, it's metadata will get regardless deleted. + * + * @param name + * @throws InterruptedException + * @throws ManagedLedgerException + */ + void delete(String name, CompletableFuture mlConfigFuture) + throws InterruptedException, ManagedLedgerException; + /** * Delete a managed ledger. 
If it's not open, it's metadata will get regardless deleted. * @@ -154,6 +164,16 @@ void asyncOpenReadOnlyCursor(String managedLedgerName, Position startPosition, M */ void asyncDelete(String name, DeleteLedgerCallback callback, Object ctx); + /** + * Delete a managed ledger. If it's not open, it's metadata will get regardless deleted. + * + * @param name + * @throws InterruptedException + * @throws ManagedLedgerException + */ + void asyncDelete(String name, CompletableFuture mlConfigFuture, + DeleteLedgerCallback callback, Object ctx); + /** * Releases all the resources maintained by the ManagedLedgerFactory. * diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java index d7596a7468a40..e4bc53de52889 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java @@ -26,6 +26,7 @@ import io.netty.util.concurrent.DefaultThreadFactory; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -71,6 +72,7 @@ import org.apache.bookkeeper.mledger.impl.MetaStore.MetaStoreCallback; import org.apache.bookkeeper.mledger.impl.cache.EntryCacheManager; import org.apache.bookkeeper.mledger.impl.cache.RangeEntryCacheManagerImpl; +import org.apache.bookkeeper.mledger.offload.OffloadUtils; import org.apache.bookkeeper.mledger.proto.MLDataFormats; import org.apache.bookkeeper.mledger.proto.MLDataFormats.LongProperty; import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo; @@ -78,6 +80,7 @@ import org.apache.bookkeeper.mledger.util.Futures; import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.stats.StatsLogger; +import 
org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.common.policies.data.EnsemblePlacementPolicyConfig; import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; @@ -802,12 +805,18 @@ public void operationFailed(MetaStoreException e) { @Override public void delete(String name) throws InterruptedException, ManagedLedgerException { + delete(name, CompletableFuture.completedFuture(null)); + } + + @Override + public void delete(String name, CompletableFuture mlConfigFuture) + throws InterruptedException, ManagedLedgerException { class Result { ManagedLedgerException e = null; } final Result r = new Result(); final CountDownLatch latch = new CountDownLatch(1); - asyncDelete(name, new DeleteLedgerCallback() { + asyncDelete(name, mlConfigFuture, new DeleteLedgerCallback() { @Override public void deleteLedgerComplete(Object ctx) { latch.countDown(); @@ -829,10 +838,16 @@ public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { @Override public void asyncDelete(String name, DeleteLedgerCallback callback, Object ctx) { + asyncDelete(name, CompletableFuture.completedFuture(null), callback, ctx); + } + + @Override + public void asyncDelete(String name, CompletableFuture mlConfigFuture, + DeleteLedgerCallback callback, Object ctx) { CompletableFuture future = ledgers.get(name); if (future == null) { // Managed ledger does not exist and we're not currently trying to open it - deleteManagedLedger(name, callback, ctx); + deleteManagedLedger(name, mlConfigFuture, callback, ctx); } else { future.thenAccept(ml -> { // If it's open, delete in the normal way @@ -847,7 +862,8 @@ public void asyncDelete(String name, DeleteLedgerCallback callback, Object ctx) /** * Delete all managed ledger resources and metadata. 
*/ - void deleteManagedLedger(String managedLedgerName, DeleteLedgerCallback callback, Object ctx) { + void deleteManagedLedger(String managedLedgerName, CompletableFuture mlConfigFuture, + DeleteLedgerCallback callback, Object ctx) { // Read the managed ledger metadata from store asyncGetManagedLedgerInfo(managedLedgerName, new ManagedLedgerInfoCallback() { @Override @@ -859,7 +875,7 @@ public void getInfoComplete(ManagedLedgerInfo info, Object ctx) { .map(e -> deleteCursor(bkc, managedLedgerName, e.getKey(), e.getValue())) .collect(Collectors.toList()); Futures.waitForAll(futures).thenRun(() -> { - deleteManagedLedgerData(bkc, managedLedgerName, info, callback, ctx); + deleteManagedLedgerData(bkc, managedLedgerName, info, mlConfigFuture, callback, ctx); }).exceptionally(ex -> { callback.deleteLedgerFailed(new ManagedLedgerException(ex), ctx); return null; @@ -874,22 +890,80 @@ public void getInfoFailed(ManagedLedgerException exception, Object ctx) { } private void deleteManagedLedgerData(BookKeeper bkc, String managedLedgerName, ManagedLedgerInfo info, - DeleteLedgerCallback callback, Object ctx) { + CompletableFuture mlConfigFuture, + DeleteLedgerCallback callback, Object ctx) { + final CompletableFuture> + ledgerInfosFuture = new CompletableFuture<>(); + store.getManagedLedgerInfo(managedLedgerName, false, null, + new MetaStoreCallback<>() { + @Override + public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat) { + Map infos = new HashMap<>(); + for (MLDataFormats.ManagedLedgerInfo.LedgerInfo ls : mlInfo.getLedgerInfoList()) { + infos.put(ls.getLedgerId(), ls); + } + ledgerInfosFuture.complete(infos); + } + + @Override + public void operationFailed(MetaStoreException e) { + log.error("Failed to get managed ledger info for {}", managedLedgerName, e); + ledgerInfosFuture.completeExceptionally(e); + } + }); + Futures.waitForAll(info.ledgers.stream() - .filter(li -> !li.isOffloaded) - .map(li -> 
bkc.newDeleteLedgerOp().withLedgerId(li.ledgerId).execute() - .handle((result, ex) -> { - if (ex != null) { - int rc = BKException.getExceptionCode(ex); - if (rc == BKException.Code.NoSuchLedgerExistsOnMetadataServerException - || rc == BKException.Code.NoSuchLedgerExistsException) { - log.info("Ledger {} does not exist, ignoring", li.ledgerId); - return null; - } - throw new CompletionException(ex); + .map(li -> { + final CompletableFuture res; + if (li.isOffloaded) { + res = mlConfigFuture + .thenCombine(ledgerInfosFuture, Pair::of) + .thenCompose(pair -> { + ManagedLedgerConfig mlConfig = pair.getLeft(); + Map ledgerInfos = pair.getRight(); + + if (mlConfig == null || ledgerInfos == null) { + return CompletableFuture.completedFuture(null); + } + + MLDataFormats.ManagedLedgerInfo.LedgerInfo ls = ledgerInfos.get(li.ledgerId); + + if (ls.getOffloadContext().hasUidMsb()) { + MLDataFormats.ManagedLedgerInfo.LedgerInfo.Builder newInfoBuilder = ls.toBuilder(); + newInfoBuilder.getOffloadContextBuilder().setBookkeeperDeleted(true); + String driverName = OffloadUtils.getOffloadDriverName(ls, + mlConfig.getLedgerOffloader().getOffloadDriverName()); + Map driverMetadata = OffloadUtils.getOffloadDriverMetadata(ls, + mlConfig.getLedgerOffloader().getOffloadDriverMetadata()); + OffloadUtils.setOffloadDriverMetadata(newInfoBuilder, driverName, driverMetadata); + + UUID uuid = new UUID(ls.getOffloadContext().getUidMsb(), + ls.getOffloadContext().getUidLsb()); + return OffloadUtils.cleanupOffloaded(li.ledgerId, uuid, mlConfig, + OffloadUtils.getOffloadDriverMetadata(ls, + mlConfig.getLedgerOffloader().getOffloadDriverMetadata()), + "Deletion", managedLedgerName, scheduledExecutor); } - return result; - })) + + return CompletableFuture.completedFuture(null); + }); + } else { + res = CompletableFuture.completedFuture(null); + } + return res.thenCompose(__ -> bkc.newDeleteLedgerOp().withLedgerId(li.ledgerId).execute() + .handle((result, ex) -> { + if (ex != null) { + int rc = 
BKException.getExceptionCode(ex); + if (rc == BKException.Code.NoSuchLedgerExistsOnMetadataServerException + || rc == BKException.Code.NoSuchLedgerExistsException) { + log.info("Ledger {} does not exist, ignoring", li.ledgerId); + return null; + } + throw new CompletionException(ex); + } + return result; + })); + }) .collect(Collectors.toList())) .thenRun(() -> { // Delete the metadata diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index 254ee767bc7fc..a4f94b2a9b71a 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -2450,7 +2450,7 @@ void internalTrimConsumedLedgers(CompletableFuture promise) { void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { if (!factory.isMetadataServiceAvailable()) { // Defer trimming of ledger if we cannot connect to metadata service - promise.complete(null); + promise.completeExceptionally(new MetaStoreException("Metadata service is not available")); return; } @@ -2722,11 +2722,30 @@ public void deleteLedgerFailed(ManagedLedgerException e, Object ctx) { @Override public void asyncDelete(final DeleteLedgerCallback callback, final Object ctx) { + // Delete the managed ledger without closing, since we are not interested in gracefully closing cursors and // ledgers setFenced(); cancelScheduledTasks(); + // Truncate to ensure the offloaded data is not orphaned. + // Also ensures the BK ledgers are deleted and not just scheduled for deletion + CompletableFuture truncateFuture = asyncTruncate(); + truncateFuture.whenComplete((ignore, exc) -> { + if (exc != null) { + log.error("[{}] Error truncating ledger for deletion", name, exc); + callback.deleteLedgerFailed(exc instanceof ManagedLedgerException + ? 
(ManagedLedgerException) exc : new ManagedLedgerException(exc), + ctx); + } else { + asyncDeleteInternal(callback, ctx); + } + }); + + } + + private void asyncDeleteInternal(final DeleteLedgerCallback callback, final Object ctx) { + List cursors = Lists.newArrayList(this.cursors); if (cursors.isEmpty()) { // No cursors to delete, proceed with next step @@ -2784,10 +2803,9 @@ private void asyncDeleteLedger(long ledgerId, LedgerInfo info) { if (info.getOffloadContext().hasUidMsb()) { UUID uuid = new UUID(info.getOffloadContext().getUidMsb(), info.getOffloadContext().getUidLsb()); - cleanupOffloaded(ledgerId, uuid, - OffloadUtils.getOffloadDriverName(info, config.getLedgerOffloader().getOffloadDriverName()), + OffloadUtils.cleanupOffloaded(ledgerId, uuid, config, OffloadUtils.getOffloadDriverMetadata(info, config.getLedgerOffloader().getOffloadDriverMetadata()), - "Trimming"); + "Trimming", name, scheduledExecutor); } } @@ -2842,7 +2860,7 @@ private void deleteAllLedgers(DeleteLedgerCallback callback, Object ctx) { default: // Handle error log.warn("[{}] Failed to delete ledger {} -- {}", name, ls.getLedgerId(), - BKException.getMessage(rc)); + BKException.getMessage(rc) + " code " + rc); int toDelete = ledgersToDelete.get(); if (toDelete != -1 && ledgersToDelete.compareAndSet(toDelete, -1)) { // Trigger callback only once @@ -3031,18 +3049,17 @@ private void offloadLoop(CompletableFuture promise, Queue prepareLedgerInfoForOffloaded(long ledgerId, UUI oldInfo.getOffloadContext().getUidLsb()); log.info("[{}] Found previous offload attempt for ledger {}, uuid {}" + ", cleaning up", name, ledgerId, uuid); - cleanupOffloaded( + OffloadUtils.cleanupOffloaded( ledgerId, oldUuid, - OffloadUtils.getOffloadDriverName(oldInfo, - config.getLedgerOffloader().getOffloadDriverName()), + config, OffloadUtils.getOffloadDriverMetadata(oldInfo, config.getLedgerOffloader().getOffloadDriverMetadata()), - "Previous failed offload"); + "Previous failed offload", + name, + 
scheduledExecutor); } LedgerInfo.Builder builder = oldInfo.toBuilder(); builder.getOffloadContextBuilder() @@ -3230,28 +3248,6 @@ private CompletableFuture completeLedgerInfoForOffloaded(long ledgerId, UU }); } - private void cleanupOffloaded(long ledgerId, UUID uuid, String offloadDriverName, /* - * TODO: use driver name to - * identify offloader - */ - Map offloadDriverMetadata, String cleanupReason) { - log.info("[{}] Cleanup offload for ledgerId {} uuid {} because of the reason {}.", - name, ledgerId, uuid.toString(), cleanupReason); - Map metadataMap = new HashMap(); - metadataMap.putAll(offloadDriverMetadata); - metadataMap.put("ManagedLedgerName", name); - - Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), TimeUnit.SECONDS.toHours(1)).limit(10), - Retries.NonFatalPredicate, - () -> config.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, metadataMap), - scheduledExecutor, name).whenComplete((ignored, exception) -> { - if (exception != null) { - log.warn("[{}] Error cleaning up offload for {}, (cleanup reason: {})", - name, ledgerId, cleanupReason, exception); - } - }); - } - /** * Get the number of entries between a contiguous range of two positions. 
* @@ -3760,7 +3756,7 @@ public static ManagedLedgerException createManagedLedgerException(int bkErrorCod } else if (isBkErrorNotRecoverable(bkErrorCode)) { return new NonRecoverableLedgerException(BKException.getMessage(bkErrorCode)); } else { - return new ManagedLedgerException(BKException.getMessage(bkErrorCode)); + return new ManagedLedgerException(BKException.getMessage(bkErrorCode) + " error code: " + bkErrorCode); } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java index 767a0c78b6d8a..3768c4dd61208 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java @@ -24,12 +24,18 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerMetadataBuilder; import org.apache.bookkeeper.client.api.DigestType; import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.common.util.Backoff; +import org.apache.bookkeeper.common.util.Retries; +import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.proto.MLDataFormats.KeyValue; import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo; import org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext; @@ -181,4 +187,26 @@ public static LedgerMetadata parseLedgerMetadata(long id, byte[] bytes) throws I return builder.build(); } + + public static CompletableFuture cleanupOffloaded(long ledgerId, UUID uuid, ManagedLedgerConfig mlConfig, + Map offloadDriverMetadata, String cleanupReason, 
+ String name, org.apache.bookkeeper.common.util.OrderedScheduler executor) { + log.info("[{}] Cleanup offload for ledgerId {} uuid {} because of the reason {}.", + name, ledgerId, uuid.toString(), cleanupReason); + Map metadataMap = new HashMap(); + metadataMap.putAll(offloadDriverMetadata); + metadataMap.put("ManagedLedgerName", name); + + return Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), + TimeUnit.SECONDS.toHours(1)).limit(10), + Retries.NonFatalPredicate, + () -> mlConfig.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, metadataMap), + executor, name).whenComplete((ignored, exception) -> { + if (exception != null) { + log.warn("[{}] Error cleaning up offload for {}, (cleanup reason: {})", + name, ledgerId, cleanupReason, exception); + } + }); + } + } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java index a07a84f70bdc2..4484327ad8dc7 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java @@ -2992,7 +2992,8 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { ledger.asyncCreateLedger(bk, config, null, (rc, lh, ctx) -> {}, Collections.emptyMap()); retryStrategically((test) -> responseException1.get() != null, 5, 1000); assertNotNull(responseException1.get()); - assertEquals(responseException1.get().getMessage(), BKException.getMessage(BKException.Code.TimeoutException)); + assertTrue(responseException1.get().getMessage() + .startsWith(BKException.getMessage(BKException.Code.TimeoutException))); // (2) test read-timeout for: ManagedLedger.asyncReadEntry(..) 
AtomicReference responseException2 = new AtomicReference<>(); @@ -3017,13 +3018,14 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { return responseException2.get() != null; }, 5, 1000); assertNotNull(responseException2.get()); - assertEquals(responseException2.get().getMessage(), BKException.getMessage(BKException.Code.TimeoutException)); + assertTrue(responseException2.get().getMessage() + .startsWith(BKException.getMessage(BKException.Code.TimeoutException))); ledger.close(); } /** - * It verifies that if bk-client doesn't complete the add-entry in given time out then broker is resilient enought + * It verifies that if bk-client doesn't complete the add-entry in given time out then broker is resilient enough * to create new ledger and add entry successfully. * * diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java index ae0e53456e2d7..f35e40ce0529e 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java @@ -34,10 +34,14 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.stream.Collectors; + +import com.google.common.collect.Sets; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.api.ReadHandle; +import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.OffloadCallback; import org.apache.bookkeeper.mledger.LedgerOffloader; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -679,6 +683,63 @@ public void testOffloadDelete() throws Exception { assertEventuallyTrue(() -> 
offloader.deletedOffloads().contains(firstLedger)); } + @Test + public void testOffloadDeleteClosedLedger() throws Exception { + MockLedgerOffloader offloader = new MockLedgerOffloader(); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMaxEntriesPerLedger(10); + config.setMinimumRolloverTime(0, TimeUnit.SECONDS); + config.setRetentionTime(0, TimeUnit.MINUTES); + offloader.getOffloadPolicies().setManagedLedgerOffloadDeletionLagInMillis(100L); + offloader.getOffloadPolicies().setManagedLedgerOffloadThresholdInBytes(100L); + config.setLedgerOffloader(offloader); + ManagedLedgerImpl ledger = (ManagedLedgerImpl)factory.open("my_test_ledger", config); + ManagedCursor cursor = ledger.openCursor("foobar"); + + for (int i = 0; i < 15; i++) { + String content = "entry-" + i; + ledger.addEntry(content.getBytes()); + } + + assertEquals(ledger.getLedgersInfoAsList().size(), 2); + ledger.offloadPrefix(ledger.getLastConfirmedEntry()); + assertEquals(ledger.getLedgersInfoAsList().size(), 2); + + assertEquals(ledger.getLedgersInfoAsList().stream() + .filter(e -> e.getOffloadContext().getComplete()).count(), 1); + assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete()); + + Set offloadedledgers = Sets.newHashSet(offloader.offloadedLedgers()); + assertTrue(offloadedledgers.size() > 0); + + Set bkLedgersInMLedger = Sets.newHashSet(ledger.getLedgersInfo().keySet()); + assertTrue(bkLedgersInMLedger.size() > 0); + + factory.close(ledger); + ledger.close(); + + AtomicInteger success = new AtomicInteger(0); + factory.asyncDelete("my_test_ledger", CompletableFuture.completedFuture(config), + new AsyncCallbacks.DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + success.set(1); + } + + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + success.set(-1); + } + }, null); + assertEventuallyTrue(() -> success.get() == 1); + Set deletedledgers = 
offloader.deletedOffloads(); + assertEquals(offloadedledgers, deletedledgers); + + for (long ledgerId: bkLedgersInMLedger) { + assertFalse(bkc.getLedgers().contains(ledgerId)); + } + } + @Test public void testOffloadDeleteIncomplete() throws Exception { Set> deleted = ConcurrentHashMap.newKeySet(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index 8491615448aae..273af9460d98e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -1039,7 +1039,9 @@ public CompletableFuture> getTopic(final TopicName topicName, bo } public CompletableFuture deleteTopic(String topic, boolean forceDelete) { + TopicName topicName = TopicName.get(topic); Optional optTopic = getTopicReference(topic); + if (optTopic.isPresent()) { Topic t = optTopic.get(); if (forceDelete) { @@ -1066,9 +1068,8 @@ public CompletableFuture deleteTopic(String topic, boolean forceDelete) { return t.delete(); } - if (log.isDebugEnabled()) { - log.debug("Topic {} is not loaded, try to delete from metadata", topic); - } + log.info("Topic {} is not loaded, try to delete from metadata", topic); + // Topic is not loaded, though we still might be able to delete from metadata TopicName tn = TopicName.get(topic); if (!tn.isPersistent()) { @@ -1077,28 +1078,29 @@ public CompletableFuture deleteTopic(String topic, boolean forceDelete) { } CompletableFuture future = new CompletableFuture<>(); - CompletableFuture deleteTopicAuthenticationFuture = new CompletableFuture<>(); deleteTopicAuthenticationWithRetry(topic, deleteTopicAuthenticationFuture, 5); + deleteTopicAuthenticationFuture.whenComplete((v, ex) -> { if (ex != null) { future.completeExceptionally(ex); return; } - managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), new DeleteLedgerCallback() { - 
@Override - public void deleteLedgerComplete(Object ctx) { - future.complete(null); - } + CompletableFuture mlConfigFuture = getManagedLedgerConfig(topicName); + managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), + mlConfigFuture, new DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + future.complete(null); + } - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - future.completeExceptionally(exception); - } - }, null); + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + future.completeExceptionally(exception); + } + }, null); }); - return future; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index fdcaaf2ffbdb3..68d348e50eca9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -1031,10 +1031,12 @@ public CompletableFuture createSubscription(String subscriptionNam public CompletableFuture unsubscribe(String subscriptionName) { CompletableFuture unsubscribeFuture = new CompletableFuture<>(); + TopicName tn = TopicName.get(MLPendingAckStore + .getTransactionPendingAckStoreSuffix(topic, + Codec.encode(subscriptionName))); if (brokerService.pulsar().getConfiguration().isTransactionCoordinatorEnabled()) { - getBrokerService().getManagedLedgerFactory().asyncDelete(TopicName.get(MLPendingAckStore - .getTransactionPendingAckStoreSuffix(topic, - Codec.encode(subscriptionName))).getPersistenceNamingEncoding(), + getBrokerService().getManagedLedgerFactory().asyncDelete(tn.getPersistenceNamingEncoding(), + getBrokerService().getManagedLedgerConfig(tn), new AsyncCallbacks.DeleteLedgerCallback() { @Override public void 
deleteLedgerComplete(Object ctx) { @@ -1191,53 +1193,69 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, .thenCompose(__ -> deleteTopicPolicies()) .thenCompose(__ -> transactionBufferCleanupAndClose()) .whenComplete((v, ex) -> { - if (ex != null) { - log.error("[{}] Error deleting topic", topic, ex); - unfenceTopicToResume(); - deleteFuture.completeExceptionally(ex); - } else { - List> subsDeleteFutures = new ArrayList<>(); - subscriptions.forEach((sub, p) -> subsDeleteFutures.add(unsubscribe(sub))); - - FutureUtil.waitForAll(subsDeleteFutures).whenComplete((f, e) -> { - if (e != null) { - log.error("[{}] Error deleting topic", topic, e); + if (ex != null) { + log.error("[{}] Error deleting topic", topic, ex); unfenceTopicToResume(); - deleteFuture.completeExceptionally(e); + deleteFuture.completeExceptionally(ex); } else { - ledger.asyncDelete(new AsyncCallbacks.DeleteLedgerCallback() { - @Override - public void deleteLedgerComplete(Object ctx) { - brokerService.removeTopicFromCache(PersistentTopic.this); - - dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); - - subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); - - unregisterTopicPolicyListener(); - - log.info("[{}] Topic deleted", topic); - deleteFuture.complete(null); - } - - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - if (exception.getCause() - instanceof MetadataStoreException.NotFoundException) { - log.info("[{}] Topic is already deleted {}", - topic, exception.getMessage()); - deleteLedgerComplete(ctx); - } else { - unfenceTopicToResume(); - log.error("[{}] Error deleting topic", topic, exception); - deleteFuture.completeExceptionally(new PersistenceException(exception)); - } + List> subsDeleteFutures = new ArrayList<>(); + subscriptions.forEach((sub, p) -> subsDeleteFutures.add(unsubscribe(sub))); + + FutureUtil.waitForAll(subsDeleteFutures).whenComplete((f, e) -> { + if (e != null) { + log.error("[{}] Error deleting 
topic", topic, e); + unfenceTopicToResume(); + deleteFuture.completeExceptionally(e); + } else { + // Truncate to ensure the offloaded data is not orphaned. + // Also ensures the BK ledgers are deleted and not just + // scheduled for deletion + CompletableFuture truncateFuture = ledger.asyncTruncate(); + truncateFuture.whenComplete((ignore, exc) -> { + if (e != null) { + log.error("[{}] Error truncating topic", topic, e); + unfenceTopicToResume(); + deleteFuture.completeExceptionally(e); + } else { + ledger.asyncDelete(new AsyncCallbacks.DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + brokerService.removeTopicFromCache(PersistentTopic.this); + + dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); + + subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); + + unregisterTopicPolicyListener(); + + log.info("[{}] Topic deleted", topic); + deleteFuture.complete(null); + } + + @Override + public void + deleteLedgerFailed(ManagedLedgerException exception, + Object ctx) { + if (exception.getCause() + instanceof MetadataStoreException.NotFoundException) { + log.info("[{}] Topic is already deleted {}", + topic, exception.getMessage()); + deleteLedgerComplete(ctx); + } else { + unfenceTopicToResume(); + log.error("[{}] Error deleting topic", + topic, exception); + deleteFuture.completeExceptionally( + new PersistenceException(exception)); + } + } + }, null); + } + }); } - }, null); + }); } }); - } - }); } else { unfenceTopicToResume(); deleteFuture.completeExceptionally(new TopicBusyException( diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 970bfd763a4e5..dd6d00f4cda27 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -1256,6 +1256,8 @@ 
public void testCloseTopic() throws Exception { @Test public void testDeleteTopic() throws Exception { + doReturn(CompletableFuture.completedFuture(null)).when(ledgerMock).asyncTruncate(); + // create topic PersistentTopic topic = (PersistentTopic) brokerService.getOrCreateTopic(successTopicName).get(); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java index 1d7fb21062136..9e6b5261df16b 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java @@ -20,6 +20,8 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; @@ -32,14 +34,17 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.tests.integration.suites.PulsarTieredStorageTestSuite; +import org.awaitility.Awaitility; import org.testng.Assert; @Slf4j public abstract class TestBaseOffload extends PulsarTieredStorageTestSuite { - private static final int ENTRY_SIZE = 1024; + protected int getEntrySize() { + return 1024; + }; - private static byte[] buildEntry(String pattern) { - byte[] entry = new byte[ENTRY_SIZE]; + private byte[] buildEntry(String pattern) { + byte[] entry = new byte[getEntrySize()]; byte[] patternBytes = pattern.getBytes(); for (int i = 0; i < entry.length; i++) { @@ -64,15 +69,24 @@ protected void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String admi long firstLedger = -1; try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Producer producer = 
client.newProducer().topic(topic) + .maxPendingMessages(getNumEntriesPerLedger() / 2).sendTimeout(60, TimeUnit.SECONDS) .blockIfQueueFull(true).enableBatching(false).create();) { client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe().close(); // write enough to topic to make it roll int i = 0; - for (; i < ENTRIES_PER_LEDGER * 1.5; i++) { - producer.sendAsync(buildEntry("offload-message" + i)); + AtomicBoolean success = new AtomicBoolean(true); + + for (; i < getNumEntriesPerLedger() * 1.5; i++) { + producer.sendAsync(buildEntry("offload-message" + i)) + .exceptionally(e -> { + log.error("failed to send a message", e); + success.set(false); + return null; + });; } producer.flush(); + Assert.assertTrue(success.get()); } try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build()) { @@ -113,7 +127,7 @@ protected void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String admi try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Consumer consumer = client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe()) { // read back from topic - for (int i = 0; i < ENTRIES_PER_LEDGER * 1.5; i++) { + for (int i = 0; i < getNumEntriesPerLedger() * 1.5; i++) { Message m = consumer.receive(1, TimeUnit.MINUTES); Assert.assertEquals(buildEntry("offload-message" + i), m.getData()); } @@ -138,25 +152,32 @@ protected void testPublishOffloadAndConsumeViaThreshold(String serviceUrl, Strin long firstLedger = 0; try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Producer producer = client.newProducer().topic(topic) - .blockIfQueueFull(true).enableBatching(false).create(); - ) { + .maxPendingMessages(getNumEntriesPerLedger() / 2).sendTimeout(60, TimeUnit.SECONDS) + .blockIfQueueFull(true).enableBatching(false).create()) { client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe().close(); + AtomicBoolean success = new AtomicBoolean(true); // write enough to topic 
to make it roll twice - for (int i = 0; i < ENTRIES_PER_LEDGER * 2.5; i++) { - producer.sendAsync(buildEntry("offload-message" + i)); + for (int i = 0; i < getNumEntriesPerLedger() * 2.5; i++) { + producer.sendAsync(buildEntry("offload-message" + i)) + .exceptionally(e -> { + log.error("failed to send a message", e); + success.set(false); + return null; + });; } producer.flush(); + Assert.assertTrue(success.get()); } try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build()) { firstLedger = admin.topics().getInternalStats(topic).ledgers.get(0).ledgerId; // wait up to 30 seconds for offload to occur - for (int i = 0; i < 300 && !admin.topics().getInternalStats(topic).ledgers.get(0).offloaded; i++) { - Thread.sleep(100); + for (int i = 0; i < 100 && !admin.topics().getInternalStats(topic).ledgers.get(0).offloaded; i++) { + Thread.sleep(300); } Assert.assertTrue(admin.topics().getInternalStats(topic).ledgers.get(0).offloaded); @@ -175,8 +196,9 @@ protected void testPublishOffloadAndConsumeViaThreshold(String serviceUrl, Strin try (PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Consumer consumer = client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe()) { // read back from topic - for (int i = 0; i < ENTRIES_PER_LEDGER * 2.5; i++) { + for (int i = 0; i < getNumEntriesPerLedger() * 2.5; i++) { Message m = consumer.receive(1, TimeUnit.MINUTES); + Assert.assertNotNull(m); Assert.assertEquals(buildEntry("offload-message" + i), m.getData()); } } @@ -197,30 +219,52 @@ private boolean ledgerOffloaded(List le .map(l -> l.offloaded).findFirst().get(); } - private long writeAndWaitForOffload(String serviceUrl, String adminUrl, String topic) throws Exception { + private long writeAndWaitForOffload(String serviceUrl, String adminUrl, String topic) + throws Exception { + return writeAndWaitForOffload(serviceUrl, adminUrl, topic, -1); + } + + private long writeAndWaitForOffload(String serviceUrl, String adminUrl, 
String topic, int partitionNum) + throws Exception { try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Producer producer = client.newProducer().topic(topic) + .maxPendingMessages(getNumEntriesPerLedger() / 2).sendTimeout(60, TimeUnit.SECONDS) .blockIfQueueFull(true).enableBatching(false).create(); PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build()) { - List ledgers = admin.topics().getInternalStats(topic).ledgers; + String topicToCheck = partitionNum >= 0 + ? topic + "-partition-" + partitionNum + : topic; + + List ledgers = admin.topics() + .getInternalStats(topicToCheck).ledgers; long currentLedger = ledgers.get(ledgers.size() - 1).ledgerId; client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe().close(); + AtomicBoolean success = new AtomicBoolean(true); // write enough to topic to make it roll twice - for (int i = 0; i < ENTRIES_PER_LEDGER * 2.5; i++) { - producer.sendAsync(buildEntry("offload-message" + i)); + for (int i = 0; + i < getNumEntriesPerLedger() * 2.5 * (partitionNum > 0 ? 
partitionNum + 1 : 1); + i++) { + producer.sendAsync(buildEntry("offload-message" + i)) + .exceptionally(e -> { + log.error("failed to send a message", e); + success.set(false); + return null; + }); } + producer.flush(); producer.send(buildEntry("final-offload-message")); + Assert.assertTrue(success.get()); // wait up to 30 seconds for offload to occur for (int i = 0; - i < 300 && !ledgerOffloaded(admin.topics().getInternalStats(topic).ledgers, currentLedger); + i < 100 && !ledgerOffloaded(admin.topics().getInternalStats(topicToCheck).ledgers, currentLedger); i++) { - Thread.sleep(100); + Thread.sleep(300); } - Assert.assertTrue(ledgerOffloaded(admin.topics().getInternalStats(topic).ledgers, currentLedger)); + Assert.assertTrue(ledgerOffloaded(admin.topics().getInternalStats(topicToCheck).ledgers, currentLedger)); return currentLedger; } @@ -295,4 +339,130 @@ protected void testPublishOffloadAndConsumeDeletionLag(String serviceUrl, String Thread.sleep(5000); Assert.assertTrue(ledgerExistsInBookKeeper(offloadedLedger)); } + + protected void testDeleteOffloadedTopic(String serviceUrl, String adminUrl, + boolean unloadBeforeDelete, int numPartitions) throws Exception { + final String tenant = "offload-test-cli-" + randomName(4); + final String namespace = tenant + "/ns1"; + final String topic = "persistent://" + namespace + "/topic1"; + + pulsarCluster.runAdminCommandOnAnyBroker("tenants", + "create", "--allowed-clusters", pulsarCluster.getClusterName(), + "--admin-roles", "offload-admin", tenant); + + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", + "create", "--clusters", pulsarCluster.getClusterName(), namespace); + + // set threshold to offload runs immediately after role + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", + "set-offload-threshold", "--size", "0", namespace); + + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", + "set-retention", "--size", "100M", "--time", "100m", namespace); + + String output = 
pulsarCluster.runAdminCommandOnAnyBroker( + "namespaces", "get-offload-deletion-lag", namespace).getStdout(); + Assert.assertTrue(output.contains("Unset for namespace")); + + if (numPartitions > 0) { + pulsarCluster.runAdminCommandOnAnyBroker("topics", + "create-partitioned-topic", topic, + "--partitions", Integer.toString(numPartitions)); + } else { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "create", topic); + } + + long offloadedLedger = writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); + // give it up to 5 seconds to delete, it shouldn't + // so we wait this every time + Thread.sleep(5000); + Assert.assertTrue(ledgerExistsInBookKeeper(offloadedLedger)); + + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", "set-offload-deletion-lag", namespace, + "--lag", "0m"); + output = pulsarCluster.runAdminCommandOnAnyBroker( + "namespaces", "get-offload-deletion-lag", namespace).getStdout(); + Assert.assertTrue(output.contains("0 minute(s)")); + + offloadedLedger = writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); + // wait up to 10 seconds for ledger to be deleted + for (int i = 0; i < 10 && ledgerExistsInBookKeeper(offloadedLedger); i++) { + writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); + Thread.sleep(1000); + } + + Assert.assertFalse(ledgerExistsInBookKeeper(offloadedLedger)); + Assert.assertTrue(offloadedLedgerExists(topic, numPartitions - 1, offloadedLedger)); + + if (unloadBeforeDelete) { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "unload", topic); + } + if (numPartitions > 0) { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "delete-partitioned-topic", topic); + } else { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "delete", topic); + } + final long ledgerId = offloadedLedger; + Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + Assert.assertFalse(offloadedLedgerExists(topic, numPartitions - 1, ledgerId)); + }); + } + + protected void 
testDeleteOffloadedTopicExistsInBk(String serviceUrl, String adminUrl, + boolean unloadBeforeDelete, int numPartitions) throws Exception { + final String tenant = "offload-test-cli-" + randomName(4); + final String namespace = tenant + "/ns1"; + final String topic = "persistent://" + namespace + "/topic1"; + + pulsarCluster.runAdminCommandOnAnyBroker("tenants", + "create", "--allowed-clusters", pulsarCluster.getClusterName(), + "--admin-roles", "offload-admin", tenant); + + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", + "create", "--clusters", pulsarCluster.getClusterName(), namespace); + + // set threshold to offload runs immediately after role + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", + "set-offload-threshold", "--size", "0", namespace); + pulsarCluster.runAdminCommandOnAnyBroker("namespaces", + "set-retention", "--size", "100M", "--time", "100m", namespace); + + if (numPartitions > 0) { + pulsarCluster.runAdminCommandOnAnyBroker("topics", + "create-partitioned-topic", topic, + "--partitions", Integer.toString(numPartitions)); + } else { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "create", topic); + } + + String output = pulsarCluster.runAdminCommandOnAnyBroker( + "namespaces", "get-offload-deletion-lag", namespace).getStdout(); + Assert.assertTrue(output.contains("Unset for namespace")); + + long offloadedLedger = writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); + // give it up to 5 seconds to delete, it shouldn't + // so we wait this every time + Thread.sleep(5000); + Assert.assertTrue(ledgerExistsInBookKeeper(offloadedLedger)); + + Assert.assertTrue(offloadedLedgerExists(topic, numPartitions - 1, offloadedLedger)); + + if (unloadBeforeDelete) { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "unload", topic); + } + if (numPartitions > 0) { + pulsarCluster.runAdminCommandOnAnyBroker("topics", "delete-partitioned-topic", topic); + } else { + pulsarCluster.runAdminCommandOnAnyBroker("topics", 
"delete", topic); + } + final long ledgerId = offloadedLedger; + Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + Assert.assertFalse(offloadedLedgerExists(topic, numPartitions - 1, ledgerId)); + }); + Assert.assertFalse(ledgerExistsInBookKeeper(offloadedLedger)); + } + + protected boolean offloadedLedgerExists(String topic, int partitionNum, long firstLedger) { + throw new RuntimeException("not implemented"); + } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java index 808aae62e7419..48b86e8a1f45d 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java @@ -41,18 +41,17 @@ public void testPublishOffloadAndConsumeViaThreshold(Supplier serviceUrl @Test(dataProvider = "ServiceAndAdminUrls") public void testPublishOffloadAndConsumeDeletionLag(Supplier serviceUrl, Supplier adminUrl) throws Exception { super.testPublishOffloadAndConsumeDeletionLag(serviceUrl.get(), adminUrl.get()); - } - @Override protected Map getEnv() { Map result = new HashMap<>(); - result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(ENTRIES_PER_LEDGER)); + result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); result.put("managedLedgerOffloadDriver", "filesystem"); result.put("fileSystemURI", "file:///"); return result; } + } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java new file mode 100644 index 0000000000000..4b1739a0cd13b --- /dev/null +++ 
b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.offload; + +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.tests.integration.docker.ContainerExecException; +import org.apache.pulsar.tests.integration.docker.ContainerExecResult; +import org.testng.annotations.Test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +@Slf4j +public class TestOffloadDeletionFS extends TestBaseOffload { + + @Override + protected int getEntrySize() { + return 512; + } + + @Override + protected int getNumEntriesPerLedger() { + return 200; + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteOffloadedTopic(Supplier serviceUrl, Supplier adminUrl) throws Exception { + super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), false, 0); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteUnloadedOffloadedTopic(Supplier serviceUrl, 
Supplier adminUrl) + throws Exception { + super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), true, 0); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteOffloadedTopicExistsInBk(Supplier serviceUrl, Supplier adminUrl) + throws Exception { + super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), false, 0); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteUnloadedOffloadedTopicExistsInBk(Supplier serviceUrl, Supplier adminUrl) + throws Exception { + super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), true, 0); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteOffloadedPartitionedTopic(Supplier serviceUrl, Supplier adminUrl) throws Exception { + super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), false, 3); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteUnloadedOffloadedPartitionedTopic(Supplier serviceUrl, Supplier adminUrl) + throws Exception { + super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), true, 3); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteOffloadedPartitionedTopicExistsInBk(Supplier serviceUrl, Supplier adminUrl) + throws Exception { + super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), false, 3); + } + + @Test(dataProvider = "ServiceAndAdminUrls") + public void testDeleteUnloadedOffloadedPartitionedTopicExistsInBk(Supplier serviceUrl, + Supplier adminUrl) throws Exception { + super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), true, 3); + } + + @Override + protected Map getEnv() { + Map result = new HashMap<>(); + result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); + result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); + result.put("managedLedgerOffloadDriver", "filesystem"); + result.put("fileSystemURI", "file:///"); + + return result; + } + 
+ @Override + protected boolean offloadedLedgerExists(String topic, int partitionNum, long ledger) { + log.info("offloadedLedgerExists(topic = {}, partitionNum={},ledger={})", + topic, partitionNum, ledger); + if (partitionNum > -1) { + topic = topic + "-partition-" + partitionNum; + } + String managedLedgerName = TopicName.get(topic).getPersistenceNamingEncoding(); + String rootPath = "pulsar/"; + String dirPath = rootPath + managedLedgerName + "/"; + + List result = new LinkedList<>(); + String[] cmds = { + "ls", + "-1", + dirPath + }; + pulsarCluster.getBrokers().forEach(broker -> { + try { + ContainerExecResult res = broker.execCmd(cmds); + log.info("offloadedLedgerExists broker {} 'ls -1 {}' got {}", + broker.getContainerName(), dirPath, res.getStdout()); + Arrays.stream(res.getStdout().split("\n")) + .filter(x -> x.startsWith(ledger + "-")) + .forEach(x -> result.add(x)); + } catch (ContainerExecException ce) { + log.info("offloadedLedgerExists broker {} 'ls -1 {}' got error code {}", + broker.getContainerName(), dirPath, ce.getResult().getExitCode()); + // ignore 2 (No such file or directory) + if (ce.getResult().getExitCode() != 2) { + throw new RuntimeException(ce); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + return !result.isEmpty(); + } + +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java index edbbcfeba5e10..a230b13e215f5 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java @@ -73,7 +73,7 @@ public void testPublishOffloadAndConsumeDeletionLag(Supplier serviceUrl, @Override protected Map getEnv() { Map result = new HashMap<>(); - result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(ENTRIES_PER_LEDGER)); + 
result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); result.put("managedLedgerOffloadDriver", "aws-s3"); result.put("s3ManagedLedgerOffloadBucket", "pulsar-integtest"); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java index 9c53d801ea1eb..ef7406113f6ee 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java @@ -72,7 +72,7 @@ public void testPublishOffloadAndConsumeDeletionLag(Supplier serviceUrl, @Override protected Map getEnv() { Map result = new HashMap<>(); - result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(ENTRIES_PER_LEDGER)); + result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); result.put("managedLedgerOffloadDriver", "aws-s3"); result.put("managedLedgerOffloadBucket", "pulsar-integtest"); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java index 7811b38e0fd92..1c6bb9dc3f34c 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java @@ -31,7 +31,9 @@ @Slf4j public abstract class PulsarTieredStorageTestSuite extends PulsarClusterTestBase { - protected static final int ENTRIES_PER_LEDGER = 1024; + protected int getNumEntriesPerLedger() { + 
return 1024; + } @BeforeClass(alwaysRun = true) @Override From df5e0e1869ff7ce55489e4a7853172fa37b2b59d Mon Sep 17 00:00:00 2001 From: Andras Beni Date: Thu, 29 Sep 2022 11:07:22 +0200 Subject: [PATCH 31/59] [fix][cli] Check numMessages after incrementing counter (#17826) --- .../pulsar/testclient/PerformanceReader.java | 11 +++--- .../tests/integration/cli/PerfToolTest.java | 34 ++++++++++++++----- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java index 5245f634d1bdb..be42bbf8a0594 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java @@ -154,17 +154,18 @@ public static void main(String[] args) throws Exception { PerfClientUtils.exit(0); } } - if (arguments.numMessages > 0 && totalMessagesReceived.sum() >= arguments.numMessages) { - log.info("------------- DONE (reached the maximum number: [{}] of consumption) --------------", - arguments.numMessages); - PerfClientUtils.exit(0); - } messagesReceived.increment(); bytesReceived.add(msg.getData().length); totalMessagesReceived.increment(); totalBytesReceived.add(msg.getData().length); + if (arguments.numMessages > 0 && totalMessagesReceived.sum() >= arguments.numMessages) { + log.info("------------- DONE (reached the maximum number: [{}] of consumption) --------------", + arguments.numMessages); + PerfClientUtils.exit(0); + } + if (limiter != null) { limiter.acquire(); } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java index 55af57d3b5224..f87d11531dc3a 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java +++ 
b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/PerfToolTest.java @@ -28,26 +28,24 @@ import org.testng.annotations.Test; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.IntStream; public class PerfToolTest extends TopicMessagingBase { private static final int MESSAGE_COUNT = 50; @Test - private void testProduce() throws Exception { + public void testProduce() throws Exception { String serviceUrl = "pulsar://" + pulsarCluster.getProxy().getContainerName() + ":" + PulsarContainer.BROKER_PORT; final String topicName = getNonPartitionedTopic("testProduce", true); // Using the ZK container as it is separate from brokers, so its environment resembles real world usage more ZKContainer clientToolContainer = pulsarCluster.getZooKeeper(); - ContainerExecResult produceResult = produceWithPerfTool(clientToolContainer, serviceUrl, topicName); + ContainerExecResult produceResult = produceWithPerfTool(clientToolContainer, serviceUrl, topicName, MESSAGE_COUNT); checkOutputForLogs(produceResult,"PerformanceProducer - Aggregated throughput stats", "PerformanceProducer - Aggregated latency stats"); } @Test - private void testConsume() throws Exception { + public void testConsume() throws Exception { String serviceUrl = "pulsar://" + pulsarCluster.getProxy().getContainerName() + ":" + PulsarContainer.BROKER_PORT; final String topicName = getNonPartitionedTopic("testConsume", true); // Using the ZK container as it is separate from brokers, so its environment resembles real world usage more @@ -57,8 +55,19 @@ private void testConsume() throws Exception { "PerformanceConsumer - Aggregated latency stats"); } - private ContainerExecResult produceWithPerfTool(ChaosContainer container, String url, String topic) throws Exception { - ContainerExecResult result = container.execCmd("bin/pulsar-perf", "produce", "-u", url, "-m", String.valueOf(MESSAGE_COUNT), topic); + 
@Test + public void testRead() throws Exception { + String serviceUrl = "pulsar://" + pulsarCluster.getProxy().getContainerName() + ":" + PulsarContainer.BROKER_PORT; + final String topicName = getNonPartitionedTopic("testRead", true); + // Using the ZK container as it is separate from brokers, so its environment resembles real world usage more + ZKContainer clientToolContainer = pulsarCluster.getZooKeeper(); + ContainerExecResult readResult = readWithPerfTool(clientToolContainer, serviceUrl, topicName); + checkOutputForLogs(readResult,"PerformanceReader - Aggregated throughput stats ", + "PerformanceReader - Aggregated latency stats"); + } + + private ContainerExecResult produceWithPerfTool(ChaosContainer container, String url, String topic, int messageCount) throws Exception { + ContainerExecResult result = container.execCmd("bin/pulsar-perf", "produce", "-u", url, "-m", String.valueOf(messageCount), topic); return failOnError("Performance producer", result); } @@ -66,7 +75,16 @@ private ContainerExecResult produceWithPerfTool(ChaosContainer container, Str private ContainerExecResult consumeWithPerfTool(ChaosContainer container, String url, String topic) throws Exception { CompletableFuture resultFuture = container.execCmdAsync("bin/pulsar-perf", "consume", "-u", url, "-m", String.valueOf(MESSAGE_COUNT), topic); - produceWithPerfTool(container, url, topic); + produceWithPerfTool(container, url, topic, MESSAGE_COUNT); + + ContainerExecResult result = resultFuture.get(5, TimeUnit.SECONDS); + return failOnError("Performance consumer", result); + } + + private ContainerExecResult readWithPerfTool(ChaosContainer container, String url, String topic) throws Exception { + CompletableFuture resultFuture = + container.execCmdAsync("bin/pulsar-perf", "read", "-u", url, "-n", String.valueOf(MESSAGE_COUNT), topic); + produceWithPerfTool(container, url, topic, MESSAGE_COUNT); ContainerExecResult result = resultFuture.get(5, TimeUnit.SECONDS); return failOnError("Performance 
consumer", result); From c952f3c9f891f85ff4b6cee6e28b6f68db3b5bcd Mon Sep 17 00:00:00 2001 From: Zixuan Liu Date: Thu, 29 Sep 2022 18:48:38 +0800 Subject: [PATCH 32/59] [fix][proxy] Fix refresh client auth (#17831) * [fix][proxy] Fix refresh client auth Signed-off-by: Zixuan Liu * Fix style Signed-off-by: Zixuan Liu --- .../apache/pulsar/client/impl/ClientCnx.java | 17 +- .../pulsar/client/impl/ConnectionPool.java | 9 +- pulsar-proxy/pom.xml | 5 + .../pulsar/proxy/server/ProxyClientCnx.java | 77 ++++++-- .../pulsar/proxy/server/ProxyConnection.java | 73 +++++-- .../proxy/server/ProxyRefreshAuthTest.java | 186 ++++++++++++++++++ 6 files changed, 338 insertions(+), 29 deletions(-) create mode 100644 pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java index eb39fe53f1ab6..a6b9005611c0f 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java @@ -104,7 +104,7 @@ public class ClientCnx extends PulsarHandler { protected final Authentication authentication; - private State state; + protected State state; @Getter private final ConcurrentLongHashMap> pendingRequests = @@ -155,7 +155,7 @@ public class ClientCnx extends PulsarHandler { private final int maxNumberOfRejectedRequestPerConnection; private final int rejectedRequestResetTimeSec = 60; - private final int protocolVersion; + protected final int protocolVersion; private final long operationTimeoutMs; protected String proxyToTargetBrokerAddress = null; @@ -176,7 +176,10 @@ public class ClientCnx extends PulsarHandler { @Getter private final ClientCnxIdleState idleState; - enum State { + @Getter + private long lastDisconnectedTimestamp; + + protected enum State { None, SentConnectFrame, Ready, Failed, Connecting } @@ -281,6 
+284,7 @@ protected ByteBuf newConnectCommand() throws Exception { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { super.channelInactive(ctx); + lastDisconnectedTimestamp = System.currentTimeMillis(); log.info("{} Disconnected", ctx.channel()); if (!connectionFuture.isDone()) { connectionFuture.completeExceptionally(new PulsarClientException("Connection already closed")); @@ -1243,6 +1247,13 @@ public void close() { } } + protected void closeWithException(Throwable e) { + if (ctx != null) { + connectionFuture.completeExceptionally(e); + ctx.close(); + } + } + private void checkRequestTimeout() { while (!requestTimeoutQueue.isEmpty()) { RequestTime request = requestTimeoutQueue.peek(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java index d85d853c4aa5c..36f28fb575606 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java @@ -35,16 +35,19 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Random; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.InvalidServiceURL; @@ -453,5 +456,9 @@ public void doMarkAndReleaseUselessConnections(){ // Do release idle connections. 
releaseIdleConnectionTaskList.forEach(Runnable::run); } -} + public Set> getConnections() { + return Collections.unmodifiableSet( + pool.values().stream().flatMap(n -> n.values().stream()).collect(Collectors.toSet())); + } +} diff --git a/pulsar-proxy/pom.xml b/pulsar-proxy/pom.xml index 0fcc2c5c7aa8a..a870dbecded92 100644 --- a/pulsar-proxy/pom.xml +++ b/pulsar-proxy/pom.xml @@ -180,6 +180,11 @@ ipaddress ${seancfoley.ipaddress.version} + + org.awaitility + awaitility + test + diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java index 50a77d33683b4..283b835fff54f 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java @@ -18,30 +18,35 @@ */ package org.apache.pulsar.proxy.server; +import static com.google.common.base.Preconditions.checkArgument; import io.netty.buffer.ByteBuf; import io.netty.channel.EventLoopGroup; +import java.util.Arrays; +import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.PulsarVersion; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.api.AuthData; +import org.apache.pulsar.common.api.proto.CommandAuthChallenge; import org.apache.pulsar.common.protocol.Commands; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +@Slf4j public class ProxyClientCnx extends ClientCnx { - - String clientAuthRole; - AuthData clientAuthData; - String clientAuthMethod; - int protocolVersion; + private final boolean forwardClientAuthData; + private final String clientAuthMethod; + private final String clientAuthRole; + private final AuthData clientAuthData; + private final ProxyConnection proxyConnection; public ProxyClientCnx(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, String clientAuthRole, - AuthData 
clientAuthData, String clientAuthMethod, int protocolVersion) { - super(conf, eventLoopGroup); + AuthData clientAuthData, String clientAuthMethod, int protocolVersion, + boolean forwardClientAuthData, ProxyConnection proxyConnection) { + super(conf, eventLoopGroup, protocolVersion); this.clientAuthRole = clientAuthRole; this.clientAuthData = clientAuthData; this.clientAuthMethod = clientAuthMethod; - this.protocolVersion = protocolVersion; + this.forwardClientAuthData = forwardClientAuthData; + this.proxyConnection = proxyConnection; } @Override @@ -54,10 +59,54 @@ protected ByteBuf newConnectCommand() throws Exception { authenticationDataProvider = authentication.getAuthData(remoteHostName); AuthData authData = authenticationDataProvider.authenticate(AuthData.INIT_AUTH_DATA); - return Commands.newConnect(authentication.getAuthMethodName(), authData, this.protocolVersion, - PulsarVersion.getVersion(), proxyToTargetBrokerAddress, clientAuthRole, clientAuthData, - clientAuthMethod); + return Commands.newConnect(authentication.getAuthMethodName(), authData, protocolVersion, + PulsarVersion.getVersion(), proxyToTargetBrokerAddress, clientAuthRole, clientAuthData, + clientAuthMethod); } - private static final Logger log = LoggerFactory.getLogger(ProxyClientCnx.class); + @Override + protected void handleAuthChallenge(CommandAuthChallenge authChallenge) { + checkArgument(authChallenge.hasChallenge()); + checkArgument(authChallenge.getChallenge().hasAuthData()); + + boolean isRefresh = Arrays.equals(AuthData.REFRESH_AUTH_DATA_BYTES, authChallenge.getChallenge().getAuthData()); + if (!forwardClientAuthData || !isRefresh) { + super.handleAuthChallenge(authChallenge); + return; + } + + try { + if (log.isDebugEnabled()) { + log.debug("Proxy {} request to refresh the original client authentication data for " + + "the proxy client {}", proxyConnection.ctx().channel(), ctx.channel()); + } + + proxyConnection.ctx().writeAndFlush(Commands.newAuthChallenge(clientAuthMethod, 
AuthData.REFRESH_AUTH_DATA, + protocolVersion)) + .addListener(writeFuture -> { + if (writeFuture.isSuccess()) { + if (log.isDebugEnabled()) { + log.debug("Proxy {} sent the auth challenge to original client to refresh credentials " + + "with method {} for the proxy client {}", + proxyConnection.ctx().channel(), clientAuthMethod, ctx.channel()); + } + } else { + log.error("Failed to send the auth challenge to original client by the proxy {} " + + "for the proxy client {}", + proxyConnection.ctx().channel(), + ctx.channel(), + writeFuture.cause()); + closeWithException(writeFuture.cause()); + } + }); + + if (state == State.SentConnectFrame) { + state = State.Connecting; + } + } catch (Exception e) { + log.error("Failed to send the auth challenge to origin client by the proxy {} for the proxy client {}", + proxyConnection.ctx().channel(), ctx.channel(), e); + closeWithException(e); + } + } } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java index 8dbfd0844ebb3..b63ca3aa549a2 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java @@ -45,6 +45,7 @@ import javax.naming.AuthenticationException; import javax.net.ssl.SSLSession; import lombok.Getter; +import org.apache.pulsar.PulsarVersion; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationProvider; @@ -316,12 +317,11 @@ private synchronized void completeConnect(AuthData clientData) throws PulsarClie this.clientAuthData = clientData; this.clientAuthMethod = authMethod; } - clientCnxSupplier = - () -> new ProxyClientCnx(clientConf, service.getWorkerGroup(), clientAuthRole, clientAuthData, - clientAuthMethod, protocolVersionToAdvertise); + 
clientCnxSupplier = () -> new ProxyClientCnx(clientConf, service.getWorkerGroup(), clientAuthRole, + clientAuthData, clientAuthMethod, protocolVersionToAdvertise, + service.getConfiguration().isForwardAuthorizationCredentials(), this); } else { - clientCnxSupplier = - () -> new ClientCnx(clientConf, service.getWorkerGroup(), protocolVersionToAdvertise); + clientCnxSupplier = () -> new ClientCnx(clientConf, service.getWorkerGroup(), protocolVersionToAdvertise); } if (this.connectionPool == null) { @@ -423,16 +423,22 @@ public void brokerConnected(DirectProxyHandler directProxyHandler, CommandConnec } // According to auth result, send newConnected or newAuthChallenge command. - private void doAuthentication(AuthData clientData) throws Exception { + private void doAuthentication(AuthData clientData) + throws Exception { AuthData brokerData = authState.authenticate(clientData); // authentication has completed, will send newConnected command. if (authState.isComplete()) { clientAuthRole = authState.getAuthRole(); if (LOG.isDebugEnabled()) { LOG.debug("[{}] Client successfully authenticated with {} role {}", - remoteAddress, authMethod, clientAuthRole); + remoteAddress, authMethod, clientAuthRole); + } + + // First connection + if (this.connectionPool == null || state == State.Connecting) { + // authentication has completed, will send newConnected command. 
+ completeConnect(clientData); } - completeConnect(clientData); return; } @@ -441,7 +447,7 @@ private void doAuthentication(AuthData clientData) throws Exception { .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); if (LOG.isDebugEnabled()) { LOG.debug("[{}] Authentication in progress client by method {}.", - remoteAddress, authMethod); + remoteAddress, authMethod); } state = State.Connecting; } @@ -523,18 +529,63 @@ remoteAddress, protocolVersionToAdvertise, getRemoteEndpointProtocolVersion(), @Override protected void handleAuthResponse(CommandAuthResponse authResponse) { - checkArgument(state == State.Connecting); checkArgument(authResponse.hasResponse()); checkArgument(authResponse.getResponse().hasAuthData() && authResponse.getResponse().hasAuthMethodName()); if (LOG.isDebugEnabled()) { LOG.debug("Received AuthResponse from {}, auth method: {}", - remoteAddress, authResponse.getResponse().getAuthMethodName()); + remoteAddress, authResponse.getResponse().getAuthMethodName()); } try { AuthData clientData = AuthData.of(authResponse.getResponse().getAuthData()); doAuthentication(clientData); + if (service.getConfiguration().isForwardAuthorizationCredentials() + && connectionPool != null && state == State.ProxyLookupRequests) { + connectionPool.getConnections().forEach(toBrokerCnxFuture -> { + String clientVersion; + if (authResponse.hasClientVersion()) { + clientVersion = authResponse.getClientVersion(); + } else { + clientVersion = PulsarVersion.getVersion(); + } + int protocolVersion; + if (authResponse.hasProtocolVersion()) { + protocolVersion = authResponse.getProtocolVersion(); + } else { + protocolVersion = Commands.getCurrentProtocolVersion(); + } + + ByteBuf cmd = + Commands.newAuthResponse(clientAuthMethod, clientData, protocolVersion, clientVersion); + toBrokerCnxFuture.thenAccept(toBrokerCnx -> toBrokerCnx.ctx().writeAndFlush(cmd) + .addListener(writeFuture -> { + if (writeFuture.isSuccess()) { + if (LOG.isDebugEnabled()) { + LOG.debug("{} 
authentication is refreshed successfully by {}, " + + "auth method: {} ", + toBrokerCnx.ctx().channel(), ctx.channel(), clientAuthMethod); + } + } else { + LOG.error("Failed to forward the auth response " + + "from the proxy to the broker through the proxy client, " + + "proxy: {}, proxy client: {}", + ctx.channel(), + toBrokerCnx.ctx().channel(), + writeFuture.cause()); + toBrokerCnx.ctx().channel().pipeline() + .fireExceptionCaught(writeFuture.cause()); + } + })) + .whenComplete((__, ex) -> { + if (ex != null) { + LOG.error("Failed to forward the auth response from the proxy to " + + "the broker through the proxy client, proxy: {}", + ctx().channel(), ex); + } + }); + }); + } } catch (Exception e) { String msg = "Unable to handleAuthResponse"; LOG.warn("[{}] {} ", remoteAddress, msg, e); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java new file mode 100644 index 0000000000000..9ccb067adbf10 --- /dev/null +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.proxy.server; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.mockito.Mockito.spy; +import static org.testng.Assert.assertTrue; +import com.google.common.collect.Sets; +import io.jsonwebtoken.SignatureAlgorithm; +import java.util.Calendar; +import java.util.Collections; +import java.util.HashSet; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import javax.crypto.SecretKey; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.AuthenticationService; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.awaitility.Awaitility; +import org.mockito.Mockito; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Slf4j +public class ProxyRefreshAuthTest extends ProducerConsumerBase { + private final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + + private ProxyService proxyService; + private final ProxyConfiguration proxyConfig = new ProxyConfiguration(); + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + + // enable tls and 
auth&auth at broker + conf.setAuthenticationEnabled(true); + conf.setAuthorizationEnabled(false); + conf.setTopicLevelPoliciesEnabled(false); + conf.setProxyRoles(Collections.singleton("Proxy")); + conf.setAdvertisedAddress(null); + conf.setAuthenticateOriginalAuthData(true); + conf.setBrokerServicePort(Optional.of(0)); + conf.setWebServicePort(Optional.of(0)); + + Set superUserRoles = new HashSet<>(); + superUserRoles.add("superUser"); + conf.setSuperUserRoles(superUserRoles); + + conf.setAuthenticationProviders(Set.of(AuthenticationProviderToken.class.getName())); + Properties properties = new Properties(); + properties.setProperty("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(SECRET_KEY)); + conf.setProperties(properties); + + conf.setClusterName("proxy-authorization"); + conf.setNumExecutorThreadPoolSize(5); + + conf.setAuthenticationRefreshCheckSeconds(1); + } + + @BeforeClass + @Override + protected void setup() throws Exception { + super.init(); + + admin = PulsarAdmin.builder().serviceHttpUrl(pulsar.getWebServiceAddress()) + .authentication(new AuthenticationToken( + () -> AuthTokenUtils.createToken(SECRET_KEY, "client", Optional.empty()))).build(); + String namespaceName = "my-tenant/my-ns"; + admin.clusters().createCluster("proxy-authorization", + ClusterData.builder().serviceUrlTls(brokerUrlTls.toString()).build()); + admin.tenants().createTenant("my-tenant", + new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("proxy-authorization"))); + admin.namespaces().createNamespace(namespaceName); + + // start proxy service + proxyConfig.setAuthenticationEnabled(true); + proxyConfig.setAuthorizationEnabled(false); + proxyConfig.setForwardAuthorizationCredentials(true); + proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); + proxyConfig.setAdvertisedAddress(null); + + proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); + proxyConfig.setWebServicePort(Optional.of(0)); + + 
proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + proxyConfig.setBrokerClientAuthenticationParameters( + AuthTokenUtils.createToken(SECRET_KEY, "Proxy", Optional.empty())); + proxyConfig.setAuthenticationProviders(Set.of(AuthenticationProviderToken.class.getName())); + Properties properties = new Properties(); + properties.setProperty("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(SECRET_KEY)); + proxyConfig.setProperties(properties); + + proxyService = Mockito.spy(new ProxyService(proxyConfig, + new AuthenticationService( + PulsarConfigurationLoader.convertFrom(proxyConfig)))); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + proxyService.close(); + } + + private void startProxy(boolean forwardAuthData) throws Exception { + pulsar.getConfiguration().setAuthenticateOriginalAuthData(forwardAuthData); + proxyConfig.setForwardAuthorizationCredentials(forwardAuthData); + proxyService.start(); + } + + @DataProvider + Object[] forwardAuthDataProvider() { + return new Object[]{true, false}; + } + + @Test(dataProvider = "forwardAuthDataProvider") + public void testAuthDataRefresh(boolean forwardAuthData) throws Exception { + log.info("-- Starting {} test --", methodName); + + startProxy(forwardAuthData); + + AuthenticationToken authenticationToken = new AuthenticationToken(() -> { + Calendar calendar = Calendar.getInstance(); + calendar.add(Calendar.SECOND, 1); + return AuthTokenUtils.createToken(SECRET_KEY, "client", Optional.of(calendar.getTime())); + }); + + pulsarClient = PulsarClient.builder().serviceUrl(proxyService.getServiceUrl()) + .authentication(authenticationToken) + .build(); + + String topic = "persistent://my-tenant/my-ns/my-topic1"; + @Cleanup + Producer ignored = spy(pulsarClient.newProducer() + .topic(topic).create()); + + PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + Set> connections = 
pulsarClientImpl.getCnxPool().getConnections(); + + Awaitility.await().during(4, SECONDS).untilAsserted(() -> { + pulsarClient.getPartitionsForTopic(topic).get(); + assertTrue(connections.stream().allMatch(n -> { + try { + ClientCnx clientCnx = n.get(); + long timestamp = clientCnx.getLastDisconnectedTimestamp(); + return timestamp == 0; + } catch (Exception e) { + throw new RuntimeException(e); + } + })); + }); + } +} From 6cba1f65d8a9828d46426f606853eeddb4f7cd03 Mon Sep 17 00:00:00 2001 From: congbo <39078850+congbobo184@users.noreply.github.com> Date: Thu, 29 Sep 2022 23:53:46 +0800 Subject: [PATCH 33/59] [fix][flaky-test] BookKeeperClusterTestCase.setup() (#17865) Fixes: https://github.com/apache/pulsar/issues/15773 https://github.com/apache/pulsar/issues/16863 https://github.com/apache/pulsar/issues/16860 ### Motivation ``` Error: Tests run: 11, Failures: 1, Errors: 0, Skipped: 3, Time elapsed: 87.06 s <<< FAILURE! - in org.apache.pulsar.packages.management.storage.bookkeeper.BookKeeperPackagesStorageTest Error: setUp(org.apache.pulsar.packages.management.storage.bookkeeper.BookKeeperPackagesStorageTest) Time elapsed: 13.089 s <<< FAILURE! 
org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss at org.apache.zookeeper.KeeperException.create(KeeperException.java:102) at org.apache.bookkeeper.zookeeper.ZooKeeperWatcherBase.waitForConnection(ZooKeeperWatcherBase.java:159) at org.apache.bookkeeper.zookeeper.ZooKeeperClient$Builder.build(ZooKeeperClient.java:260) at org.apache.bookkeeper.test.ZooKeeperUtil.restartCluster(ZooKeeperUtil.java:133) at org.apache.bookkeeper.test.ZooKeeperUtil.startCluster(ZooKeeperUtil.java:104) at org.apache.pulsar.packages.management.storage.bookkeeper.bookkeeper.test.BookKeeperClusterTestCase.startZKCluster(BookKeeperClusterTestCase.java:238) at org.apache.pulsar.packages.management.storage.bookkeeper.bookkeeper.test.BookKeeperClusterTestCase.setUp(BookKeeperClusterTestCase.java:178) at org.apache.pulsar.packages.management.storage.bookkeeper.bookkeeper.test.BookKeeperClusterTestCase.setUp(BookKeeperClusterTestCase.java:166) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.testng.internal.MethodInvocationHelper.invokeMethod(MethodInvocationHelper.java:132) at org.testng.internal.MethodInvocationHelper.invokeMethodConsideringTimeout(MethodInvocationHelper.java:61) at org.testng.internal.ConfigInvoker.invokeConfigurationMethod(ConfigInvoker.java:366) at org.testng.internal.ConfigInvoker.invokeConfigurations(ConfigInvoker.java:320) at org.testng.internal.TestInvoker.runConfigMethods(TestInvoker.java:701) at org.testng.internal.TestInvoker.invokeMethod(TestInvoker.java:527) at org.testng.internal.TestInvoker.invokeTestMethod(TestInvoker.java:174) at org.testng.internal.MethodRunner.runInSequence(MethodRunner.java:46) at 
org.testng.internal.TestInvoker$MethodInvocationAgent.invoke(TestInvoker.java:822) at org.testng.internal.TestInvoker.invokeTestMethods(TestInvoker.java:147) at org.testng.internal.TestMethodWorker.invokeTestMethods(TestMethodWorker.java:146) at org.testng.internal.TestMethodWorker.run(TestMethodWorker.java:128) at java.base/java.util.ArrayList.forEach(ArrayList.java:1511) at org.testng.TestRunner.privateRun(TestRunner.java:764) at org.testng.TestRunner.run(TestRunner.java:585) at org.testng.SuiteRunner.runTest(SuiteRunner.java:384) at org.testng.SuiteRunner.runSequentially(SuiteRunner.java:378) at org.testng.SuiteRunner.privateRun(SuiteRunner.java:337) at org.testng.SuiteRunner.run(SuiteRunner.java:286) at org.testng.SuiteRunnerWorker.runSuite(SuiteRunnerWorker.java:53) at org.testng.SuiteRunnerWorker.run(SuiteRunnerWorker.java:96) at org.testng.TestNG.runSuitesSequentially(TestNG.java:1218) at org.testng.TestNG.runSuitesLocally(TestNG.java:1140) at org.testng.TestNG.runSuites(TestNG.java:1069) at org.testng.TestNG.run(TestNG.java:1037) at org.apache.maven.surefire.testng.TestNGExecutor.run(TestNGExecutor.java:135) at org.apache.maven.surefire.testng.TestNGDirectoryTestSuite.executeSingleClass(TestNGDirectoryTestSuite.java:112) at org.apache.maven.surefire.testng.TestNGDirectoryTestSuite.executeLazy(TestNGDirectoryTestSuite.java:123) at org.apache.maven.surefire.testng.TestNGDirectoryTestSuite.execute(TestNGDirectoryTestSuite.java:90) at org.apache.maven.surefire.testng.TestNGProvider.invoke(TestNGProvider.java:146) at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:384) at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:345) at org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:126) at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:418) ``` The root cause is that the zk client randomly selects IPV4 and IPV6 when parsing localhost, can 
connect when using IPV4, and fails when using IPV6. Therefore, if you continue to randomly connect to IPV6, the connection will timeout. https://github.com/apache/zookeeper/blob/bc1b231c9e32667b2978c86a6a64833470973dbd/zookeeper-server/src/main/java/org/apache/zookeeper/client/StaticHostProvider.java#L140-L146 Thanks to @poorbarcode for helping me locate the problem ### Modifications add @AfterMethod(alwaysRun = true) use Adress replace hostName ### Documentation - [x] `doc-not-needed` ### Matching PR in the forked repository PR in forked repository: - https://github.com/congbobo184/pulsar/pull/1 --- .../test/BookKeeperClusterTestCase.java | 2 +- .../apache/bookkeeper/test/ZooKeeperUtil.java | 4 +- .../BookKeeperPackagesStorageTest.java | 4 +- .../test/BookKeeperClusterTestCase.java | 3 +- .../bookkeeper/test/ZooKeeperUtil.java | 158 ++++++++++-------- 5 files changed, 96 insertions(+), 75 deletions(-) diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java b/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java index 39f9dc9ba7d84..e4f1d470bf6de 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java @@ -184,7 +184,7 @@ protected String changeLedgerPath() { return ""; } - @AfterTest + @AfterTest(alwaysRun = true) public void tearDown() throws Exception { boolean failed = false; for (Throwable e : asyncExceptions) { diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/test/ZooKeeperUtil.java b/managed-ledger/src/test/java/org/apache/bookkeeper/test/ZooKeeperUtil.java index f0dcd1d788355..6f9d04d9eb0b3 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/test/ZooKeeperUtil.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/test/ZooKeeperUtil.java @@ -144,8 +144,8 @@ public void restartCluster() throws Exception { if (0 == 
zooKeeperPort) { zooKeeperPort = serverFactory.getLocalPort(); - zkaddr = new InetSocketAddress(zkaddr.getHostName(), zooKeeperPort); - connectString = zkaddr.getHostName() + ":" + zooKeeperPort; + zkaddr = new InetSocketAddress(zkaddr.getAddress().getHostAddress(), zooKeeperPort); + connectString = zkaddr.getAddress().getHostAddress() + ":" + zooKeeperPort; } boolean b = ClientBase.waitForServerUp(getZooKeeperConnectString(), diff --git a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageTest.java b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageTest.java index 90458f96a2498..104fcbe3f3485 100644 --- a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageTest.java +++ b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageTest.java @@ -46,7 +46,7 @@ public BookKeeperPackagesStorageTest() { } @BeforeMethod() - public void setup() throws Exception { + public void start() throws Exception { PackagesStorageProvider provider = PackagesStorageProvider .newProvider(BookKeeperPackagesStorageProvider.class.getName()); DefaultPackagesStorageConfiguration configuration = new DefaultPackagesStorageConfiguration(); @@ -58,7 +58,7 @@ public void setup() throws Exception { } @AfterMethod(alwaysRun = true) - public void teardown() throws Exception { + public void close() throws Exception { if (storage != null) { storage.closeAsync().get(); } diff --git a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java 
b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java index 565a066c5f446..7921e784f60e7 100644 --- a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java +++ b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java @@ -82,7 +82,6 @@ import org.apache.bookkeeper.test.TmpDirs; import org.apache.bookkeeper.test.ZooKeeperCluster; import org.apache.bookkeeper.test.ZooKeeperClusterUtil; -import org.apache.bookkeeper.test.ZooKeeperUtil; import org.apache.bookkeeper.util.DiskChecker; import org.apache.bookkeeper.util.PortManager; import org.apache.zookeeper.KeeperException; @@ -191,7 +190,7 @@ protected String getMetadataServiceUri(String ledgersRootPath) { return zkUtil.getMetadataServiceUri(ledgersRootPath); } - @AfterMethod + @AfterMethod(alwaysRun = true) public void tearDown() throws Exception { boolean failed = false; for (Throwable e : asyncExceptions) { diff --git a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/ZooKeeperUtil.java b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/ZooKeeperUtil.java index 078eb2d70e1e9..17129435e8c3e 100644 --- a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/ZooKeeperUtil.java +++ b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/ZooKeeperUtil.java @@ -16,120 +16,142 @@ * specific language governing permissions and limitations * under the License. 
*/ -/** - * This file is derived from ZooKeeperUtil from Apache BookKeeper - * http://bookkeeper.apache.org - */ package org.apache.pulsar.packages.management.storage.bookkeeper.bookkeeper.test; +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.apache.bookkeeper.test.ZooKeeperCluster; +import org.apache.bookkeeper.util.IOUtils; import org.apache.bookkeeper.zookeeper.ZooKeeperClient; import org.apache.commons.io.FileUtils; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.server.NIOServerCnxnFactory; import org.apache.zookeeper.server.ZooKeeperServer; import org.apache.zookeeper.test.ClientBase; +import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.testng.Assert; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.concurrent.CountDownLatch; - -public class ZooKeeperUtil { - static final Logger LOG = LoggerFactory.getLogger(org.apache.bookkeeper.test.ZooKeeperUtil.class); - - // ZooKeeper related variables - protected static final Integer zooKeeperPort = PortManager.nextFreePort(); - private final InetSocketAddress zkaddr; +public class ZooKeeperUtil implements ZooKeeperCluster { + static final Logger LOG; + protected Integer zooKeeperPort = 0; + private InetSocketAddress zkaddr; protected ZooKeeperServer zks; - protected ZooKeeper zkc; // zookeeper client + protected ZooKeeper zkc; protected NIOServerCnxnFactory serverFactory; - protected File ZkTmpDir; - private final String connectString; + protected File zkTmpDir; + private String connectString; public ZooKeeperUtil() { - zkaddr = new InetSocketAddress(zooKeeperPort); - connectString = "localhost:" + zooKeeperPort; + String loopbackIPAddr = 
InetAddress.getLoopbackAddress().getHostAddress(); + this.zkaddr = new InetSocketAddress(loopbackIPAddr, 0); + this.connectString = loopbackIPAddr + ":" + this.zooKeeperPort; } public ZooKeeper getZooKeeperClient() { - return zkc; + return this.zkc; } public String getZooKeeperConnectString() { - return connectString; + return this.connectString; + } + + public String getMetadataServiceUri() { + return this.getMetadataServiceUri("/ledgers"); } - public void startServer() throws Exception { - // create a ZooKeeper server(dataDir, dataLogDir, port) + public String getMetadataServiceUri(String zkLedgersRootPath) { + return "zk://" + this.connectString + zkLedgersRootPath; + } + + public String getMetadataServiceUri(String zkLedgersRootPath, String type) { + return "zk+" + type + "://" + this.connectString + zkLedgersRootPath; + } + + public void startCluster() throws Exception { LOG.debug("Running ZK server"); - // ServerStats.registerAsConcrete(); ClientBase.setupTestEnv(); - ZkTmpDir = File.createTempFile("zookeeper", "test"); - ZkTmpDir.delete(); - ZkTmpDir.mkdir(); + this.zkTmpDir = IOUtils.createTempDir("zookeeper", "test"); + this.restartCluster(); + this.createBKEnsemble("/ledgers"); + } - zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperServer.DEFAULT_TICK_TIME); - serverFactory = new NIOServerCnxnFactory(); - serverFactory.configure(zkaddr, 100); - serverFactory.startup(zks); + public void restartCluster() throws Exception { + this.zks = new ZooKeeperServer(this.zkTmpDir, this.zkTmpDir, 3000); + this.serverFactory = new NIOServerCnxnFactory(); + this.serverFactory.configure(this.zkaddr, 100); + this.serverFactory.startup(this.zks); + if (0 == this.zooKeeperPort) { + this.zooKeeperPort = this.serverFactory.getLocalPort(); + this.zkaddr = new InetSocketAddress(this.zkaddr.getAddress().getHostAddress(), this.zooKeeperPort); + this.connectString = this.zkaddr.getAddress().getHostAddress() + ":" + this.zooKeeperPort; + } - boolean b = 
ClientBase.waitForServerUp(getZooKeeperConnectString(), ClientBase.CONNECTION_TIMEOUT); + boolean b = ClientBase.waitForServerUp(this.getZooKeeperConnectString(), (long)ClientBase.CONNECTION_TIMEOUT); LOG.debug("Server up: " + b); - - // create a zookeeper client LOG.debug("Instantiate ZK Client"); - zkc = ZooKeeperClient.newBuilder().connectString(getZooKeeperConnectString()).build(); - - // initialize the zk client with values - zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + this.zkc = ZooKeeperClient.newBuilder().connectString(this.getZooKeeperConnectString()).sessionTimeoutMs(10000).build(); } - @SuppressWarnings("deprecation") - public void sleepServer(final int seconds, final CountDownLatch l) throws InterruptedException, IOException { + public void sleepCluster(final int time, final TimeUnit timeUnit, final CountDownLatch l) throws InterruptedException, IOException { Thread[] allthreads = new Thread[Thread.activeCount()]; Thread.enumerate(allthreads); - for (final Thread t : allthreads) { + Thread[] var5 = allthreads; + int var6 = allthreads.length; + + for(int var7 = 0; var7 < var6; ++var7) { + final Thread t = var5[var7]; if (t.getName().contains("SyncThread:0")) { - Thread sleeper = new Thread(() -> { - try { - t.suspend(); - l.countDown(); - Thread.sleep(seconds * 1000); - t.resume(); - } catch (Exception e) { - LOG.error("Error suspending thread", e); + Thread sleeper = new Thread() { + public void run() { + try { + t.suspend(); + l.countDown(); + timeUnit.sleep((long)time); + t.resume(); + } catch (Exception var2) { + ZooKeeperUtil.LOG.error("Error suspending thread", var2); + } + } - }); + }; sleeper.start(); return; } } + throw new IOException("ZooKeeper thread not found"); } - public void killServer() throws Exception { - if (zkc != null) { - zkc.close(); + public void stopCluster() throws Exception { + if (this.zkc != 
null) { + this.zkc.close(); } - // shutdown ZK server - if (serverFactory != null) { - serverFactory.shutdown(); - Assert.assertTrue(ClientBase.waitForServerDown(getZooKeeperConnectString(), ClientBase.CONNECTION_TIMEOUT), - "waiting for server down"); + if (this.serverFactory != null) { + this.serverFactory.shutdown(); + Assert.assertTrue("waiting for server down", ClientBase.waitForServerDown(this.getZooKeeperConnectString(), (long)ClientBase.CONNECTION_TIMEOUT)); } - if (zks != null) { - zks.getTxnLogFactory().close(); + + if (this.zks != null) { + this.zks.getTxnLogFactory().close(); } - // ServerStats.unregister(); - FileUtils.deleteDirectory(ZkTmpDir); + + } + + public void killCluster() throws Exception { + this.stopCluster(); + FileUtils.deleteDirectory(this.zkTmpDir); + } + + static { + System.setProperty("zookeeper.4lw.commands.whitelist", "*"); + LOG = LoggerFactory.getLogger(ZooKeeperUtil.class); } } From f0b6348c3ccdf84c10f4c51e68d581034830833d Mon Sep 17 00:00:00 2001 From: Enrico Olivelli Date: Thu, 29 Sep 2022 18:45:48 +0200 Subject: [PATCH 34/59] Revert "[Fix][Tiered Storage] Eagerly Delete Offloaded Segments On Topic Deletion (#15914)" (#17889) This reverts commit 9026d1954d180cfb4b3a38f52217b14a3b5e3dc0. 
--- .../mledger/ManagedLedgerFactory.java | 20 -- .../impl/ManagedLedgerFactoryImpl.java | 110 ++------- .../mledger/impl/ManagedLedgerImpl.java | 78 ++++--- .../mledger/offload/OffloadUtils.java | 28 --- .../mledger/impl/ManagedLedgerTest.java | 8 +- .../mledger/impl/OffloadPrefixTest.java | 61 ----- .../pulsar/broker/service/BrokerService.java | 32 ++- .../service/persistent/PersistentTopic.java | 108 ++++----- .../broker/service/PersistentTopicTest.java | 2 - .../integration/offload/TestBaseOffload.java | 210 ++---------------- .../offload/TestFileSystemOffload.java | 5 +- .../offload/TestOffloadDeletionFS.java | 144 ------------ .../integration/offload/TestS3Offload.java | 2 +- .../offload/TestUniversalConfigurations.java | 2 +- .../suites/PulsarTieredStorageTestSuite.java | 4 +- 15 files changed, 148 insertions(+), 666 deletions(-) delete mode 100644 tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java index 21841544f8102..e42c2581ba101 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java @@ -145,16 +145,6 @@ void asyncOpenReadOnlyCursor(String managedLedgerName, Position startPosition, M */ void delete(String name) throws InterruptedException, ManagedLedgerException; - /** - * Delete a managed ledger. If it's not open, it's metadata will get regardless deleted. - * - * @param name - * @throws InterruptedException - * @throws ManagedLedgerException - */ - void delete(String name, CompletableFuture mlConfigFuture) - throws InterruptedException, ManagedLedgerException; - /** * Delete a managed ledger. If it's not open, it's metadata will get regardless deleted. 
* @@ -164,16 +154,6 @@ void delete(String name, CompletableFuture mlConfigFuture) */ void asyncDelete(String name, DeleteLedgerCallback callback, Object ctx); - /** - * Delete a managed ledger. If it's not open, it's metadata will get regardless deleted. - * - * @param name - * @throws InterruptedException - * @throws ManagedLedgerException - */ - void asyncDelete(String name, CompletableFuture mlConfigFuture, - DeleteLedgerCallback callback, Object ctx); - /** * Releases all the resources maintained by the ManagedLedgerFactory. * diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java index e4bc53de52889..d7596a7468a40 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java @@ -26,7 +26,6 @@ import io.netty.util.concurrent.DefaultThreadFactory; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -72,7 +71,6 @@ import org.apache.bookkeeper.mledger.impl.MetaStore.MetaStoreCallback; import org.apache.bookkeeper.mledger.impl.cache.EntryCacheManager; import org.apache.bookkeeper.mledger.impl.cache.RangeEntryCacheManagerImpl; -import org.apache.bookkeeper.mledger.offload.OffloadUtils; import org.apache.bookkeeper.mledger.proto.MLDataFormats; import org.apache.bookkeeper.mledger.proto.MLDataFormats.LongProperty; import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo; @@ -80,7 +78,6 @@ import org.apache.bookkeeper.mledger.util.Futures; import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.stats.StatsLogger; -import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.common.policies.data.EnsemblePlacementPolicyConfig; 
import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; @@ -805,18 +802,12 @@ public void operationFailed(MetaStoreException e) { @Override public void delete(String name) throws InterruptedException, ManagedLedgerException { - delete(name, CompletableFuture.completedFuture(null)); - } - - @Override - public void delete(String name, CompletableFuture mlConfigFuture) - throws InterruptedException, ManagedLedgerException { class Result { ManagedLedgerException e = null; } final Result r = new Result(); final CountDownLatch latch = new CountDownLatch(1); - asyncDelete(name, mlConfigFuture, new DeleteLedgerCallback() { + asyncDelete(name, new DeleteLedgerCallback() { @Override public void deleteLedgerComplete(Object ctx) { latch.countDown(); @@ -838,16 +829,10 @@ public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { @Override public void asyncDelete(String name, DeleteLedgerCallback callback, Object ctx) { - asyncDelete(name, CompletableFuture.completedFuture(null), callback, ctx); - } - - @Override - public void asyncDelete(String name, CompletableFuture mlConfigFuture, - DeleteLedgerCallback callback, Object ctx) { CompletableFuture future = ledgers.get(name); if (future == null) { // Managed ledger does not exist and we're not currently trying to open it - deleteManagedLedger(name, mlConfigFuture, callback, ctx); + deleteManagedLedger(name, callback, ctx); } else { future.thenAccept(ml -> { // If it's open, delete in the normal way @@ -862,8 +847,7 @@ public void asyncDelete(String name, CompletableFuture mlCo /** * Delete all managed ledger resources and metadata. 
*/ - void deleteManagedLedger(String managedLedgerName, CompletableFuture mlConfigFuture, - DeleteLedgerCallback callback, Object ctx) { + void deleteManagedLedger(String managedLedgerName, DeleteLedgerCallback callback, Object ctx) { // Read the managed ledger metadata from store asyncGetManagedLedgerInfo(managedLedgerName, new ManagedLedgerInfoCallback() { @Override @@ -875,7 +859,7 @@ public void getInfoComplete(ManagedLedgerInfo info, Object ctx) { .map(e -> deleteCursor(bkc, managedLedgerName, e.getKey(), e.getValue())) .collect(Collectors.toList()); Futures.waitForAll(futures).thenRun(() -> { - deleteManagedLedgerData(bkc, managedLedgerName, info, mlConfigFuture, callback, ctx); + deleteManagedLedgerData(bkc, managedLedgerName, info, callback, ctx); }).exceptionally(ex -> { callback.deleteLedgerFailed(new ManagedLedgerException(ex), ctx); return null; @@ -890,80 +874,22 @@ public void getInfoFailed(ManagedLedgerException exception, Object ctx) { } private void deleteManagedLedgerData(BookKeeper bkc, String managedLedgerName, ManagedLedgerInfo info, - CompletableFuture mlConfigFuture, - DeleteLedgerCallback callback, Object ctx) { - final CompletableFuture> - ledgerInfosFuture = new CompletableFuture<>(); - store.getManagedLedgerInfo(managedLedgerName, false, null, - new MetaStoreCallback<>() { - @Override - public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat) { - Map infos = new HashMap<>(); - for (MLDataFormats.ManagedLedgerInfo.LedgerInfo ls : mlInfo.getLedgerInfoList()) { - infos.put(ls.getLedgerId(), ls); - } - ledgerInfosFuture.complete(infos); - } - - @Override - public void operationFailed(MetaStoreException e) { - log.error("Failed to get managed ledger info for {}", managedLedgerName, e); - ledgerInfosFuture.completeExceptionally(e); - } - }); - + DeleteLedgerCallback callback, Object ctx) { Futures.waitForAll(info.ledgers.stream() - .map(li -> { - final CompletableFuture res; - if (li.isOffloaded) { - res = mlConfigFuture 
- .thenCombine(ledgerInfosFuture, Pair::of) - .thenCompose(pair -> { - ManagedLedgerConfig mlConfig = pair.getLeft(); - Map ledgerInfos = pair.getRight(); - - if (mlConfig == null || ledgerInfos == null) { - return CompletableFuture.completedFuture(null); - } - - MLDataFormats.ManagedLedgerInfo.LedgerInfo ls = ledgerInfos.get(li.ledgerId); - - if (ls.getOffloadContext().hasUidMsb()) { - MLDataFormats.ManagedLedgerInfo.LedgerInfo.Builder newInfoBuilder = ls.toBuilder(); - newInfoBuilder.getOffloadContextBuilder().setBookkeeperDeleted(true); - String driverName = OffloadUtils.getOffloadDriverName(ls, - mlConfig.getLedgerOffloader().getOffloadDriverName()); - Map driverMetadata = OffloadUtils.getOffloadDriverMetadata(ls, - mlConfig.getLedgerOffloader().getOffloadDriverMetadata()); - OffloadUtils.setOffloadDriverMetadata(newInfoBuilder, driverName, driverMetadata); - - UUID uuid = new UUID(ls.getOffloadContext().getUidMsb(), - ls.getOffloadContext().getUidLsb()); - return OffloadUtils.cleanupOffloaded(li.ledgerId, uuid, mlConfig, - OffloadUtils.getOffloadDriverMetadata(ls, - mlConfig.getLedgerOffloader().getOffloadDriverMetadata()), - "Deletion", managedLedgerName, scheduledExecutor); - } - - return CompletableFuture.completedFuture(null); - }); - } else { - res = CompletableFuture.completedFuture(null); - } - return res.thenCompose(__ -> bkc.newDeleteLedgerOp().withLedgerId(li.ledgerId).execute() - .handle((result, ex) -> { - if (ex != null) { - int rc = BKException.getExceptionCode(ex); - if (rc == BKException.Code.NoSuchLedgerExistsOnMetadataServerException - || rc == BKException.Code.NoSuchLedgerExistsException) { - log.info("Ledger {} does not exist, ignoring", li.ledgerId); - return null; - } - throw new CompletionException(ex); + .filter(li -> !li.isOffloaded) + .map(li -> bkc.newDeleteLedgerOp().withLedgerId(li.ledgerId).execute() + .handle((result, ex) -> { + if (ex != null) { + int rc = BKException.getExceptionCode(ex); + if (rc == 
BKException.Code.NoSuchLedgerExistsOnMetadataServerException + || rc == BKException.Code.NoSuchLedgerExistsException) { + log.info("Ledger {} does not exist, ignoring", li.ledgerId); + return null; } - return result; - })); - }) + throw new CompletionException(ex); + } + return result; + })) .collect(Collectors.toList())) .thenRun(() -> { // Delete the metadata diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index a4f94b2a9b71a..254ee767bc7fc 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -2450,7 +2450,7 @@ void internalTrimConsumedLedgers(CompletableFuture promise) { void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { if (!factory.isMetadataServiceAvailable()) { // Defer trimming of ledger if we cannot connect to metadata service - promise.completeExceptionally(new MetaStoreException("Metadata service is not available")); + promise.complete(null); return; } @@ -2722,30 +2722,11 @@ public void deleteLedgerFailed(ManagedLedgerException e, Object ctx) { @Override public void asyncDelete(final DeleteLedgerCallback callback, final Object ctx) { - // Delete the managed ledger without closing, since we are not interested in gracefully closing cursors and // ledgers setFenced(); cancelScheduledTasks(); - // Truncate to ensure the offloaded data is not orphaned. - // Also ensures the BK ledgers are deleted and not just scheduled for deletion - CompletableFuture truncateFuture = asyncTruncate(); - truncateFuture.whenComplete((ignore, exc) -> { - if (exc != null) { - log.error("[{}] Error truncating ledger for deletion", name, exc); - callback.deleteLedgerFailed(exc instanceof ManagedLedgerException - ? 
(ManagedLedgerException) exc : new ManagedLedgerException(exc), - ctx); - } else { - asyncDeleteInternal(callback, ctx); - } - }); - - } - - private void asyncDeleteInternal(final DeleteLedgerCallback callback, final Object ctx) { - List cursors = Lists.newArrayList(this.cursors); if (cursors.isEmpty()) { // No cursors to delete, proceed with next step @@ -2803,9 +2784,10 @@ private void asyncDeleteLedger(long ledgerId, LedgerInfo info) { if (info.getOffloadContext().hasUidMsb()) { UUID uuid = new UUID(info.getOffloadContext().getUidMsb(), info.getOffloadContext().getUidLsb()); - OffloadUtils.cleanupOffloaded(ledgerId, uuid, config, + cleanupOffloaded(ledgerId, uuid, + OffloadUtils.getOffloadDriverName(info, config.getLedgerOffloader().getOffloadDriverName()), OffloadUtils.getOffloadDriverMetadata(info, config.getLedgerOffloader().getOffloadDriverMetadata()), - "Trimming", name, scheduledExecutor); + "Trimming"); } } @@ -2860,7 +2842,7 @@ private void deleteAllLedgers(DeleteLedgerCallback callback, Object ctx) { default: // Handle error log.warn("[{}] Failed to delete ledger {} -- {}", name, ls.getLedgerId(), - BKException.getMessage(rc) + " code " + rc); + BKException.getMessage(rc)); int toDelete = ledgersToDelete.get(); if (toDelete != -1 && ledgersToDelete.compareAndSet(toDelete, -1)) { // Trigger callback only once @@ -3049,17 +3031,18 @@ private void offloadLoop(CompletableFuture promise, Queue prepareLedgerInfoForOffloaded(long ledgerId, UUI oldInfo.getOffloadContext().getUidLsb()); log.info("[{}] Found previous offload attempt for ledger {}, uuid {}" + ", cleaning up", name, ledgerId, uuid); - OffloadUtils.cleanupOffloaded( + cleanupOffloaded( ledgerId, oldUuid, - config, + OffloadUtils.getOffloadDriverName(oldInfo, + config.getLedgerOffloader().getOffloadDriverName()), OffloadUtils.getOffloadDriverMetadata(oldInfo, config.getLedgerOffloader().getOffloadDriverMetadata()), - "Previous failed offload", - name, - scheduledExecutor); + "Previous failed 
offload"); } LedgerInfo.Builder builder = oldInfo.toBuilder(); builder.getOffloadContextBuilder() @@ -3248,6 +3230,28 @@ private CompletableFuture completeLedgerInfoForOffloaded(long ledgerId, UU }); } + private void cleanupOffloaded(long ledgerId, UUID uuid, String offloadDriverName, /* + * TODO: use driver name to + * identify offloader + */ + Map offloadDriverMetadata, String cleanupReason) { + log.info("[{}] Cleanup offload for ledgerId {} uuid {} because of the reason {}.", + name, ledgerId, uuid.toString(), cleanupReason); + Map metadataMap = new HashMap(); + metadataMap.putAll(offloadDriverMetadata); + metadataMap.put("ManagedLedgerName", name); + + Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), TimeUnit.SECONDS.toHours(1)).limit(10), + Retries.NonFatalPredicate, + () -> config.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, metadataMap), + scheduledExecutor, name).whenComplete((ignored, exception) -> { + if (exception != null) { + log.warn("[{}] Error cleaning up offload for {}, (cleanup reason: {})", + name, ledgerId, cleanupReason, exception); + } + }); + } + /** * Get the number of entries between a contiguous range of two positions. 
* @@ -3756,7 +3760,7 @@ public static ManagedLedgerException createManagedLedgerException(int bkErrorCod } else if (isBkErrorNotRecoverable(bkErrorCode)) { return new NonRecoverableLedgerException(BKException.getMessage(bkErrorCode)); } else { - return new ManagedLedgerException(BKException.getMessage(bkErrorCode) + " error code: " + bkErrorCode); + return new ManagedLedgerException(BKException.getMessage(bkErrorCode)); } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java index 3768c4dd61208..767a0c78b6d8a 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java @@ -24,18 +24,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerMetadataBuilder; import org.apache.bookkeeper.client.api.DigestType; import org.apache.bookkeeper.client.api.LedgerMetadata; -import org.apache.bookkeeper.common.util.Backoff; -import org.apache.bookkeeper.common.util.Retries; -import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.proto.MLDataFormats.KeyValue; import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo; import org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext; @@ -187,26 +181,4 @@ public static LedgerMetadata parseLedgerMetadata(long id, byte[] bytes) throws I return builder.build(); } - - public static CompletableFuture cleanupOffloaded(long ledgerId, UUID uuid, ManagedLedgerConfig mlConfig, - Map offloadDriverMetadata, String cleanupReason, 
- String name, org.apache.bookkeeper.common.util.OrderedScheduler executor) { - log.info("[{}] Cleanup offload for ledgerId {} uuid {} because of the reason {}.", - name, ledgerId, uuid.toString(), cleanupReason); - Map metadataMap = new HashMap(); - metadataMap.putAll(offloadDriverMetadata); - metadataMap.put("ManagedLedgerName", name); - - return Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), - TimeUnit.SECONDS.toHours(1)).limit(10), - Retries.NonFatalPredicate, - () -> mlConfig.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, metadataMap), - executor, name).whenComplete((ignored, exception) -> { - if (exception != null) { - log.warn("[{}] Error cleaning up offload for {}, (cleanup reason: {})", - name, ledgerId, cleanupReason, exception); - } - }); - } - } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java index 4484327ad8dc7..a07a84f70bdc2 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java @@ -2992,8 +2992,7 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { ledger.asyncCreateLedger(bk, config, null, (rc, lh, ctx) -> {}, Collections.emptyMap()); retryStrategically((test) -> responseException1.get() != null, 5, 1000); assertNotNull(responseException1.get()); - assertTrue(responseException1.get().getMessage() - .startsWith(BKException.getMessage(BKException.Code.TimeoutException))); + assertEquals(responseException1.get().getMessage(), BKException.getMessage(BKException.Code.TimeoutException)); // (2) test read-timeout for: ManagedLedger.asyncReadEntry(..) 
AtomicReference responseException2 = new AtomicReference<>(); @@ -3018,14 +3017,13 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { return responseException2.get() != null; }, 5, 1000); assertNotNull(responseException2.get()); - assertTrue(responseException2.get().getMessage() - .startsWith(BKException.getMessage(BKException.Code.TimeoutException))); + assertEquals(responseException2.get().getMessage(), BKException.getMessage(BKException.Code.TimeoutException)); ledger.close(); } /** - * It verifies that if bk-client doesn't complete the add-entry in given time out then broker is resilient enough + * It verifies that if bk-client doesn't complete the add-entry in given time out then broker is resilient enought * to create new ledger and add entry successfully. * * diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java index f35e40ce0529e..ae0e53456e2d7 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java @@ -34,14 +34,10 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.stream.Collectors; - -import com.google.common.collect.Sets; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.api.ReadHandle; -import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.OffloadCallback; import org.apache.bookkeeper.mledger.LedgerOffloader; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -683,63 +679,6 @@ public void testOffloadDelete() throws Exception { assertEventuallyTrue(() -> 
offloader.deletedOffloads().contains(firstLedger)); } - @Test - public void testOffloadDeleteClosedLedger() throws Exception { - MockLedgerOffloader offloader = new MockLedgerOffloader(); - ManagedLedgerConfig config = new ManagedLedgerConfig(); - config.setMaxEntriesPerLedger(10); - config.setMinimumRolloverTime(0, TimeUnit.SECONDS); - config.setRetentionTime(0, TimeUnit.MINUTES); - offloader.getOffloadPolicies().setManagedLedgerOffloadDeletionLagInMillis(100L); - offloader.getOffloadPolicies().setManagedLedgerOffloadThresholdInBytes(100L); - config.setLedgerOffloader(offloader); - ManagedLedgerImpl ledger = (ManagedLedgerImpl)factory.open("my_test_ledger", config); - ManagedCursor cursor = ledger.openCursor("foobar"); - - for (int i = 0; i < 15; i++) { - String content = "entry-" + i; - ledger.addEntry(content.getBytes()); - } - - assertEquals(ledger.getLedgersInfoAsList().size(), 2); - ledger.offloadPrefix(ledger.getLastConfirmedEntry()); - assertEquals(ledger.getLedgersInfoAsList().size(), 2); - - assertEquals(ledger.getLedgersInfoAsList().stream() - .filter(e -> e.getOffloadContext().getComplete()).count(), 1); - assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete()); - - Set offloadedledgers = Sets.newHashSet(offloader.offloadedLedgers()); - assertTrue(offloadedledgers.size() > 0); - - Set bkLedgersInMLedger = Sets.newHashSet(ledger.getLedgersInfo().keySet()); - assertTrue(bkLedgersInMLedger.size() > 0); - - factory.close(ledger); - ledger.close(); - - AtomicInteger success = new AtomicInteger(0); - factory.asyncDelete("my_test_ledger", CompletableFuture.completedFuture(config), - new AsyncCallbacks.DeleteLedgerCallback() { - @Override - public void deleteLedgerComplete(Object ctx) { - success.set(1); - } - - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - success.set(-1); - } - }, null); - assertEventuallyTrue(() -> success.get() == 1); - Set deletedledgers = 
offloader.deletedOffloads(); - assertEquals(offloadedledgers, deletedledgers); - - for (long ledgerId: bkLedgersInMLedger) { - assertFalse(bkc.getLedgers().contains(ledgerId)); - } - } - @Test public void testOffloadDeleteIncomplete() throws Exception { Set> deleted = ConcurrentHashMap.newKeySet(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index 273af9460d98e..8491615448aae 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -1039,9 +1039,7 @@ public CompletableFuture> getTopic(final TopicName topicName, bo } public CompletableFuture deleteTopic(String topic, boolean forceDelete) { - TopicName topicName = TopicName.get(topic); Optional optTopic = getTopicReference(topic); - if (optTopic.isPresent()) { Topic t = optTopic.get(); if (forceDelete) { @@ -1068,8 +1066,9 @@ public CompletableFuture deleteTopic(String topic, boolean forceDelete) { return t.delete(); } - log.info("Topic {} is not loaded, try to delete from metadata", topic); - + if (log.isDebugEnabled()) { + log.debug("Topic {} is not loaded, try to delete from metadata", topic); + } // Topic is not loaded, though we still might be able to delete from metadata TopicName tn = TopicName.get(topic); if (!tn.isPersistent()) { @@ -1078,29 +1077,28 @@ public CompletableFuture deleteTopic(String topic, boolean forceDelete) { } CompletableFuture future = new CompletableFuture<>(); + CompletableFuture deleteTopicAuthenticationFuture = new CompletableFuture<>(); deleteTopicAuthenticationWithRetry(topic, deleteTopicAuthenticationFuture, 5); - deleteTopicAuthenticationFuture.whenComplete((v, ex) -> { if (ex != null) { future.completeExceptionally(ex); return; } - CompletableFuture mlConfigFuture = getManagedLedgerConfig(topicName); - 
managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), - mlConfigFuture, new DeleteLedgerCallback() { - @Override - public void deleteLedgerComplete(Object ctx) { - future.complete(null); - } + managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), new DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + future.complete(null); + } - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - future.completeExceptionally(exception); - } - }, null); + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + future.completeExceptionally(exception); + } + }, null); }); + return future; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index 68d348e50eca9..fdcaaf2ffbdb3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -1031,12 +1031,10 @@ public CompletableFuture createSubscription(String subscriptionNam public CompletableFuture unsubscribe(String subscriptionName) { CompletableFuture unsubscribeFuture = new CompletableFuture<>(); - TopicName tn = TopicName.get(MLPendingAckStore - .getTransactionPendingAckStoreSuffix(topic, - Codec.encode(subscriptionName))); if (brokerService.pulsar().getConfiguration().isTransactionCoordinatorEnabled()) { - getBrokerService().getManagedLedgerFactory().asyncDelete(tn.getPersistenceNamingEncoding(), - getBrokerService().getManagedLedgerConfig(tn), + getBrokerService().getManagedLedgerFactory().asyncDelete(TopicName.get(MLPendingAckStore + .getTransactionPendingAckStoreSuffix(topic, + Codec.encode(subscriptionName))).getPersistenceNamingEncoding(), new AsyncCallbacks.DeleteLedgerCallback() { @Override 
public void deleteLedgerComplete(Object ctx) { @@ -1193,69 +1191,53 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, .thenCompose(__ -> deleteTopicPolicies()) .thenCompose(__ -> transactionBufferCleanupAndClose()) .whenComplete((v, ex) -> { - if (ex != null) { - log.error("[{}] Error deleting topic", topic, ex); + if (ex != null) { + log.error("[{}] Error deleting topic", topic, ex); + unfenceTopicToResume(); + deleteFuture.completeExceptionally(ex); + } else { + List> subsDeleteFutures = new ArrayList<>(); + subscriptions.forEach((sub, p) -> subsDeleteFutures.add(unsubscribe(sub))); + + FutureUtil.waitForAll(subsDeleteFutures).whenComplete((f, e) -> { + if (e != null) { + log.error("[{}] Error deleting topic", topic, e); unfenceTopicToResume(); - deleteFuture.completeExceptionally(ex); + deleteFuture.completeExceptionally(e); } else { - List> subsDeleteFutures = new ArrayList<>(); - subscriptions.forEach((sub, p) -> subsDeleteFutures.add(unsubscribe(sub))); - - FutureUtil.waitForAll(subsDeleteFutures).whenComplete((f, e) -> { - if (e != null) { - log.error("[{}] Error deleting topic", topic, e); - unfenceTopicToResume(); - deleteFuture.completeExceptionally(e); - } else { - // Truncate to ensure the offloaded data is not orphaned. 
- // Also ensures the BK ledgers are deleted and not just - // scheduled for deletion - CompletableFuture truncateFuture = ledger.asyncTruncate(); - truncateFuture.whenComplete((ignore, exc) -> { - if (e != null) { - log.error("[{}] Error truncating topic", topic, e); - unfenceTopicToResume(); - deleteFuture.completeExceptionally(e); - } else { - ledger.asyncDelete(new AsyncCallbacks.DeleteLedgerCallback() { - @Override - public void deleteLedgerComplete(Object ctx) { - brokerService.removeTopicFromCache(PersistentTopic.this); - - dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); - - subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); - - unregisterTopicPolicyListener(); - - log.info("[{}] Topic deleted", topic); - deleteFuture.complete(null); - } - - @Override - public void - deleteLedgerFailed(ManagedLedgerException exception, - Object ctx) { - if (exception.getCause() - instanceof MetadataStoreException.NotFoundException) { - log.info("[{}] Topic is already deleted {}", - topic, exception.getMessage()); - deleteLedgerComplete(ctx); - } else { - unfenceTopicToResume(); - log.error("[{}] Error deleting topic", - topic, exception); - deleteFuture.completeExceptionally( - new PersistenceException(exception)); - } - } - }, null); - } - }); + ledger.asyncDelete(new AsyncCallbacks.DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + brokerService.removeTopicFromCache(PersistentTopic.this); + + dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); + + subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); + + unregisterTopicPolicyListener(); + + log.info("[{}] Topic deleted", topic); + deleteFuture.complete(null); + } + + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + if (exception.getCause() + instanceof MetadataStoreException.NotFoundException) { + log.info("[{}] Topic is already deleted {}", + topic, exception.getMessage()); + deleteLedgerComplete(ctx); + } 
else { + unfenceTopicToResume(); + log.error("[{}] Error deleting topic", topic, exception); + deleteFuture.completeExceptionally(new PersistenceException(exception)); + } } - }); + }, null); } }); + } + }); } else { unfenceTopicToResume(); deleteFuture.completeExceptionally(new TopicBusyException( diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index dd6d00f4cda27..970bfd763a4e5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -1256,8 +1256,6 @@ public void testCloseTopic() throws Exception { @Test public void testDeleteTopic() throws Exception { - doReturn(CompletableFuture.completedFuture(null)).when(ledgerMock).asyncTruncate(); - // create topic PersistentTopic topic = (PersistentTopic) brokerService.getOrCreateTopic(successTopicName).get(); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java index 9e6b5261df16b..1d7fb21062136 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java @@ -20,8 +20,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; @@ -34,17 +32,14 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.tests.integration.suites.PulsarTieredStorageTestSuite; -import 
org.awaitility.Awaitility; import org.testng.Assert; @Slf4j public abstract class TestBaseOffload extends PulsarTieredStorageTestSuite { - protected int getEntrySize() { - return 1024; - }; + private static final int ENTRY_SIZE = 1024; - private byte[] buildEntry(String pattern) { - byte[] entry = new byte[getEntrySize()]; + private static byte[] buildEntry(String pattern) { + byte[] entry = new byte[ENTRY_SIZE]; byte[] patternBytes = pattern.getBytes(); for (int i = 0; i < entry.length; i++) { @@ -69,24 +64,15 @@ protected void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String admi long firstLedger = -1; try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Producer producer = client.newProducer().topic(topic) - .maxPendingMessages(getNumEntriesPerLedger() / 2).sendTimeout(60, TimeUnit.SECONDS) .blockIfQueueFull(true).enableBatching(false).create();) { client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe().close(); // write enough to topic to make it roll int i = 0; - AtomicBoolean success = new AtomicBoolean(true); - - for (; i < getNumEntriesPerLedger() * 1.5; i++) { - producer.sendAsync(buildEntry("offload-message" + i)) - .exceptionally(e -> { - log.error("failed to send a message", e); - success.set(false); - return null; - });; + for (; i < ENTRIES_PER_LEDGER * 1.5; i++) { + producer.sendAsync(buildEntry("offload-message" + i)); } producer.flush(); - Assert.assertTrue(success.get()); } try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build()) { @@ -127,7 +113,7 @@ protected void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String admi try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Consumer consumer = client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe()) { // read back from topic - for (int i = 0; i < getNumEntriesPerLedger() * 1.5; i++) { + for (int i = 0; i < ENTRIES_PER_LEDGER * 1.5; i++) { Message m = 
consumer.receive(1, TimeUnit.MINUTES); Assert.assertEquals(buildEntry("offload-message" + i), m.getData()); } @@ -152,32 +138,25 @@ protected void testPublishOffloadAndConsumeViaThreshold(String serviceUrl, Strin long firstLedger = 0; try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Producer producer = client.newProducer().topic(topic) - .maxPendingMessages(getNumEntriesPerLedger() / 2).sendTimeout(60, TimeUnit.SECONDS) - .blockIfQueueFull(true).enableBatching(false).create()) { + .blockIfQueueFull(true).enableBatching(false).create(); + ) { client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe().close(); - AtomicBoolean success = new AtomicBoolean(true); // write enough to topic to make it roll twice - for (int i = 0; i < getNumEntriesPerLedger() * 2.5; i++) { - producer.sendAsync(buildEntry("offload-message" + i)) - .exceptionally(e -> { - log.error("failed to send a message", e); - success.set(false); - return null; - });; + for (int i = 0; i < ENTRIES_PER_LEDGER * 2.5; i++) { + producer.sendAsync(buildEntry("offload-message" + i)); } producer.flush(); - Assert.assertTrue(success.get()); } try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build()) { firstLedger = admin.topics().getInternalStats(topic).ledgers.get(0).ledgerId; // wait up to 30 seconds for offload to occur - for (int i = 0; i < 100 && !admin.topics().getInternalStats(topic).ledgers.get(0).offloaded; i++) { - Thread.sleep(300); + for (int i = 0; i < 300 && !admin.topics().getInternalStats(topic).ledgers.get(0).offloaded; i++) { + Thread.sleep(100); } Assert.assertTrue(admin.topics().getInternalStats(topic).ledgers.get(0).offloaded); @@ -196,9 +175,8 @@ protected void testPublishOffloadAndConsumeViaThreshold(String serviceUrl, Strin try (PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Consumer consumer = client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe()) { // read back from 
topic - for (int i = 0; i < getNumEntriesPerLedger() * 2.5; i++) { + for (int i = 0; i < ENTRIES_PER_LEDGER * 2.5; i++) { Message m = consumer.receive(1, TimeUnit.MINUTES); - Assert.assertNotNull(m); Assert.assertEquals(buildEntry("offload-message" + i), m.getData()); } } @@ -219,52 +197,30 @@ private boolean ledgerOffloaded(List le .map(l -> l.offloaded).findFirst().get(); } - private long writeAndWaitForOffload(String serviceUrl, String adminUrl, String topic) - throws Exception { - return writeAndWaitForOffload(serviceUrl, adminUrl, topic, -1); - } - - private long writeAndWaitForOffload(String serviceUrl, String adminUrl, String topic, int partitionNum) - throws Exception { + private long writeAndWaitForOffload(String serviceUrl, String adminUrl, String topic) throws Exception { try(PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build(); Producer producer = client.newProducer().topic(topic) - .maxPendingMessages(getNumEntriesPerLedger() / 2).sendTimeout(60, TimeUnit.SECONDS) .blockIfQueueFull(true).enableBatching(false).create(); PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(adminUrl).build()) { - String topicToCheck = partitionNum >= 0 - ? topic + "-partition-" + partitionNum - : topic; - - List ledgers = admin.topics() - .getInternalStats(topicToCheck).ledgers; + List ledgers = admin.topics().getInternalStats(topic).ledgers; long currentLedger = ledgers.get(ledgers.size() - 1).ledgerId; client.newConsumer().topic(topic).subscriptionName("my-sub").subscribe().close(); - AtomicBoolean success = new AtomicBoolean(true); // write enough to topic to make it roll twice - for (int i = 0; - i < getNumEntriesPerLedger() * 2.5 * (partitionNum > 0 ? 
partitionNum + 1 : 1); - i++) { - producer.sendAsync(buildEntry("offload-message" + i)) - .exceptionally(e -> { - log.error("failed to send a message", e); - success.set(false); - return null; - }); + for (int i = 0; i < ENTRIES_PER_LEDGER * 2.5; i++) { + producer.sendAsync(buildEntry("offload-message" + i)); } - producer.flush(); producer.send(buildEntry("final-offload-message")); - Assert.assertTrue(success.get()); // wait up to 30 seconds for offload to occur for (int i = 0; - i < 100 && !ledgerOffloaded(admin.topics().getInternalStats(topicToCheck).ledgers, currentLedger); + i < 300 && !ledgerOffloaded(admin.topics().getInternalStats(topic).ledgers, currentLedger); i++) { - Thread.sleep(300); + Thread.sleep(100); } - Assert.assertTrue(ledgerOffloaded(admin.topics().getInternalStats(topicToCheck).ledgers, currentLedger)); + Assert.assertTrue(ledgerOffloaded(admin.topics().getInternalStats(topic).ledgers, currentLedger)); return currentLedger; } @@ -339,130 +295,4 @@ protected void testPublishOffloadAndConsumeDeletionLag(String serviceUrl, String Thread.sleep(5000); Assert.assertTrue(ledgerExistsInBookKeeper(offloadedLedger)); } - - protected void testDeleteOffloadedTopic(String serviceUrl, String adminUrl, - boolean unloadBeforeDelete, int numPartitions) throws Exception { - final String tenant = "offload-test-cli-" + randomName(4); - final String namespace = tenant + "/ns1"; - final String topic = "persistent://" + namespace + "/topic1"; - - pulsarCluster.runAdminCommandOnAnyBroker("tenants", - "create", "--allowed-clusters", pulsarCluster.getClusterName(), - "--admin-roles", "offload-admin", tenant); - - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", - "create", "--clusters", pulsarCluster.getClusterName(), namespace); - - // set threshold to offload runs immediately after role - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", - "set-offload-threshold", "--size", "0", namespace); - - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", - 
"set-retention", "--size", "100M", "--time", "100m", namespace); - - String output = pulsarCluster.runAdminCommandOnAnyBroker( - "namespaces", "get-offload-deletion-lag", namespace).getStdout(); - Assert.assertTrue(output.contains("Unset for namespace")); - - if (numPartitions > 0) { - pulsarCluster.runAdminCommandOnAnyBroker("topics", - "create-partitioned-topic", topic, - "--partitions", Integer.toString(numPartitions)); - } else { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "create", topic); - } - - long offloadedLedger = writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); - // give it up to 5 seconds to delete, it shouldn't - // so we wait this every time - Thread.sleep(5000); - Assert.assertTrue(ledgerExistsInBookKeeper(offloadedLedger)); - - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", "set-offload-deletion-lag", namespace, - "--lag", "0m"); - output = pulsarCluster.runAdminCommandOnAnyBroker( - "namespaces", "get-offload-deletion-lag", namespace).getStdout(); - Assert.assertTrue(output.contains("0 minute(s)")); - - offloadedLedger = writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); - // wait up to 10 seconds for ledger to be deleted - for (int i = 0; i < 10 && ledgerExistsInBookKeeper(offloadedLedger); i++) { - writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); - Thread.sleep(1000); - } - - Assert.assertFalse(ledgerExistsInBookKeeper(offloadedLedger)); - Assert.assertTrue(offloadedLedgerExists(topic, numPartitions - 1, offloadedLedger)); - - if (unloadBeforeDelete) { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "unload", topic); - } - if (numPartitions > 0) { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "delete-partitioned-topic", topic); - } else { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "delete", topic); - } - final long ledgerId = offloadedLedger; - Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { - 
Assert.assertFalse(offloadedLedgerExists(topic, numPartitions - 1, ledgerId)); - }); - } - - protected void testDeleteOffloadedTopicExistsInBk(String serviceUrl, String adminUrl, - boolean unloadBeforeDelete, int numPartitions) throws Exception { - final String tenant = "offload-test-cli-" + randomName(4); - final String namespace = tenant + "/ns1"; - final String topic = "persistent://" + namespace + "/topic1"; - - pulsarCluster.runAdminCommandOnAnyBroker("tenants", - "create", "--allowed-clusters", pulsarCluster.getClusterName(), - "--admin-roles", "offload-admin", tenant); - - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", - "create", "--clusters", pulsarCluster.getClusterName(), namespace); - - // set threshold to offload runs immediately after role - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", - "set-offload-threshold", "--size", "0", namespace); - pulsarCluster.runAdminCommandOnAnyBroker("namespaces", - "set-retention", "--size", "100M", "--time", "100m", namespace); - - if (numPartitions > 0) { - pulsarCluster.runAdminCommandOnAnyBroker("topics", - "create-partitioned-topic", topic, - "--partitions", Integer.toString(numPartitions)); - } else { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "create", topic); - } - - String output = pulsarCluster.runAdminCommandOnAnyBroker( - "namespaces", "get-offload-deletion-lag", namespace).getStdout(); - Assert.assertTrue(output.contains("Unset for namespace")); - - long offloadedLedger = writeAndWaitForOffload(serviceUrl, adminUrl, topic, numPartitions - 1); - // give it up to 5 seconds to delete, it shouldn't - // so we wait this every time - Thread.sleep(5000); - Assert.assertTrue(ledgerExistsInBookKeeper(offloadedLedger)); - - Assert.assertTrue(offloadedLedgerExists(topic, numPartitions - 1, offloadedLedger)); - - if (unloadBeforeDelete) { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "unload", topic); - } - if (numPartitions > 0) { - pulsarCluster.runAdminCommandOnAnyBroker("topics", 
"delete-partitioned-topic", topic); - } else { - pulsarCluster.runAdminCommandOnAnyBroker("topics", "delete", topic); - } - final long ledgerId = offloadedLedger; - Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { - Assert.assertFalse(offloadedLedgerExists(topic, numPartitions - 1, ledgerId)); - }); - Assert.assertFalse(ledgerExistsInBookKeeper(offloadedLedger)); - } - - protected boolean offloadedLedgerExists(String topic, int partitionNum, long firstLedger) { - throw new RuntimeException("not implemented"); - } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java index 48b86e8a1f45d..808aae62e7419 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestFileSystemOffload.java @@ -41,17 +41,18 @@ public void testPublishOffloadAndConsumeViaThreshold(Supplier serviceUrl @Test(dataProvider = "ServiceAndAdminUrls") public void testPublishOffloadAndConsumeDeletionLag(Supplier serviceUrl, Supplier adminUrl) throws Exception { super.testPublishOffloadAndConsumeDeletionLag(serviceUrl.get(), adminUrl.get()); + } + @Override protected Map getEnv() { Map result = new HashMap<>(); - result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); + result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(ENTRIES_PER_LEDGER)); result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); result.put("managedLedgerOffloadDriver", "filesystem"); result.put("fileSystemURI", "file:///"); return result; } - } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java 
b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java deleted file mode 100644 index 4b1739a0cd13b..0000000000000 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestOffloadDeletionFS.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.pulsar.tests.integration.offload; - -import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.tests.integration.docker.ContainerExecException; -import org.apache.pulsar.tests.integration.docker.ContainerExecResult; -import org.testng.annotations.Test; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -@Slf4j -public class TestOffloadDeletionFS extends TestBaseOffload { - - @Override - protected int getEntrySize() { - return 512; - } - - @Override - protected int getNumEntriesPerLedger() { - return 200; - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteOffloadedTopic(Supplier serviceUrl, Supplier adminUrl) throws Exception { - super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), false, 0); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteUnloadedOffloadedTopic(Supplier serviceUrl, Supplier adminUrl) - throws Exception { - super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), true, 0); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteOffloadedTopicExistsInBk(Supplier serviceUrl, Supplier adminUrl) - throws Exception { - super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), false, 0); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteUnloadedOffloadedTopicExistsInBk(Supplier serviceUrl, Supplier adminUrl) - throws Exception { - super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), true, 0); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteOffloadedPartitionedTopic(Supplier serviceUrl, Supplier adminUrl) throws Exception { - super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), false, 3); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void 
testDeleteUnloadedOffloadedPartitionedTopic(Supplier serviceUrl, Supplier adminUrl) - throws Exception { - super.testDeleteOffloadedTopic(serviceUrl.get(), adminUrl.get(), true, 3); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteOffloadedPartitionedTopicExistsInBk(Supplier serviceUrl, Supplier adminUrl) - throws Exception { - super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), false, 3); - } - - @Test(dataProvider = "ServiceAndAdminUrls") - public void testDeleteUnloadedOffloadedPartitionedTopicExistsInBk(Supplier serviceUrl, - Supplier adminUrl) throws Exception { - super.testDeleteOffloadedTopicExistsInBk(serviceUrl.get(), adminUrl.get(), true, 3); - } - - @Override - protected Map getEnv() { - Map result = new HashMap<>(); - result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); - result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); - result.put("managedLedgerOffloadDriver", "filesystem"); - result.put("fileSystemURI", "file:///"); - - return result; - } - - @Override - protected boolean offloadedLedgerExists(String topic, int partitionNum, long ledger) { - log.info("offloadedLedgerExists(topic = {}, partitionNum={},ledger={})", - topic, partitionNum, ledger); - if (partitionNum > -1) { - topic = topic + "-partition-" + partitionNum; - } - String managedLedgerName = TopicName.get(topic).getPersistenceNamingEncoding(); - String rootPath = "pulsar/"; - String dirPath = rootPath + managedLedgerName + "/"; - - List result = new LinkedList<>(); - String[] cmds = { - "ls", - "-1", - dirPath - }; - pulsarCluster.getBrokers().forEach(broker -> { - try { - ContainerExecResult res = broker.execCmd(cmds); - log.info("offloadedLedgerExists broker {} 'ls -1 {}' got {}", - broker.getContainerName(), dirPath, res.getStdout()); - Arrays.stream(res.getStdout().split("\n")) - .filter(x -> x.startsWith(ledger + "-")) - .forEach(x -> result.add(x)); - } catch (ContainerExecException ce) 
{ - log.info("offloadedLedgerExists broker {} 'ls -1 {}' got error code {}", - broker.getContainerName(), dirPath, ce.getResult().getExitCode()); - // ignore 2 (No such file or directory) - if (ce.getResult().getExitCode() != 2) { - throw new RuntimeException(ce); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - - return !result.isEmpty(); - } - -} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java index a230b13e215f5..edbbcfeba5e10 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestS3Offload.java @@ -73,7 +73,7 @@ public void testPublishOffloadAndConsumeDeletionLag(Supplier serviceUrl, @Override protected Map getEnv() { Map result = new HashMap<>(); - result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); + result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(ENTRIES_PER_LEDGER)); result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); result.put("managedLedgerOffloadDriver", "aws-s3"); result.put("s3ManagedLedgerOffloadBucket", "pulsar-integtest"); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java index ef7406113f6ee..9c53d801ea1eb 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestUniversalConfigurations.java @@ -72,7 +72,7 @@ public void testPublishOffloadAndConsumeDeletionLag(Supplier serviceUrl, @Override protected Map getEnv() { Map result = new HashMap<>(); 
- result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(getNumEntriesPerLedger())); + result.put("managedLedgerMaxEntriesPerLedger", String.valueOf(ENTRIES_PER_LEDGER)); result.put("managedLedgerMinLedgerRolloverTimeMinutes", "0"); result.put("managedLedgerOffloadDriver", "aws-s3"); result.put("managedLedgerOffloadBucket", "pulsar-integtest"); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java index 1c6bb9dc3f34c..7811b38e0fd92 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarTieredStorageTestSuite.java @@ -31,9 +31,7 @@ @Slf4j public abstract class PulsarTieredStorageTestSuite extends PulsarClusterTestBase { - protected int getNumEntriesPerLedger() { - return 1024; - } + protected static final int ENTRIES_PER_LEDGER = 1024; @BeforeClass(alwaysRun = true) @Override From 5f079433c3706d994e6d0210cfc87e483ad53e52 Mon Sep 17 00:00:00 2001 From: Asaf Mesika Date: Fri, 30 Sep 2022 03:54:19 +0300 Subject: [PATCH 35/59] [fix][doc] Fix M1 JVM Installation Instructions (#17669) --- README.md | 5 +++-- .../version-2.10.x/getting-started-standalone.md | 8 +------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 37e73f95ac194..f858e1065723e 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ components in the Pulsar ecosystem, including connectors, adapters, and other la ## Build Pulsar -Requirements: +### Requirements - JDK @@ -139,6 +139,7 @@ Requirements: > Note: this project includes a [Maven Wrapper](https://maven.apache.org/wrapper/) that can be used instead of a system installed Maven. 
> Use it by replacing `mvn` by `./mvnw` on Linux and `mvnw.cmd` on Windows in the commands below. +### Build Compile and install: ```bash @@ -151,7 +152,7 @@ Compile and install individual module $ mvn -pl module-name (e.g: pulsar-broker) install -DskipTests ``` -## Minimal build (This skips most of external connectors and tiered storage handlers) +### Minimal build (This skips most of external connectors and tiered storage handlers) ``` mvn install -Pcore-modules,-main -DskipTests diff --git a/site2/website/versioned_docs/version-2.10.x/getting-started-standalone.md b/site2/website/versioned_docs/version-2.10.x/getting-started-standalone.md index efb218a846dd8..2a5b27100ba18 100644 --- a/site2/website/versioned_docs/version-2.10.x/getting-started-standalone.md +++ b/site2/website/versioned_docs/version-2.10.x/getting-started-standalone.md @@ -37,13 +37,7 @@ One of the ways to easily install an x86 JDK is to use [SDKMan](http://sdkman.io 1. Install [SDKMan](http://sdkman.io). - * Method 1: follow instructions on the SDKMan website. - - * Method 2: if you have [Homebrew](https://brew.sh) installed, enter the following command. - -```shell -brew install sdkman -``` +Follow the instructions on the SDKMan website. 2. Turn on Rosetta2 compatibility for SDKMan by editing `~/.sdkman/etc/config` and changing the following property from `false` to `true`. 
From f3c547b1fb0fd3bf6a40a91e457f2dccfec10d05 Mon Sep 17 00:00:00 2001 From: Matteo Merli Date: Thu, 29 Sep 2022 18:56:56 -0700 Subject: [PATCH 36/59] PIP-209: Removed C++/Python clients from main repo (#17881) * PIP-209: Removed C++/Python clients from main repo * Removed python directory from Docekrfile * Fixed python client version argument scoping * Fixed handling of pulsar.functions.serde --- .github/changes-filter.yaml | 2 - .github/workflows/ci-cpp-build.yaml | 259 -- .github/workflows/ci-go-functions.yaml | 4 +- .github/workflows/pulsar-ci-flaky.yaml | 4 +- .github/workflows/pulsar-ci.yaml | 112 +- docker/pulsar/Dockerfile | 5 +- docker/pulsar/pom.xml | 52 +- .../pulsar/scripts/install-pulsar-client.sh | 4 +- pom.xml | 2 + pulsar-client-cpp/.clang-format | 25 - pulsar-client-cpp/.gitignore | 91 - pulsar-client-cpp/CMakeLists.txt | 465 -- pulsar-client-cpp/Doxyfile | 2500 ---------- pulsar-client-cpp/README.md | 289 -- .../build-support/clang_format_exclusions.txt | 21 - .../build-support/merge_archives.sh | 50 - .../build-support/run_clang_format.py | 80 - .../cmake_modules/FindClangTools.cmake | 100 - pulsar-client-cpp/docker-build-centos7.sh | 39 - pulsar-client-cpp/docker-build-python3.9.sh | 40 - pulsar-client-cpp/docker-build.sh | 49 - pulsar-client-cpp/docker-format.sh | 47 - pulsar-client-cpp/docker-tests.sh | 88 - pulsar-client-cpp/docker/alpine/Dockerfile | 99 - .../docker/alpine/Dockerfile-alpine-3.8 | 99 - .../docker/alpine/build-alpine-image.sh | 33 - .../alpine/build-wheel-file-within-docker.sh | 38 - .../docker/alpine/build-wheel.sh | 34 - .../docker/build-client-lib-within-docker.sh | 34 - pulsar-client-cpp/docker/build-client-lib.sh | 49 - .../docker/build-wheel-file-within-docker.sh | 46 - pulsar-client-cpp/docker/build-wheels.sh | 82 - pulsar-client-cpp/docker/centos-7/Dockerfile | 47 - pulsar-client-cpp/docker/create-images.sh | 44 - .../docker/manylinux1/Dockerfile | 162 - .../docker/manylinux2014/Dockerfile | 130 - 
.../docker/manylinux_musl/Dockerfile | 116 - pulsar-client-cpp/docker/push-images.sh | 46 - pulsar-client-cpp/docker/python-versions.sh | 41 - pulsar-client-cpp/docs/MainPage.md | 180 - pulsar-client-cpp/eclipse-formatter.xml | 187 - pulsar-client-cpp/examples/CMakeLists.txt | 84 - .../examples/SampleAsyncConsumerCApi.c | 68 - .../examples/SampleAsyncProducer.cc | 53 - pulsar-client-cpp/examples/SampleConsumer.cc | 49 - .../examples/SampleConsumerCApi.c | 60 - .../examples/SampleConsumerListener.cc | 52 - .../examples/SampleConsumerListenerCApi.c | 59 - .../examples/SampleFileLogger.cc | 33 - pulsar-client-cpp/examples/SampleProducer.cc | 45 - .../examples/SampleProducerCApi.c | 64 - pulsar-client-cpp/examples/SampleReaderCApi.c | 59 - .../include/pulsar/Authentication.h | 564 --- .../include/pulsar/BrokerConsumerStats.h | 94 - pulsar-client-cpp/include/pulsar/Client.h | 385 -- .../include/pulsar/ClientConfiguration.h | 272 -- .../include/pulsar/CompressionType.h | 33 - .../include/pulsar/ConsoleLoggerFactory.h | 61 - pulsar-client-cpp/include/pulsar/Consumer.h | 417 -- .../include/pulsar/ConsumerConfiguration.h | 522 --- .../pulsar/ConsumerCryptoFailureAction.h | 36 - .../include/pulsar/ConsumerEventListener.h | 49 - .../include/pulsar/ConsumerType.h | 49 - .../include/pulsar/CryptoKeyReader.h | 123 - .../include/pulsar/DeprecatedException.h | 36 - .../include/pulsar/EncryptionKeyInfo.h | 85 - .../include/pulsar/FileLoggerFactory.h | 65 - .../include/pulsar/InitialPosition.h | 30 - .../include/pulsar/KeySharedPolicy.h | 110 - pulsar-client-cpp/include/pulsar/Logger.h | 71 - pulsar-client-cpp/include/pulsar/Message.h | 198 - .../include/pulsar/MessageBatch.h | 49 - .../include/pulsar/MessageBuilder.h | 167 - pulsar-client-cpp/include/pulsar/MessageId.h | 111 - .../include/pulsar/MessageRoutingPolicy.h | 62 - pulsar-client-cpp/include/pulsar/Producer.h | 176 - .../include/pulsar/ProducerConfiguration.h | 537 --- .../pulsar/ProducerCryptoFailureAction.h | 32 - 
.../include/pulsar/ProtobufNativeSchema.h | 35 - pulsar-client-cpp/include/pulsar/Reader.h | 163 - .../include/pulsar/ReaderConfiguration.h | 302 -- pulsar-client-cpp/include/pulsar/Result.h | 102 - pulsar-client-cpp/include/pulsar/Schema.h | 173 - .../include/pulsar/TopicMetadata.h | 39 - .../include/pulsar/c/authentication.h | 53 - pulsar-client-cpp/include/pulsar/c/client.h | 192 - .../include/pulsar/c/client_configuration.h | 173 - pulsar-client-cpp/include/pulsar/c/consumer.h | 257 -- .../include/pulsar/c/consumer_configuration.h | 316 -- pulsar-client-cpp/include/pulsar/c/message.h | 211 - .../include/pulsar/c/message_id.h | 58 - .../include/pulsar/c/message_router.h | 38 - pulsar-client-cpp/include/pulsar/c/producer.h | 130 - .../include/pulsar/c/producer_configuration.h | 226 - pulsar-client-cpp/include/pulsar/c/reader.h | 75 - .../include/pulsar/c/reader_configuration.h | 94 - pulsar-client-cpp/include/pulsar/c/result.h | 102 - .../include/pulsar/c/string_list.h | 41 - .../include/pulsar/c/string_map.h | 44 - pulsar-client-cpp/include/pulsar/c/version.h | 22 - pulsar-client-cpp/include/pulsar/defines.h | 44 - pulsar-client-cpp/lib/AckGroupingTracker.cc | 73 - pulsar-client-cpp/lib/AckGroupingTracker.h | 113 - .../lib/AckGroupingTrackerDisabled.cc | 43 - .../lib/AckGroupingTrackerDisabled.h | 58 - .../lib/AckGroupingTrackerEnabled.cc | 161 - .../lib/AckGroupingTrackerEnabled.h | 95 - pulsar-client-cpp/lib/Authentication.cc | 228 - pulsar-client-cpp/lib/Backoff.cc | 60 - pulsar-client-cpp/lib/Backoff.h | 48 - .../lib/BatchAcknowledgementTracker.cc | 176 - .../lib/BatchAcknowledgementTracker.h | 104 - .../lib/BatchMessageContainer.cc | 79 - pulsar-client-cpp/lib/BatchMessageContainer.h | 64 - .../lib/BatchMessageContainerBase.cc | 86 - .../lib/BatchMessageContainerBase.h | 193 - .../lib/BatchMessageKeyBasedContainer.cc | 129 - .../lib/BatchMessageKeyBasedContainer.h | 62 - .../lib/BinaryProtoLookupService.cc | 185 - .../lib/BinaryProtoLookupService.h | 77 - 
pulsar-client-cpp/lib/BlockingQueue.h | 193 - pulsar-client-cpp/lib/BoostHash.cc | 29 - pulsar-client-cpp/lib/BoostHash.h | 40 - pulsar-client-cpp/lib/BrokerConsumerStats.cc | 70 - .../lib/BrokerConsumerStatsImpl.cc | 104 - .../lib/BrokerConsumerStatsImpl.h | 129 - .../lib/BrokerConsumerStatsImplBase.h | 71 - pulsar-client-cpp/lib/CMakeLists.txt | 172 - pulsar-client-cpp/lib/Client.cc | 181 - pulsar-client-cpp/lib/ClientConfiguration.cc | 153 - .../lib/ClientConfigurationImpl.h | 48 - pulsar-client-cpp/lib/ClientConnection.cc | 1711 ------- pulsar-client-cpp/lib/ClientConnection.h | 350 -- pulsar-client-cpp/lib/ClientImpl.cc | 659 --- pulsar-client-cpp/lib/ClientImpl.h | 161 - pulsar-client-cpp/lib/Commands.cc | 816 ---- pulsar-client-cpp/lib/Commands.h | 152 - pulsar-client-cpp/lib/CompressionCodec.cc | 91 - pulsar-client-cpp/lib/CompressionCodec.h | 92 - pulsar-client-cpp/lib/CompressionCodecLZ4.cc | 53 - pulsar-client-cpp/lib/CompressionCodecLZ4.h | 33 - .../lib/CompressionCodecSnappy.cc | 70 - .../lib/CompressionCodecSnappy.h | 31 - pulsar-client-cpp/lib/CompressionCodecZLib.cc | 91 - pulsar-client-cpp/lib/CompressionCodecZLib.h | 39 - pulsar-client-cpp/lib/CompressionCodecZstd.cc | 73 - pulsar-client-cpp/lib/CompressionCodecZstd.h | 31 - pulsar-client-cpp/lib/ConnectionPool.cc | 107 - pulsar-client-cpp/lib/ConnectionPool.h | 83 - pulsar-client-cpp/lib/ConsoleLoggerFactory.cc | 32 - .../lib/ConsoleLoggerFactoryImpl.h | 37 - pulsar-client-cpp/lib/Consumer.cc | 270 -- .../lib/ConsumerConfiguration.cc | 270 -- .../lib/ConsumerConfigurationImpl.h | 59 - pulsar-client-cpp/lib/ConsumerImpl.cc | 1422 ------ pulsar-client-cpp/lib/ConsumerImpl.h | 326 -- pulsar-client-cpp/lib/ConsumerImplBase.h | 67 - pulsar-client-cpp/lib/CryptoKeyReader.cc | 80 - pulsar-client-cpp/lib/DeprecatedException.cc | 26 - pulsar-client-cpp/lib/EncryptionKeyInfo.cc | 38 - .../lib/EncryptionKeyInfoImpl.cc | 35 - pulsar-client-cpp/lib/EncryptionKeyInfoImpl.h | 51 - 
pulsar-client-cpp/lib/ExecutorService.cc | 138 - pulsar-client-cpp/lib/ExecutorService.h | 105 - pulsar-client-cpp/lib/FileLoggerFactory.cc | 31 - pulsar-client-cpp/lib/FileLoggerFactoryImpl.h | 44 - pulsar-client-cpp/lib/Future.h | 181 - .../lib/GetLastMessageIdResponse.h | 56 - pulsar-client-cpp/lib/HTTPLookupService.cc | 398 -- pulsar-client-cpp/lib/HTTPLookupService.h | 74 - pulsar-client-cpp/lib/HandlerBase.cc | 155 - pulsar-client-cpp/lib/HandlerBase.h | 118 - pulsar-client-cpp/lib/Hash.h | 39 - pulsar-client-cpp/lib/JavaStringHash.cc | 40 - pulsar-client-cpp/lib/JavaStringHash.h | 36 - pulsar-client-cpp/lib/KeySharedPolicy.cc | 83 - pulsar-client-cpp/lib/KeySharedPolicyImpl.h | 32 - pulsar-client-cpp/lib/Latch.cc | 47 - pulsar-client-cpp/lib/Latch.h | 66 - pulsar-client-cpp/lib/Log4CxxLogger.h | 39 - pulsar-client-cpp/lib/Log4cxxLogger.cc | 97 - pulsar-client-cpp/lib/LogUtils.cc | 68 - pulsar-client-cpp/lib/LogUtils.h | 98 - pulsar-client-cpp/lib/LookupDataResult.h | 79 - pulsar-client-cpp/lib/LookupService.h | 79 - pulsar-client-cpp/lib/MapCache.h | 104 - .../lib/MemoryLimitController.cc | 86 - pulsar-client-cpp/lib/MemoryLimitController.h | 47 - pulsar-client-cpp/lib/Message.cc | 227 - .../lib/MessageAndCallbackBatch.cc | 71 - .../lib/MessageAndCallbackBatch.h | 88 - pulsar-client-cpp/lib/MessageBatch.cc | 57 - pulsar-client-cpp/lib/MessageBuilder.cc | 154 - pulsar-client-cpp/lib/MessageCrypto.cc | 518 --- pulsar-client-cpp/lib/MessageCrypto.h | 144 - pulsar-client-cpp/lib/MessageId.cc | 141 - pulsar-client-cpp/lib/MessageIdImpl.h | 49 - pulsar-client-cpp/lib/MessageIdUtil.h | 44 - pulsar-client-cpp/lib/MessageImpl.cc | 105 - pulsar-client-cpp/lib/MessageImpl.h | 89 - pulsar-client-cpp/lib/MessageRouterBase.cc | 40 - pulsar-client-cpp/lib/MessageRouterBase.h | 40 - pulsar-client-cpp/lib/MultiResultCallback.h | 51 - .../lib/MultiTopicsBrokerConsumerStatsImpl.cc | 158 - .../lib/MultiTopicsBrokerConsumerStatsImpl.h | 91 - .../lib/MultiTopicsConsumerImpl.cc | 
834 ---- .../lib/MultiTopicsConsumerImpl.h | 162 - pulsar-client-cpp/lib/Murmur3_32Hash.cc | 122 - pulsar-client-cpp/lib/Murmur3_32Hash.h | 52 - pulsar-client-cpp/lib/NamedEntity.cc | 38 - pulsar-client-cpp/lib/NamedEntity.h | 26 - pulsar-client-cpp/lib/NamespaceName.cc | 110 - pulsar-client-cpp/lib/NamespaceName.h | 59 - pulsar-client-cpp/lib/NegativeAcksTracker.cc | 118 - pulsar-client-cpp/lib/NegativeAcksTracker.h | 63 - pulsar-client-cpp/lib/ObjectPool.h | 231 - pulsar-client-cpp/lib/OpSendMsg.h | 65 - .../lib/PartitionedProducerImpl.cc | 449 -- .../lib/PartitionedProducerImpl.h | 124 - .../lib/PatternMultiTopicsConsumerImpl.cc | 237 - .../lib/PatternMultiTopicsConsumerImpl.h | 84 - pulsar-client-cpp/lib/PendingFailures.h | 45 - pulsar-client-cpp/lib/PeriodicTask.cc | 65 - pulsar-client-cpp/lib/PeriodicTask.h | 76 - pulsar-client-cpp/lib/Producer.cc | 122 - .../lib/ProducerConfiguration.cc | 269 -- .../lib/ProducerConfigurationImpl.h | 56 - pulsar-client-cpp/lib/ProducerImpl.cc | 932 ---- pulsar-client-cpp/lib/ProducerImpl.h | 200 - pulsar-client-cpp/lib/ProducerImplBase.h | 51 - pulsar-client-cpp/lib/ProtobufNativeSchema.cc | 83 - pulsar-client-cpp/lib/PulsarScheme.h | 82 - pulsar-client-cpp/lib/Reader.cc | 135 - pulsar-client-cpp/lib/ReaderConfiguration.cc | 155 - .../lib/ReaderConfigurationImpl.h | 43 - pulsar-client-cpp/lib/ReaderImpl.cc | 150 - pulsar-client-cpp/lib/ReaderImpl.h | 81 - pulsar-client-cpp/lib/Result.cc | 177 - .../lib/RetryableLookupService.h | 151 - .../lib/RoundRobinMessageRouter.cc | 90 - .../lib/RoundRobinMessageRouter.h | 53 - pulsar-client-cpp/lib/Schema.cc | 101 - pulsar-client-cpp/lib/Semaphore.cc | 74 - pulsar-client-cpp/lib/Semaphore.h | 48 - pulsar-client-cpp/lib/ServiceNameResolver.h | 59 - pulsar-client-cpp/lib/ServiceURI.cc | 101 - pulsar-client-cpp/lib/ServiceURI.h | 50 - pulsar-client-cpp/lib/ServiceUnitId.h | 27 - pulsar-client-cpp/lib/SharedBuffer.h | 256 - pulsar-client-cpp/lib/SimpleLogger.h | 86 - 
.../lib/SinglePartitionMessageRouter.cc | 52 - .../lib/SinglePartitionMessageRouter.h | 45 - pulsar-client-cpp/lib/Synchronized.h | 42 - pulsar-client-cpp/lib/SynchronizedHashMap.h | 140 - pulsar-client-cpp/lib/TestUtil.h | 25 - pulsar-client-cpp/lib/TimeUtils.cc | 32 - pulsar-client-cpp/lib/TimeUtils.h | 84 - pulsar-client-cpp/lib/TopicMetadataImpl.cc | 26 - pulsar-client-cpp/lib/TopicMetadataImpl.h | 36 - pulsar-client-cpp/lib/TopicName.cc | 259 -- pulsar-client-cpp/lib/TopicName.h | 82 - .../lib/UnAckedMessageTrackerDisabled.h | 34 - .../lib/UnAckedMessageTrackerEnabled.cc | 167 - .../lib/UnAckedMessageTrackerEnabled.h | 59 - .../lib/UnAckedMessageTrackerInterface.h | 50 - .../lib/UnboundedBlockingQueue.h | 156 - pulsar-client-cpp/lib/Url.cc | 112 - pulsar-client-cpp/lib/Url.h | 57 - pulsar-client-cpp/lib/UtilAllocator.h | 79 - pulsar-client-cpp/lib/Utils.h | 106 - pulsar-client-cpp/lib/VersionInternal.h | 26 - pulsar-client-cpp/lib/auth/AuthAthenz.cc | 95 - pulsar-client-cpp/lib/auth/AuthAthenz.h | 45 - pulsar-client-cpp/lib/auth/AuthBasic.cc | 110 - pulsar-client-cpp/lib/auth/AuthBasic.h | 46 - pulsar-client-cpp/lib/auth/AuthOauth2.cc | 416 -- pulsar-client-cpp/lib/auth/AuthOauth2.h | 101 - pulsar-client-cpp/lib/auth/AuthTls.cc | 61 - pulsar-client-cpp/lib/auth/AuthTls.h | 44 - pulsar-client-cpp/lib/auth/AuthToken.cc | 118 - pulsar-client-cpp/lib/auth/AuthToken.h | 46 - .../lib/auth/athenz/ZTSClient.cc | 391 -- pulsar-client-cpp/lib/auth/athenz/ZTSClient.h | 64 - pulsar-client-cpp/lib/c/cStringList.cc | 34 - pulsar-client-cpp/lib/c/cStringMap.cc | 60 - pulsar-client-cpp/lib/c/c_Authentication.cc | 80 - pulsar-client-cpp/lib/c/c_Client.cc | 243 - .../lib/c/c_ClientConfiguration.cc | 160 - pulsar-client-cpp/lib/c/c_Consumer.cc | 164 - .../lib/c/c_ConsumerConfiguration.cc | 219 - pulsar-client-cpp/lib/c/c_Message.cc | 128 - pulsar-client-cpp/lib/c/c_MessageId.cc | 78 - pulsar-client-cpp/lib/c/c_MessageRouter.cc | 26 - pulsar-client-cpp/lib/c/c_Producer.cc | 77 - 
.../lib/c/c_ProducerConfiguration.cc | 234 - pulsar-client-cpp/lib/c/c_Reader.cc | 63 - .../lib/c/c_ReaderConfiguration.cc | 88 - pulsar-client-cpp/lib/c/c_Result.cc | 23 - pulsar-client-cpp/lib/c/c_structs.h | 90 - .../lib/checksum/ChecksumProvider.cc | 75 - .../lib/checksum/ChecksumProvider.h | 33 - pulsar-client-cpp/lib/checksum/crc32c_arm.cc | 209 - pulsar-client-cpp/lib/checksum/crc32c_arm.h | 65 - .../lib/checksum/crc32c_sse42.cc | 272 -- pulsar-client-cpp/lib/checksum/crc32c_sse42.h | 47 - pulsar-client-cpp/lib/checksum/crc32c_sw.cc | 102 - pulsar-client-cpp/lib/checksum/crc32c_sw.h | 27 - pulsar-client-cpp/lib/checksum/gf2.hpp | 203 - pulsar-client-cpp/lib/checksum/int_types.h | 43 - pulsar-client-cpp/lib/lz4/lz4.cc | 1533 ------ pulsar-client-cpp/lib/lz4/lz4.h | 405 -- .../lib/stats/ConsumerStatsBase.h | 38 - .../lib/stats/ConsumerStatsDisabled.h | 35 - .../lib/stats/ConsumerStatsImpl.cc | 111 - .../lib/stats/ConsumerStatsImpl.h | 81 - .../lib/stats/ProducerStatsBase.h | 37 - .../lib/stats/ProducerStatsDisabled.h | 31 - .../lib/stats/ProducerStatsImpl.cc | 125 - .../lib/stats/ProducerStatsImpl.h | 108 - pulsar-client-cpp/log4cxx.conf | 32 - pulsar-client-cpp/perf/CMakeLists.txt | 37 - pulsar-client-cpp/perf/PerfConsumer.cc | 354 -- pulsar-client-cpp/perf/PerfProducer.cc | 430 -- pulsar-client-cpp/perf/RateLimiter.h | 90 - pulsar-client-cpp/pkg/apk/.gitignore | 4 - pulsar-client-cpp/pkg/apk/APKBUILD | 57 - pulsar-client-cpp/pkg/apk/build-apk.sh | 42 - pulsar-client-cpp/pkg/apk/docker-build-apk.sh | 29 - pulsar-client-cpp/pkg/deb/.gitignore | 1 - pulsar-client-cpp/pkg/deb/Dockerfile | 91 - pulsar-client-cpp/pkg/deb/build-deb.sh | 104 - pulsar-client-cpp/pkg/deb/docker-build-deb.sh | 34 - .../pkg/licenses/LICENSE-boost.txt | 23 - .../pkg/licenses/LICENSE-jsoncpp.txt | 55 - .../pkg/licenses/LICENSE-libcurl.txt | 22 - .../pkg/licenses/LICENSE-protobuf.txt | 32 - .../pkg/licenses/LICENSE-zlib.txt | 23 - pulsar-client-cpp/pkg/licenses/LICENSE.txt | 214 - 
pulsar-client-cpp/pkg/rpm/.gitignore | 5 - pulsar-client-cpp/pkg/rpm/Dockerfile | 92 - .../pkg/rpm/SPECS/pulsar-client.spec | 97 - pulsar-client-cpp/pkg/rpm/build-rpm.sh | 43 - pulsar-client-cpp/pkg/rpm/docker-build-rpm.sh | 34 - .../pulsar-test-service-start.sh | 125 - pulsar-client-cpp/pulsar-test-service-stop.sh | 26 - pulsar-client-cpp/python/.gitignore | 4 - pulsar-client-cpp/python/CMakeLists.txt | 103 - pulsar-client-cpp/python/build-mac-wheels.sh | 300 -- .../python/custom_logger_test.py | 54 - .../python/examples/company.avsc | 21 - .../python/examples/rpc_client.py | 80 - .../python/examples/rpc_server.py | 63 - pulsar-client-cpp/python/pulsar/__init__.py | 1428 ------ pulsar-client-cpp/python/pulsar/exceptions.py | 28 - .../python/pulsar/functions/__init__.py | 20 - .../python/pulsar/functions/context.py | 191 - .../python/pulsar/functions/function.py | 51 - .../python/pulsar/functions/serde.py | 87 - .../python/pulsar/schema/__init__.py | 24 - .../python/pulsar/schema/definition.py | 515 --- .../python/pulsar/schema/schema.py | 111 - .../python/pulsar/schema/schema_avro.py | 96 - pulsar-client-cpp/python/pulsar_test.py | 1341 ------ pulsar-client-cpp/python/schema_test.py | 1291 ------ pulsar-client-cpp/python/setup.py | 117 - .../python/src/authentication.cc | 119 - pulsar-client-cpp/python/src/client.cc | 118 - pulsar-client-cpp/python/src/config.cc | 300 -- pulsar-client-cpp/python/src/consumer.cc | 120 - .../python/src/cryptoKeyReader.cc | 32 - pulsar-client-cpp/python/src/enums.cc | 114 - pulsar-client-cpp/python/src/exceptions.cc | 112 - pulsar-client-cpp/python/src/message.cc | 171 - pulsar-client-cpp/python/src/producer.cc | 102 - pulsar-client-cpp/python/src/pulsar.cc | 59 - pulsar-client-cpp/python/src/reader.cc | 98 - pulsar-client-cpp/python/src/schema.cc | 28 - pulsar-client-cpp/python/src/utils.cc | 47 - pulsar-client-cpp/python/src/utils.h | 104 - pulsar-client-cpp/python/test_consumer.py | 36 - pulsar-client-cpp/python/test_producer.py | 
46 - pulsar-client-cpp/run-unit-tests.sh | 99 - pulsar-client-cpp/templates/Version.h.in | 28 - pulsar-client-cpp/test-conf/.htpasswd | 1 - pulsar-client-cpp/test-conf/client-ssl.conf | 26 - pulsar-client-cpp/test-conf/client.conf | 27 - .../test-conf/standalone-ssl.conf | 309 -- pulsar-client-cpp/test-conf/standalone.conf | 291 -- pulsar-client-cpp/tests/AuthBasicTest.cc | 140 - pulsar-client-cpp/tests/AuthPluginTest.cc | 485 -- pulsar-client-cpp/tests/AuthTokenTest.cc | 200 - pulsar-client-cpp/tests/BackoffTest.cc | 147 - pulsar-client-cpp/tests/BasicEndToEndTest.cc | 4106 ----------------- pulsar-client-cpp/tests/BatchMessageTest.cc | 1151 ----- pulsar-client-cpp/tests/BlockingQueueTest.cc | 237 - pulsar-client-cpp/tests/CMakeLists.txt | 61 - .../tests/ClientDeduplicationTest.cc | 152 - pulsar-client-cpp/tests/ClientTest.cc | 297 -- .../tests/CompressionCodecSnappyTest.cc | 38 - .../tests/ConsumerConfigurationTest.cc | 310 -- pulsar-client-cpp/tests/ConsumerStatsTest.cc | 319 -- pulsar-client-cpp/tests/ConsumerTest.cc | 934 ---- pulsar-client-cpp/tests/ConsumerTest.h | 31 - pulsar-client-cpp/tests/CustomLoggerTest.cc | 107 - pulsar-client-cpp/tests/CustomRoutingPolicy.h | 50 - pulsar-client-cpp/tests/HashTest.cc | 75 - pulsar-client-cpp/tests/HttpHelper.cc | 55 - pulsar-client-cpp/tests/HttpHelper.h | 28 - .../tests/KeyBasedBatchingTest.cc | 211 - .../tests/KeySharedConsumerTest.cc | 237 - .../tests/KeySharedPolicyTest.cc | 203 - pulsar-client-cpp/tests/LatchTest.cc | 85 - pulsar-client-cpp/tests/LogHelper.h | 39 - pulsar-client-cpp/tests/LoggerTest.cc | 28 - pulsar-client-cpp/tests/LookupServiceTest.cc | 274 -- pulsar-client-cpp/tests/MapCacheTest.cc | 78 - .../tests/MemoryLimitControllerTest.cc | 130 - pulsar-client-cpp/tests/MemoryLimitTest.cc | 163 - .../tests/MessageChunkingTest.cc | 137 - pulsar-client-cpp/tests/MessageIdTest.cc | 59 - pulsar-client-cpp/tests/MessageTest.cc | 101 - pulsar-client-cpp/tests/NamespaceNameTest.cc | 44 - 
.../tests/NoOpsCryptoKeyReader.h | 36 - pulsar-client-cpp/tests/PaddingDemo.proto | 26 - .../tests/PartitionsUpdateTest.cc | 185 - pulsar-client-cpp/tests/PeriodicTaskTest.cc | 75 - .../tests/ProducerConfigurationTest.cc | 136 - pulsar-client-cpp/tests/ProducerTest.cc | 298 -- pulsar-client-cpp/tests/PromiseTest.cc | 84 - .../tests/ProtobufNativeSchemaTest.cc | 146 - pulsar-client-cpp/tests/PulsarFriend.h | 133 - .../tests/ReaderConfigurationTest.cc | 126 - pulsar-client-cpp/tests/ReaderTest.cc | 608 --- .../tests/RoundRobinMessageRouterTest.cc | 165 - pulsar-client-cpp/tests/SchemaTest.cc | 109 - pulsar-client-cpp/tests/SemaphoreTest.cc | 150 - pulsar-client-cpp/tests/ServiceURITest.cc | 75 - .../tests/SinglePartitionMessageRouterTest.cc | 73 - .../tests/SynchronizedHashMapTest.cc | 132 - .../tests/TopicMetadataImplTest.cc | 29 - pulsar-client-cpp/tests/TopicNameTest.cc | 181 - .../tests/UnboundedBlockingQueueTest.cc | 178 - pulsar-client-cpp/tests/UrlTest.cc | 87 - pulsar-client-cpp/tests/VersionTest.cc | 29 - pulsar-client-cpp/tests/WaitUtils.h | 43 - .../tests/ZLibCompressionTest.cc | 65 - pulsar-client-cpp/tests/ZTSClientTest.cc | 79 - pulsar-client-cpp/tests/ZeroQueueSizeTest.cc | 286 -- pulsar-client-cpp/tests/authentication.conf | 288 -- .../tests/c/c_BasicEndToEndTest.cc | 122 - .../tests/c/c_ConsumerConfigurationTest.cc | 34 - .../tests/c/c_ProducerConfigurationTest.cc | 28 - pulsar-client-cpp/tests/client.conf | 27 - pulsar-client-cpp/tests/main.cc | 25 - pulsar-client-cpp/tests/mocks/GMockMessage.h | 36 - pulsar-client-cpp/tests/standalone.conf | 289 -- pulsar-client-cpp/vcpkg.json | 30 - pulsar-client-cpp/wireshark/CMakeLists.txt | 83 - pulsar-client-cpp/wireshark/README.md | 110 - .../wireshark/pulsarDissector.cc | 1227 ----- pulsar-functions/instance/pom.xml | 3 - .../instance/src/main/python/util.py | 11 +- src/stage-release.sh | 6 - 463 files changed, 27 insertions(+), 70723 deletions(-) delete mode 100644 .github/workflows/ci-cpp-build.yaml delete 
mode 100644 pulsar-client-cpp/.clang-format delete mode 100644 pulsar-client-cpp/.gitignore delete mode 100644 pulsar-client-cpp/CMakeLists.txt delete mode 100644 pulsar-client-cpp/Doxyfile delete mode 100644 pulsar-client-cpp/README.md delete mode 100644 pulsar-client-cpp/build-support/clang_format_exclusions.txt delete mode 100755 pulsar-client-cpp/build-support/merge_archives.sh delete mode 100755 pulsar-client-cpp/build-support/run_clang_format.py delete mode 100644 pulsar-client-cpp/cmake_modules/FindClangTools.cmake delete mode 100755 pulsar-client-cpp/docker-build-centos7.sh delete mode 100755 pulsar-client-cpp/docker-build-python3.9.sh delete mode 100755 pulsar-client-cpp/docker-build.sh delete mode 100755 pulsar-client-cpp/docker-format.sh delete mode 100755 pulsar-client-cpp/docker-tests.sh delete mode 100644 pulsar-client-cpp/docker/alpine/Dockerfile delete mode 100644 pulsar-client-cpp/docker/alpine/Dockerfile-alpine-3.8 delete mode 100755 pulsar-client-cpp/docker/alpine/build-alpine-image.sh delete mode 100755 pulsar-client-cpp/docker/alpine/build-wheel-file-within-docker.sh delete mode 100755 pulsar-client-cpp/docker/alpine/build-wheel.sh delete mode 100755 pulsar-client-cpp/docker/build-client-lib-within-docker.sh delete mode 100755 pulsar-client-cpp/docker/build-client-lib.sh delete mode 100755 pulsar-client-cpp/docker/build-wheel-file-within-docker.sh delete mode 100755 pulsar-client-cpp/docker/build-wheels.sh delete mode 100644 pulsar-client-cpp/docker/centos-7/Dockerfile delete mode 100755 pulsar-client-cpp/docker/create-images.sh delete mode 100644 pulsar-client-cpp/docker/manylinux1/Dockerfile delete mode 100644 pulsar-client-cpp/docker/manylinux2014/Dockerfile delete mode 100644 pulsar-client-cpp/docker/manylinux_musl/Dockerfile delete mode 100755 pulsar-client-cpp/docker/push-images.sh delete mode 100644 pulsar-client-cpp/docker/python-versions.sh delete mode 100644 pulsar-client-cpp/docs/MainPage.md delete mode 100644 
pulsar-client-cpp/eclipse-formatter.xml delete mode 100644 pulsar-client-cpp/examples/CMakeLists.txt delete mode 100644 pulsar-client-cpp/examples/SampleAsyncConsumerCApi.c delete mode 100644 pulsar-client-cpp/examples/SampleAsyncProducer.cc delete mode 100644 pulsar-client-cpp/examples/SampleConsumer.cc delete mode 100644 pulsar-client-cpp/examples/SampleConsumerCApi.c delete mode 100644 pulsar-client-cpp/examples/SampleConsumerListener.cc delete mode 100644 pulsar-client-cpp/examples/SampleConsumerListenerCApi.c delete mode 100644 pulsar-client-cpp/examples/SampleFileLogger.cc delete mode 100644 pulsar-client-cpp/examples/SampleProducer.cc delete mode 100644 pulsar-client-cpp/examples/SampleProducerCApi.c delete mode 100644 pulsar-client-cpp/examples/SampleReaderCApi.c delete mode 100644 pulsar-client-cpp/include/pulsar/Authentication.h delete mode 100644 pulsar-client-cpp/include/pulsar/BrokerConsumerStats.h delete mode 100644 pulsar-client-cpp/include/pulsar/Client.h delete mode 100644 pulsar-client-cpp/include/pulsar/ClientConfiguration.h delete mode 100644 pulsar-client-cpp/include/pulsar/CompressionType.h delete mode 100644 pulsar-client-cpp/include/pulsar/ConsoleLoggerFactory.h delete mode 100644 pulsar-client-cpp/include/pulsar/Consumer.h delete mode 100644 pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h delete mode 100644 pulsar-client-cpp/include/pulsar/ConsumerCryptoFailureAction.h delete mode 100644 pulsar-client-cpp/include/pulsar/ConsumerEventListener.h delete mode 100644 pulsar-client-cpp/include/pulsar/ConsumerType.h delete mode 100644 pulsar-client-cpp/include/pulsar/CryptoKeyReader.h delete mode 100644 pulsar-client-cpp/include/pulsar/DeprecatedException.h delete mode 100644 pulsar-client-cpp/include/pulsar/EncryptionKeyInfo.h delete mode 100644 pulsar-client-cpp/include/pulsar/FileLoggerFactory.h delete mode 100644 pulsar-client-cpp/include/pulsar/InitialPosition.h delete mode 100644 pulsar-client-cpp/include/pulsar/KeySharedPolicy.h 
delete mode 100644 pulsar-client-cpp/include/pulsar/Logger.h delete mode 100644 pulsar-client-cpp/include/pulsar/Message.h delete mode 100644 pulsar-client-cpp/include/pulsar/MessageBatch.h delete mode 100644 pulsar-client-cpp/include/pulsar/MessageBuilder.h delete mode 100644 pulsar-client-cpp/include/pulsar/MessageId.h delete mode 100644 pulsar-client-cpp/include/pulsar/MessageRoutingPolicy.h delete mode 100644 pulsar-client-cpp/include/pulsar/Producer.h delete mode 100644 pulsar-client-cpp/include/pulsar/ProducerConfiguration.h delete mode 100644 pulsar-client-cpp/include/pulsar/ProducerCryptoFailureAction.h delete mode 100644 pulsar-client-cpp/include/pulsar/ProtobufNativeSchema.h delete mode 100644 pulsar-client-cpp/include/pulsar/Reader.h delete mode 100644 pulsar-client-cpp/include/pulsar/ReaderConfiguration.h delete mode 100644 pulsar-client-cpp/include/pulsar/Result.h delete mode 100644 pulsar-client-cpp/include/pulsar/Schema.h delete mode 100644 pulsar-client-cpp/include/pulsar/TopicMetadata.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/authentication.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/client.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/client_configuration.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/consumer.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/consumer_configuration.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/message.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/message_id.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/message_router.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/producer.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/producer_configuration.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/reader.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/reader_configuration.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/result.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/string_list.h delete 
mode 100644 pulsar-client-cpp/include/pulsar/c/string_map.h delete mode 100644 pulsar-client-cpp/include/pulsar/c/version.h delete mode 100644 pulsar-client-cpp/include/pulsar/defines.h delete mode 100644 pulsar-client-cpp/lib/AckGroupingTracker.cc delete mode 100644 pulsar-client-cpp/lib/AckGroupingTracker.h delete mode 100644 pulsar-client-cpp/lib/AckGroupingTrackerDisabled.cc delete mode 100644 pulsar-client-cpp/lib/AckGroupingTrackerDisabled.h delete mode 100644 pulsar-client-cpp/lib/AckGroupingTrackerEnabled.cc delete mode 100644 pulsar-client-cpp/lib/AckGroupingTrackerEnabled.h delete mode 100644 pulsar-client-cpp/lib/Authentication.cc delete mode 100644 pulsar-client-cpp/lib/Backoff.cc delete mode 100644 pulsar-client-cpp/lib/Backoff.h delete mode 100644 pulsar-client-cpp/lib/BatchAcknowledgementTracker.cc delete mode 100644 pulsar-client-cpp/lib/BatchAcknowledgementTracker.h delete mode 100644 pulsar-client-cpp/lib/BatchMessageContainer.cc delete mode 100644 pulsar-client-cpp/lib/BatchMessageContainer.h delete mode 100644 pulsar-client-cpp/lib/BatchMessageContainerBase.cc delete mode 100644 pulsar-client-cpp/lib/BatchMessageContainerBase.h delete mode 100644 pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.cc delete mode 100644 pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.h delete mode 100644 pulsar-client-cpp/lib/BinaryProtoLookupService.cc delete mode 100644 pulsar-client-cpp/lib/BinaryProtoLookupService.h delete mode 100644 pulsar-client-cpp/lib/BlockingQueue.h delete mode 100644 pulsar-client-cpp/lib/BoostHash.cc delete mode 100644 pulsar-client-cpp/lib/BoostHash.h delete mode 100644 pulsar-client-cpp/lib/BrokerConsumerStats.cc delete mode 100644 pulsar-client-cpp/lib/BrokerConsumerStatsImpl.cc delete mode 100644 pulsar-client-cpp/lib/BrokerConsumerStatsImpl.h delete mode 100644 pulsar-client-cpp/lib/BrokerConsumerStatsImplBase.h delete mode 100644 pulsar-client-cpp/lib/CMakeLists.txt delete mode 100644 pulsar-client-cpp/lib/Client.cc delete 
mode 100644 pulsar-client-cpp/lib/ClientConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/ClientConfigurationImpl.h delete mode 100644 pulsar-client-cpp/lib/ClientConnection.cc delete mode 100644 pulsar-client-cpp/lib/ClientConnection.h delete mode 100644 pulsar-client-cpp/lib/ClientImpl.cc delete mode 100644 pulsar-client-cpp/lib/ClientImpl.h delete mode 100644 pulsar-client-cpp/lib/Commands.cc delete mode 100644 pulsar-client-cpp/lib/Commands.h delete mode 100644 pulsar-client-cpp/lib/CompressionCodec.cc delete mode 100644 pulsar-client-cpp/lib/CompressionCodec.h delete mode 100644 pulsar-client-cpp/lib/CompressionCodecLZ4.cc delete mode 100644 pulsar-client-cpp/lib/CompressionCodecLZ4.h delete mode 100644 pulsar-client-cpp/lib/CompressionCodecSnappy.cc delete mode 100644 pulsar-client-cpp/lib/CompressionCodecSnappy.h delete mode 100644 pulsar-client-cpp/lib/CompressionCodecZLib.cc delete mode 100644 pulsar-client-cpp/lib/CompressionCodecZLib.h delete mode 100644 pulsar-client-cpp/lib/CompressionCodecZstd.cc delete mode 100644 pulsar-client-cpp/lib/CompressionCodecZstd.h delete mode 100644 pulsar-client-cpp/lib/ConnectionPool.cc delete mode 100644 pulsar-client-cpp/lib/ConnectionPool.h delete mode 100644 pulsar-client-cpp/lib/ConsoleLoggerFactory.cc delete mode 100644 pulsar-client-cpp/lib/ConsoleLoggerFactoryImpl.h delete mode 100644 pulsar-client-cpp/lib/Consumer.cc delete mode 100644 pulsar-client-cpp/lib/ConsumerConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/ConsumerConfigurationImpl.h delete mode 100644 pulsar-client-cpp/lib/ConsumerImpl.cc delete mode 100644 pulsar-client-cpp/lib/ConsumerImpl.h delete mode 100644 pulsar-client-cpp/lib/ConsumerImplBase.h delete mode 100644 pulsar-client-cpp/lib/CryptoKeyReader.cc delete mode 100644 pulsar-client-cpp/lib/DeprecatedException.cc delete mode 100644 pulsar-client-cpp/lib/EncryptionKeyInfo.cc delete mode 100644 pulsar-client-cpp/lib/EncryptionKeyInfoImpl.cc delete mode 100644 
pulsar-client-cpp/lib/EncryptionKeyInfoImpl.h delete mode 100644 pulsar-client-cpp/lib/ExecutorService.cc delete mode 100644 pulsar-client-cpp/lib/ExecutorService.h delete mode 100644 pulsar-client-cpp/lib/FileLoggerFactory.cc delete mode 100644 pulsar-client-cpp/lib/FileLoggerFactoryImpl.h delete mode 100644 pulsar-client-cpp/lib/Future.h delete mode 100644 pulsar-client-cpp/lib/GetLastMessageIdResponse.h delete mode 100644 pulsar-client-cpp/lib/HTTPLookupService.cc delete mode 100644 pulsar-client-cpp/lib/HTTPLookupService.h delete mode 100644 pulsar-client-cpp/lib/HandlerBase.cc delete mode 100644 pulsar-client-cpp/lib/HandlerBase.h delete mode 100644 pulsar-client-cpp/lib/Hash.h delete mode 100644 pulsar-client-cpp/lib/JavaStringHash.cc delete mode 100644 pulsar-client-cpp/lib/JavaStringHash.h delete mode 100644 pulsar-client-cpp/lib/KeySharedPolicy.cc delete mode 100644 pulsar-client-cpp/lib/KeySharedPolicyImpl.h delete mode 100644 pulsar-client-cpp/lib/Latch.cc delete mode 100644 pulsar-client-cpp/lib/Latch.h delete mode 100644 pulsar-client-cpp/lib/Log4CxxLogger.h delete mode 100644 pulsar-client-cpp/lib/Log4cxxLogger.cc delete mode 100644 pulsar-client-cpp/lib/LogUtils.cc delete mode 100644 pulsar-client-cpp/lib/LogUtils.h delete mode 100644 pulsar-client-cpp/lib/LookupDataResult.h delete mode 100644 pulsar-client-cpp/lib/LookupService.h delete mode 100644 pulsar-client-cpp/lib/MapCache.h delete mode 100644 pulsar-client-cpp/lib/MemoryLimitController.cc delete mode 100644 pulsar-client-cpp/lib/MemoryLimitController.h delete mode 100644 pulsar-client-cpp/lib/Message.cc delete mode 100644 pulsar-client-cpp/lib/MessageAndCallbackBatch.cc delete mode 100644 pulsar-client-cpp/lib/MessageAndCallbackBatch.h delete mode 100644 pulsar-client-cpp/lib/MessageBatch.cc delete mode 100644 pulsar-client-cpp/lib/MessageBuilder.cc delete mode 100644 pulsar-client-cpp/lib/MessageCrypto.cc delete mode 100644 pulsar-client-cpp/lib/MessageCrypto.h delete mode 100644 
pulsar-client-cpp/lib/MessageId.cc delete mode 100644 pulsar-client-cpp/lib/MessageIdImpl.h delete mode 100644 pulsar-client-cpp/lib/MessageIdUtil.h delete mode 100644 pulsar-client-cpp/lib/MessageImpl.cc delete mode 100644 pulsar-client-cpp/lib/MessageImpl.h delete mode 100644 pulsar-client-cpp/lib/MessageRouterBase.cc delete mode 100644 pulsar-client-cpp/lib/MessageRouterBase.h delete mode 100644 pulsar-client-cpp/lib/MultiResultCallback.h delete mode 100644 pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.cc delete mode 100644 pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.h delete mode 100644 pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc delete mode 100644 pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h delete mode 100644 pulsar-client-cpp/lib/Murmur3_32Hash.cc delete mode 100644 pulsar-client-cpp/lib/Murmur3_32Hash.h delete mode 100644 pulsar-client-cpp/lib/NamedEntity.cc delete mode 100644 pulsar-client-cpp/lib/NamedEntity.h delete mode 100644 pulsar-client-cpp/lib/NamespaceName.cc delete mode 100644 pulsar-client-cpp/lib/NamespaceName.h delete mode 100644 pulsar-client-cpp/lib/NegativeAcksTracker.cc delete mode 100644 pulsar-client-cpp/lib/NegativeAcksTracker.h delete mode 100644 pulsar-client-cpp/lib/ObjectPool.h delete mode 100644 pulsar-client-cpp/lib/OpSendMsg.h delete mode 100644 pulsar-client-cpp/lib/PartitionedProducerImpl.cc delete mode 100644 pulsar-client-cpp/lib/PartitionedProducerImpl.h delete mode 100644 pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc delete mode 100644 pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.h delete mode 100644 pulsar-client-cpp/lib/PendingFailures.h delete mode 100644 pulsar-client-cpp/lib/PeriodicTask.cc delete mode 100644 pulsar-client-cpp/lib/PeriodicTask.h delete mode 100644 pulsar-client-cpp/lib/Producer.cc delete mode 100644 pulsar-client-cpp/lib/ProducerConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/ProducerConfigurationImpl.h delete mode 100644 
pulsar-client-cpp/lib/ProducerImpl.cc delete mode 100644 pulsar-client-cpp/lib/ProducerImpl.h delete mode 100644 pulsar-client-cpp/lib/ProducerImplBase.h delete mode 100644 pulsar-client-cpp/lib/ProtobufNativeSchema.cc delete mode 100644 pulsar-client-cpp/lib/PulsarScheme.h delete mode 100644 pulsar-client-cpp/lib/Reader.cc delete mode 100644 pulsar-client-cpp/lib/ReaderConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/ReaderConfigurationImpl.h delete mode 100644 pulsar-client-cpp/lib/ReaderImpl.cc delete mode 100644 pulsar-client-cpp/lib/ReaderImpl.h delete mode 100644 pulsar-client-cpp/lib/Result.cc delete mode 100644 pulsar-client-cpp/lib/RetryableLookupService.h delete mode 100644 pulsar-client-cpp/lib/RoundRobinMessageRouter.cc delete mode 100644 pulsar-client-cpp/lib/RoundRobinMessageRouter.h delete mode 100644 pulsar-client-cpp/lib/Schema.cc delete mode 100644 pulsar-client-cpp/lib/Semaphore.cc delete mode 100644 pulsar-client-cpp/lib/Semaphore.h delete mode 100644 pulsar-client-cpp/lib/ServiceNameResolver.h delete mode 100644 pulsar-client-cpp/lib/ServiceURI.cc delete mode 100644 pulsar-client-cpp/lib/ServiceURI.h delete mode 100644 pulsar-client-cpp/lib/ServiceUnitId.h delete mode 100644 pulsar-client-cpp/lib/SharedBuffer.h delete mode 100644 pulsar-client-cpp/lib/SimpleLogger.h delete mode 100644 pulsar-client-cpp/lib/SinglePartitionMessageRouter.cc delete mode 100644 pulsar-client-cpp/lib/SinglePartitionMessageRouter.h delete mode 100644 pulsar-client-cpp/lib/Synchronized.h delete mode 100644 pulsar-client-cpp/lib/SynchronizedHashMap.h delete mode 100644 pulsar-client-cpp/lib/TestUtil.h delete mode 100644 pulsar-client-cpp/lib/TimeUtils.cc delete mode 100644 pulsar-client-cpp/lib/TimeUtils.h delete mode 100644 pulsar-client-cpp/lib/TopicMetadataImpl.cc delete mode 100644 pulsar-client-cpp/lib/TopicMetadataImpl.h delete mode 100644 pulsar-client-cpp/lib/TopicName.cc delete mode 100644 pulsar-client-cpp/lib/TopicName.h delete mode 100644 
pulsar-client-cpp/lib/UnAckedMessageTrackerDisabled.h delete mode 100644 pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.cc delete mode 100644 pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.h delete mode 100644 pulsar-client-cpp/lib/UnAckedMessageTrackerInterface.h delete mode 100644 pulsar-client-cpp/lib/UnboundedBlockingQueue.h delete mode 100644 pulsar-client-cpp/lib/Url.cc delete mode 100644 pulsar-client-cpp/lib/Url.h delete mode 100644 pulsar-client-cpp/lib/UtilAllocator.h delete mode 100644 pulsar-client-cpp/lib/Utils.h delete mode 100644 pulsar-client-cpp/lib/VersionInternal.h delete mode 100644 pulsar-client-cpp/lib/auth/AuthAthenz.cc delete mode 100644 pulsar-client-cpp/lib/auth/AuthAthenz.h delete mode 100644 pulsar-client-cpp/lib/auth/AuthBasic.cc delete mode 100644 pulsar-client-cpp/lib/auth/AuthBasic.h delete mode 100644 pulsar-client-cpp/lib/auth/AuthOauth2.cc delete mode 100644 pulsar-client-cpp/lib/auth/AuthOauth2.h delete mode 100644 pulsar-client-cpp/lib/auth/AuthTls.cc delete mode 100644 pulsar-client-cpp/lib/auth/AuthTls.h delete mode 100644 pulsar-client-cpp/lib/auth/AuthToken.cc delete mode 100644 pulsar-client-cpp/lib/auth/AuthToken.h delete mode 100644 pulsar-client-cpp/lib/auth/athenz/ZTSClient.cc delete mode 100644 pulsar-client-cpp/lib/auth/athenz/ZTSClient.h delete mode 100644 pulsar-client-cpp/lib/c/cStringList.cc delete mode 100644 pulsar-client-cpp/lib/c/cStringMap.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Authentication.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Client.cc delete mode 100644 pulsar-client-cpp/lib/c/c_ClientConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Consumer.cc delete mode 100644 pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Message.cc delete mode 100644 pulsar-client-cpp/lib/c/c_MessageId.cc delete mode 100644 pulsar-client-cpp/lib/c/c_MessageRouter.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Producer.cc delete mode 100644 
pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Reader.cc delete mode 100644 pulsar-client-cpp/lib/c/c_ReaderConfiguration.cc delete mode 100644 pulsar-client-cpp/lib/c/c_Result.cc delete mode 100644 pulsar-client-cpp/lib/c/c_structs.h delete mode 100644 pulsar-client-cpp/lib/checksum/ChecksumProvider.cc delete mode 100644 pulsar-client-cpp/lib/checksum/ChecksumProvider.h delete mode 100644 pulsar-client-cpp/lib/checksum/crc32c_arm.cc delete mode 100644 pulsar-client-cpp/lib/checksum/crc32c_arm.h delete mode 100644 pulsar-client-cpp/lib/checksum/crc32c_sse42.cc delete mode 100644 pulsar-client-cpp/lib/checksum/crc32c_sse42.h delete mode 100644 pulsar-client-cpp/lib/checksum/crc32c_sw.cc delete mode 100644 pulsar-client-cpp/lib/checksum/crc32c_sw.h delete mode 100644 pulsar-client-cpp/lib/checksum/gf2.hpp delete mode 100644 pulsar-client-cpp/lib/checksum/int_types.h delete mode 100644 pulsar-client-cpp/lib/lz4/lz4.cc delete mode 100644 pulsar-client-cpp/lib/lz4/lz4.h delete mode 100644 pulsar-client-cpp/lib/stats/ConsumerStatsBase.h delete mode 100644 pulsar-client-cpp/lib/stats/ConsumerStatsDisabled.h delete mode 100644 pulsar-client-cpp/lib/stats/ConsumerStatsImpl.cc delete mode 100644 pulsar-client-cpp/lib/stats/ConsumerStatsImpl.h delete mode 100644 pulsar-client-cpp/lib/stats/ProducerStatsBase.h delete mode 100644 pulsar-client-cpp/lib/stats/ProducerStatsDisabled.h delete mode 100644 pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc delete mode 100644 pulsar-client-cpp/lib/stats/ProducerStatsImpl.h delete mode 100644 pulsar-client-cpp/log4cxx.conf delete mode 100644 pulsar-client-cpp/perf/CMakeLists.txt delete mode 100644 pulsar-client-cpp/perf/PerfConsumer.cc delete mode 100644 pulsar-client-cpp/perf/PerfProducer.cc delete mode 100644 pulsar-client-cpp/perf/RateLimiter.h delete mode 100644 pulsar-client-cpp/pkg/apk/.gitignore delete mode 100644 pulsar-client-cpp/pkg/apk/APKBUILD delete mode 100755 
pulsar-client-cpp/pkg/apk/build-apk.sh delete mode 100755 pulsar-client-cpp/pkg/apk/docker-build-apk.sh delete mode 100644 pulsar-client-cpp/pkg/deb/.gitignore delete mode 100644 pulsar-client-cpp/pkg/deb/Dockerfile delete mode 100755 pulsar-client-cpp/pkg/deb/build-deb.sh delete mode 100755 pulsar-client-cpp/pkg/deb/docker-build-deb.sh delete mode 100644 pulsar-client-cpp/pkg/licenses/LICENSE-boost.txt delete mode 100644 pulsar-client-cpp/pkg/licenses/LICENSE-jsoncpp.txt delete mode 100644 pulsar-client-cpp/pkg/licenses/LICENSE-libcurl.txt delete mode 100644 pulsar-client-cpp/pkg/licenses/LICENSE-protobuf.txt delete mode 100644 pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt delete mode 100644 pulsar-client-cpp/pkg/licenses/LICENSE.txt delete mode 100644 pulsar-client-cpp/pkg/rpm/.gitignore delete mode 100644 pulsar-client-cpp/pkg/rpm/Dockerfile delete mode 100644 pulsar-client-cpp/pkg/rpm/SPECS/pulsar-client.spec delete mode 100755 pulsar-client-cpp/pkg/rpm/build-rpm.sh delete mode 100755 pulsar-client-cpp/pkg/rpm/docker-build-rpm.sh delete mode 100755 pulsar-client-cpp/pulsar-test-service-start.sh delete mode 100755 pulsar-client-cpp/pulsar-test-service-stop.sh delete mode 100644 pulsar-client-cpp/python/.gitignore delete mode 100644 pulsar-client-cpp/python/CMakeLists.txt delete mode 100755 pulsar-client-cpp/python/build-mac-wheels.sh delete mode 100755 pulsar-client-cpp/python/custom_logger_test.py delete mode 100644 pulsar-client-cpp/python/examples/company.avsc delete mode 100755 pulsar-client-cpp/python/examples/rpc_client.py delete mode 100755 pulsar-client-cpp/python/examples/rpc_server.py delete mode 100644 pulsar-client-cpp/python/pulsar/__init__.py delete mode 100644 pulsar-client-cpp/python/pulsar/exceptions.py delete mode 100644 pulsar-client-cpp/python/pulsar/functions/__init__.py delete mode 100644 pulsar-client-cpp/python/pulsar/functions/context.py delete mode 100644 pulsar-client-cpp/python/pulsar/functions/function.py delete mode 100644 
pulsar-client-cpp/python/pulsar/functions/serde.py delete mode 100644 pulsar-client-cpp/python/pulsar/schema/__init__.py delete mode 100644 pulsar-client-cpp/python/pulsar/schema/definition.py delete mode 100644 pulsar-client-cpp/python/pulsar/schema/schema.py delete mode 100644 pulsar-client-cpp/python/pulsar/schema/schema_avro.py delete mode 100755 pulsar-client-cpp/python/pulsar_test.py delete mode 100755 pulsar-client-cpp/python/schema_test.py delete mode 100644 pulsar-client-cpp/python/setup.py delete mode 100644 pulsar-client-cpp/python/src/authentication.cc delete mode 100644 pulsar-client-cpp/python/src/client.cc delete mode 100644 pulsar-client-cpp/python/src/config.cc delete mode 100644 pulsar-client-cpp/python/src/consumer.cc delete mode 100644 pulsar-client-cpp/python/src/cryptoKeyReader.cc delete mode 100644 pulsar-client-cpp/python/src/enums.cc delete mode 100644 pulsar-client-cpp/python/src/exceptions.cc delete mode 100644 pulsar-client-cpp/python/src/message.cc delete mode 100644 pulsar-client-cpp/python/src/producer.cc delete mode 100644 pulsar-client-cpp/python/src/pulsar.cc delete mode 100644 pulsar-client-cpp/python/src/reader.cc delete mode 100644 pulsar-client-cpp/python/src/schema.cc delete mode 100644 pulsar-client-cpp/python/src/utils.cc delete mode 100644 pulsar-client-cpp/python/src/utils.h delete mode 100755 pulsar-client-cpp/python/test_consumer.py delete mode 100755 pulsar-client-cpp/python/test_producer.py delete mode 100755 pulsar-client-cpp/run-unit-tests.sh delete mode 100644 pulsar-client-cpp/templates/Version.h.in delete mode 100644 pulsar-client-cpp/test-conf/.htpasswd delete mode 100644 pulsar-client-cpp/test-conf/client-ssl.conf delete mode 100644 pulsar-client-cpp/test-conf/client.conf delete mode 100644 pulsar-client-cpp/test-conf/standalone-ssl.conf delete mode 100644 pulsar-client-cpp/test-conf/standalone.conf delete mode 100644 pulsar-client-cpp/tests/AuthBasicTest.cc delete mode 100644 
pulsar-client-cpp/tests/AuthPluginTest.cc delete mode 100644 pulsar-client-cpp/tests/AuthTokenTest.cc delete mode 100644 pulsar-client-cpp/tests/BackoffTest.cc delete mode 100644 pulsar-client-cpp/tests/BasicEndToEndTest.cc delete mode 100644 pulsar-client-cpp/tests/BatchMessageTest.cc delete mode 100644 pulsar-client-cpp/tests/BlockingQueueTest.cc delete mode 100644 pulsar-client-cpp/tests/CMakeLists.txt delete mode 100644 pulsar-client-cpp/tests/ClientDeduplicationTest.cc delete mode 100644 pulsar-client-cpp/tests/ClientTest.cc delete mode 100644 pulsar-client-cpp/tests/CompressionCodecSnappyTest.cc delete mode 100644 pulsar-client-cpp/tests/ConsumerConfigurationTest.cc delete mode 100644 pulsar-client-cpp/tests/ConsumerStatsTest.cc delete mode 100644 pulsar-client-cpp/tests/ConsumerTest.cc delete mode 100644 pulsar-client-cpp/tests/ConsumerTest.h delete mode 100644 pulsar-client-cpp/tests/CustomLoggerTest.cc delete mode 100644 pulsar-client-cpp/tests/CustomRoutingPolicy.h delete mode 100644 pulsar-client-cpp/tests/HashTest.cc delete mode 100644 pulsar-client-cpp/tests/HttpHelper.cc delete mode 100644 pulsar-client-cpp/tests/HttpHelper.h delete mode 100644 pulsar-client-cpp/tests/KeyBasedBatchingTest.cc delete mode 100644 pulsar-client-cpp/tests/KeySharedConsumerTest.cc delete mode 100644 pulsar-client-cpp/tests/KeySharedPolicyTest.cc delete mode 100644 pulsar-client-cpp/tests/LatchTest.cc delete mode 100644 pulsar-client-cpp/tests/LogHelper.h delete mode 100644 pulsar-client-cpp/tests/LoggerTest.cc delete mode 100644 pulsar-client-cpp/tests/LookupServiceTest.cc delete mode 100644 pulsar-client-cpp/tests/MapCacheTest.cc delete mode 100644 pulsar-client-cpp/tests/MemoryLimitControllerTest.cc delete mode 100644 pulsar-client-cpp/tests/MemoryLimitTest.cc delete mode 100644 pulsar-client-cpp/tests/MessageChunkingTest.cc delete mode 100644 pulsar-client-cpp/tests/MessageIdTest.cc delete mode 100644 pulsar-client-cpp/tests/MessageTest.cc delete mode 100644 
pulsar-client-cpp/tests/NamespaceNameTest.cc delete mode 100644 pulsar-client-cpp/tests/NoOpsCryptoKeyReader.h delete mode 100644 pulsar-client-cpp/tests/PaddingDemo.proto delete mode 100644 pulsar-client-cpp/tests/PartitionsUpdateTest.cc delete mode 100644 pulsar-client-cpp/tests/PeriodicTaskTest.cc delete mode 100644 pulsar-client-cpp/tests/ProducerConfigurationTest.cc delete mode 100644 pulsar-client-cpp/tests/ProducerTest.cc delete mode 100644 pulsar-client-cpp/tests/PromiseTest.cc delete mode 100644 pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc delete mode 100644 pulsar-client-cpp/tests/PulsarFriend.h delete mode 100644 pulsar-client-cpp/tests/ReaderConfigurationTest.cc delete mode 100644 pulsar-client-cpp/tests/ReaderTest.cc delete mode 100644 pulsar-client-cpp/tests/RoundRobinMessageRouterTest.cc delete mode 100644 pulsar-client-cpp/tests/SchemaTest.cc delete mode 100644 pulsar-client-cpp/tests/SemaphoreTest.cc delete mode 100644 pulsar-client-cpp/tests/ServiceURITest.cc delete mode 100644 pulsar-client-cpp/tests/SinglePartitionMessageRouterTest.cc delete mode 100644 pulsar-client-cpp/tests/SynchronizedHashMapTest.cc delete mode 100644 pulsar-client-cpp/tests/TopicMetadataImplTest.cc delete mode 100644 pulsar-client-cpp/tests/TopicNameTest.cc delete mode 100644 pulsar-client-cpp/tests/UnboundedBlockingQueueTest.cc delete mode 100644 pulsar-client-cpp/tests/UrlTest.cc delete mode 100644 pulsar-client-cpp/tests/VersionTest.cc delete mode 100644 pulsar-client-cpp/tests/WaitUtils.h delete mode 100644 pulsar-client-cpp/tests/ZLibCompressionTest.cc delete mode 100644 pulsar-client-cpp/tests/ZTSClientTest.cc delete mode 100644 pulsar-client-cpp/tests/ZeroQueueSizeTest.cc delete mode 100644 pulsar-client-cpp/tests/authentication.conf delete mode 100644 pulsar-client-cpp/tests/c/c_BasicEndToEndTest.cc delete mode 100644 pulsar-client-cpp/tests/c/c_ConsumerConfigurationTest.cc delete mode 100644 pulsar-client-cpp/tests/c/c_ProducerConfigurationTest.cc delete 
mode 100644 pulsar-client-cpp/tests/client.conf delete mode 100644 pulsar-client-cpp/tests/main.cc delete mode 100644 pulsar-client-cpp/tests/mocks/GMockMessage.h delete mode 100644 pulsar-client-cpp/tests/standalone.conf delete mode 100644 pulsar-client-cpp/vcpkg.json delete mode 100644 pulsar-client-cpp/wireshark/CMakeLists.txt delete mode 100644 pulsar-client-cpp/wireshark/README.md delete mode 100644 pulsar-client-cpp/wireshark/pulsarDissector.cc diff --git a/.github/changes-filter.yaml b/.github/changes-filter.yaml index 82f5b22a8aad9..72da09398ebaf 100644 --- a/.github/changes-filter.yaml +++ b/.github/changes-filter.yaml @@ -10,8 +10,6 @@ docs: - '**/*.md' tests: - added|modified: '**/src/test/java/**/*.java' -cpp: - - 'pulsar-client-cpp/**' need_owasp: - 'pom.xml' - '**/pom.xml' diff --git a/.github/workflows/ci-cpp-build.yaml b/.github/workflows/ci-cpp-build.yaml deleted file mode 100644 index 7a450efe9725d..0000000000000 --- a/.github/workflows/ci-cpp-build.yaml +++ /dev/null @@ -1,259 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -name: CI - CPP build -on: - pull_request: - branches: - - master - paths: - - '.github/workflows/**' - - 'pulsar-client-cpp/**' - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - changed_files_job: - name: Preconditions - runs-on: ubuntu-20.04 - outputs: - docs_only: ${{ needs.changed_files_job.outputs.docs_only }} - cpp_only: ${{ needs.changed_files_job.outputs.cpp_only }} - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Detect changed files - id: changes - uses: apache/pulsar-test-infra/paths-filter@master - with: - filters: .github/changes-filter.yaml - list-files: csv - - - name: Check changed files - id: check_changes - run: | - echo "::set-output name=docs_only::${{ fromJSON(steps.changes.outputs.all_count) == fromJSON(steps.changes.outputs.docs_count) && fromJSON(steps.changes.outputs.docs_count) > 0 }}" - - - name: Check if the PR has been approved for testing - if: ${{ steps.check_changes.outputs.docs_only != 'true' && github.repository == 'apache/pulsar' && github.event_name == 'pull_request' }} - env: - GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }} - GITHUB_TOKEN: ${{ github.token }} - run: | - build/pulsar_ci_tool.sh check_ready_to_test - - cpp-build-centos7: - needs: changed_files_job - name: Build CPP Client on CentOS7 - runs-on: ubuntu-20.04 - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - timeout-minutes: 120 - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Tune Runner VM - uses: ./.github/actions/tune-runner-vm - - - name: build cpp client on centos 7 - run: | - echo "Build C++ client library on CentOS 7" - pulsar-client-cpp/docker-build-centos7.sh - - cpp-build-windows: - needs: changed_files_job - timeout-minutes: 120 - name: Build CPP Client on ${{ matrix.name }} - runs-on: ${{ matrix.os }} - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - env: - VCPKG_ROOT: '${{ github.workspace }}/vcpkg' - 
strategy: - fail-fast: false - matrix: - include: - - name: 'Windows x64' - os: windows-2022 - triplet: x64-windows - vcpkg_dir: 'C:\vcpkg' - suffix: 'windows-win64' - generator: 'Visual Studio 17 2022' - arch: '-A x64' - - name: 'Windows x86' - os: windows-2022 - triplet: x86-windows - vcpkg_dir: 'C:\vcpkg' - suffix: 'windows-win32' - generator: 'Visual Studio 17 2022' - arch: '-A Win32' - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Restore vcpkg and its artifacts. - uses: actions/cache@v2 - id: vcpkg-cache - with: - path: | - ${{ env.VCPKG_ROOT }} - pulsar-client-cpp/vcpkg_installed - !${{ env.VCPKG_ROOT }}/.git - !${{ env.VCPKG_ROOT }}/buildtrees - !${{ env.VCPKG_ROOT }}/packages - !${{ env.VCPKG_ROOT }}/downloads - key: | - ${{ runner.os }}-${{ matrix.triplet}}-${{ hashFiles( 'pulsar-client-cpp/vcpkg.json' ) }} - - - name: Get vcpkg(windows) - if: ${{ runner.os == 'Windows' && steps.vcpkg-cache.outputs.cache-hit != 'true' }} - run: | - cd ${{ github.workspace }} - mkdir build -force - git clone https://github.com/Microsoft/vcpkg.git - cd vcpkg - .\bootstrap-vcpkg.bat - - - name: remove system vcpkg(windows) - if: runner.os == 'Windows' - run: rm -rf "$VCPKG_INSTALLATION_ROOT" - shell: bash - - - name: Install vcpkg packages - run: | - cd pulsar-client-cpp && ${{ env.VCPKG_ROOT }}\vcpkg.exe install --triplet ${{ matrix.triplet }} - - - name: Configure (default) - - shell: bash - run: | - if [ "$RUNNER_OS" == "Windows" ]; then - cd pulsar-client-cpp && \ - cmake \ - -B ./build-0 \ - -G "${{ matrix.generator }}" ${{ matrix.arch }} \ - -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF \ - -DVCPKG_TRIPLET=${{ matrix.triplet }} \ - -DCMAKE_BUILD_TYPE=Release \ - -S . 
- fi - - - name: Compile - shell: bash - run: | - if [ "$RUNNER_OS" == "Windows" ]; then - cd pulsar-client-cpp && \ - cmake --build ./build-0 --parallel --config Release - fi - - - name: Configure (dynamic library only) - shell: bash - run: | - if [ "$RUNNER_OS" == "Windows" ]; then - cd pulsar-client-cpp && \ - cmake \ - -B ./build-1 \ - -G "${{ matrix.generator }}" ${{ matrix.arch }} \ - -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF \ - -DVCPKG_TRIPLET=${{ matrix.triplet }} \ - -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_STATIC_LIB=OFF \ - -S . - fi - - - name: Compile - shell: bash - run: | - if [ "$RUNNER_OS" == "Windows" ]; then - cd pulsar-client-cpp && \ - cmake --build ./build-1 --parallel --config Release - fi - cpp-deb-rpm-packaging: - needs: changed_files_job - name: Build CPP Client on RPM - runs-on: ubuntu-20.04 - timeout-minutes: 120 - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Tune Runner VM - uses: ./.github/actions/tune-runner-vm - - - name: Cache local Maven repository - uses: actions/cache@v2 - with: - path: | - ~/.m2/repository/*/*/* - !~/.m2/repository/org/apache/pulsar - key: ${{ runner.os }}-m2-dependencies-core-modules-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-m2-dependencies-core-modules- - - - name: Set up JDK 17 - uses: actions/setup-java@v2 - with: - distribution: 'temurin' - java-version: 17 - - - name: clean disk - run: | - sudo apt clean - docker rmi $(docker images -q) -f - df -h - - - name: Package Pulsar source - run: mvn -B -ntp -q clean package -pl pulsar-client-api -am -DskipTests - - - name: Build Debian packages - run: | - echo "Build Debian packages" - BUILD_IMAGE=1 pulsar-client-cpp/pkg/deb/docker-build-deb.sh - - - name: Build RPM packages - run: | - echo "Build RPM packages" - BUILD_IMAGE=1 pulsar-client-cpp/pkg/rpm/docker-build-rpm.sh - - build-python-wheel: - needs: changed_files_job - name: Build Python Client 
- runs-on: ubuntu-20.04 - timeout-minutes: 120 - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Tune Runner VM - uses: ./.github/actions/tune-runner-vm - - - name: build python3.9 client - - run: | - echo "Build Python3.9 client library" - pulsar-client-cpp/docker-build-python3.9.sh \ No newline at end of file diff --git a/.github/workflows/ci-go-functions.yaml b/.github/workflows/ci-go-functions.yaml index 266bfd57723f1..834451c1c70dd 100644 --- a/.github/workflows/ci-go-functions.yaml +++ b/.github/workflows/ci-go-functions.yaml @@ -40,7 +40,6 @@ jobs: runs-on: ubuntu-20.04 outputs: docs_only: ${{ steps.check_changes.outputs.docs_only }} - cpp_only: ${{ steps.check_changes.outputs.cpp_only }} steps: - name: checkout uses: actions/checkout@v2 @@ -56,7 +55,6 @@ jobs: id: check_changes run: | echo "::set-output name=docs_only::${{ fromJSON(steps.changes.outputs.all_count) == fromJSON(steps.changes.outputs.docs_count) && fromJSON(steps.changes.outputs.docs_count) > 0 }}" - echo "::set-output name=cpp_only::${{ fromJSON(steps.changes.outputs.all_count) == fromJSON(steps.changes.outputs.cpp_count) && fromJSON(steps.changes.outputs.cpp_count) > 0 }}" - name: Check if the PR has been approved for testing if: ${{ steps.check_changes.outputs.docs_only != 'true' && github.repository == 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -68,7 +66,7 @@ jobs: check-style: needs: changed_files_job - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} name: Go ${{ matrix.go-version }} Functions style check runs-on: ubuntu-20.04 strategy: diff --git a/.github/workflows/pulsar-ci-flaky.yaml b/.github/workflows/pulsar-ci-flaky.yaml index 03e3adff33a9d..040bc4328b3bc 100644 --- a/.github/workflows/pulsar-ci-flaky.yaml +++ 
b/.github/workflows/pulsar-ci-flaky.yaml @@ -41,7 +41,6 @@ jobs: runs-on: ubuntu-20.04 outputs: docs_only: ${{ steps.check_changes.outputs.docs_only }} - cpp_only: ${{ steps.check_changes.outputs.cpp_only }} changed_tests: ${{ steps.changes.outputs.tests_files }} steps: - name: checkout @@ -58,7 +57,6 @@ jobs: id: check_changes run: | echo "::set-output name=docs_only::${{ fromJSON(steps.changes.outputs.all_count) == fromJSON(steps.changes.outputs.docs_count) && fromJSON(steps.changes.outputs.docs_count) > 0 }}" - echo "::set-output name=cpp_only::${{ fromJSON(steps.changes.outputs.all_count) == fromJSON(steps.changes.outputs.cpp_count) && fromJSON(steps.changes.outputs.cpp_count) > 0 }}" - name: Check if the PR has been approved for testing if: ${{ steps.check_changes.outputs.docs_only != 'true' && github.repository == 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -73,7 +71,7 @@ jobs: name: Flaky tests suite runs-on: ubuntu-20.04 timeout-minutes: 100 - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' != 'true' }} steps: - name: checkout uses: actions/checkout@v2 diff --git a/.github/workflows/pulsar-ci.yaml b/.github/workflows/pulsar-ci.yaml index e9418ee963a53..ef641feec1523 100644 --- a/.github/workflows/pulsar-ci.yaml +++ b/.github/workflows/pulsar-ci.yaml @@ -41,7 +41,6 @@ jobs: runs-on: ubuntu-20.04 outputs: docs_only: ${{ steps.check_changes.outputs.docs_only }} - cpp_only: ${{ steps.check_changes.outputs.cpp_only }} changed_tests: ${{ steps.changes.outputs.tests_files }} steps: - name: checkout @@ -58,7 +57,7 @@ jobs: id: check_changes run: | echo "::set-output name=docs_only::${{ fromJSON(steps.changes.outputs.all_count) == fromJSON(steps.changes.outputs.docs_count) && fromJSON(steps.changes.outputs.docs_count) > 0 }}" - echo "::set-output name=cpp_only::${{ fromJSON(steps.changes.outputs.all_count) == 
fromJSON(steps.changes.outputs.cpp_count) && fromJSON(steps.changes.outputs.cpp_count) > 0 }}" + - name: Check if the PR has been approved for testing if: ${{ steps.check_changes.outputs.docs_only != 'true' && github.repository == 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -141,7 +140,7 @@ jobs: runs-on: ubuntu-20.04 timeout-minutes: ${{ matrix.timeout || 60 }} needs: ['changed_files_job', 'build-and-license-check'] - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} strategy: fail-fast: false matrix: @@ -255,7 +254,7 @@ jobs: runs-on: ubuntu-20.04 timeout-minutes: 60 needs: ['changed_files_job', 'build-and-license-check'] - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true'}} env: UBUNTU_MIRROR: http://azure.archive.ubuntu.com/ubuntu/ steps: @@ -320,7 +319,7 @@ jobs: runs-on: ubuntu-20.04 timeout-minutes: ${{ matrix.timeout || 60 }} needs: ['changed_files_job', 'pulsar-java-test-image'] - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} env: PULSAR_TEST_IMAGE_NAME: apachepulsar/java-test-image:latest strategy: @@ -462,7 +461,7 @@ jobs: 'changed_files_job', 'integration-tests' ] - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} steps: - name: checkout uses: actions/checkout@v2 @@ -477,94 +476,6 @@ jobs: run: | gh-actions-artifact-client.js delete pulsar-java-test-image.zst - cpp-tests: - name: CI - CPP, Python Tests - runs-on: ubuntu-20.04 - timeout-minutes: 120 - needs: [ - 'changed_files_job', - 'integration-tests' - ] - if: 
always() - steps: - - name: check condition - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - run: | - if [[ ! ( ( \ - "${{needs.changed_files_job.outputs.cpp_only}}" == "false" \ - && "${{ needs.integration-tests.result }}" == "success" \ - ) || ( \ - "${{needs.changed_files_job.outputs.cpp_only}}" == "true" \ - ) ) ]]; then - echo "Required jobs haven't been completed successfully." - exit 1 - fi - - - name: checkout - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - uses: actions/checkout@v2 - - - name: Tune Runner VM - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - uses: ./.github/actions/tune-runner-vm - - - name: Cache local Maven repository - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - uses: actions/cache@v2 - with: - path: | - ~/.m2/repository/*/*/* - !~/.m2/repository/org/apache/pulsar - key: ${{ runner.os }}-m2-dependencies-core-modules-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-m2-dependencies-core-modules- - - - name: Set up JDK 17 - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - uses: actions/setup-java@v2 - with: - distribution: 'temurin' - java-version: 17 - - - name: Clean Disk - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - uses: ./.github/actions/clean-disk - - - name: Install gh-actions-artifact-client.js - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master - - - name: Restore maven build results from Github artifact cache - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - run: | - cd $HOME - $GITHUB_WORKSPACE/build/pulsar_ci_tool.sh restore_tar_from_github_actions_artifacts pulsar-maven-repository-binaries - cd $GITHUB_WORKSPACE - $GITHUB_WORKSPACE/build/pulsar_ci_tool.sh restore_tar_from_github_actions_artifacts pulsar-server-distribution - - - name: copy python tests - if: ${{ 
needs.changed_files_job.outputs.docs_only != 'true' }} - run: | - mvn -B -Pskip-all -ntp -pl pulsar-functions/instance package - - - name: build cpp artifacts - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - run: | - echo "Build C++ client library" - pulsar-client-cpp/docker-build.sh - - - name: run c++ tests - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} - run: pulsar-client-cpp/docker-tests.sh - - - name: Upload test-logs - if: ${{ failure() && needs.changed_files_job.outputs.docs_only != 'true' }} - uses: actions/upload-artifact@v3 - continue-on-error: true - with: - name: cpp-tests-logs - path: test-logs - pulsar-test-latest-version-image: name: Build Pulsar docker image runs-on: ubuntu-20.04 @@ -927,7 +838,7 @@ jobs: runs-on: macos-11 timeout-minutes: 120 needs: ['changed_files_job', 'integration-tests'] - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' && needs.changed_files_job.outputs.cpp_only != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} steps: - name: checkout uses: actions/checkout@v2 @@ -1033,24 +944,19 @@ jobs: 'integration-tests', 'system-tests', 'flaky-system-tests', - 'macos-build', - 'cpp-tests' + 'macos-build' ] steps: - name: Check that all required jobs were completed successfully if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} run: | if [[ ! 
( ( \ - "${{needs.changed_files_job.outputs.cpp_only}}" == "false" \ - && "${{ needs.unit-tests.result }}" == "success" \ + "${{ needs.unit-tests.result }}" == "success" \ && "${{ needs.integration-tests.result }}" == "success" \ && "${{ needs.system-tests.result }}" == "success" \ && "${{ needs.macos-build.result }}" == "success" \ - && "${{ needs.cpp-tests.result }}" == "success" \ ) || ( \ - "${{needs.changed_files_job.outputs.cpp_only}}" == "true" \ - && "${{ needs.system-tests.result }}" == "success" \ - && "${{ needs.cpp-tests.result }}" == "success" \ + "${{ needs.system-tests.result }}" == "success" \ ) ) ]]; then echo "Required jobs haven't been completed successfully." exit 1 diff --git a/docker/pulsar/Dockerfile b/docker/pulsar/Dockerfile index 3746fe1c26c2e..7ec8788b89c60 100644 --- a/docker/pulsar/Dockerfile +++ b/docker/pulsar/Dockerfile @@ -89,13 +89,14 @@ RUN pip3 install pyyaml==5.4.1 # 4. /pulsar - hadoop writes to this directory RUN mkdir /pulsar && chmod g+w /pulsar -ADD target/python-client/ /pulsar/pulsar-client - ENV PULSAR_ROOT_LOGGER=INFO,CONSOLE COPY --from=pulsar /pulsar /pulsar WORKDIR /pulsar +ARG PULSAR_CLIENT_PYTHON_VERSION +ENV PULSAR_CLIENT_PYTHON_VERSION ${PULSAR_CLIENT_PYTHON_VERSION} + # This script is intentionally run as the root user to make the dependencies available for all UIDs. 
RUN /pulsar/bin/install-pulsar-client.sh diff --git a/docker/pulsar/pom.xml b/docker/pulsar/pom.xml index a458cea39763a..f7d91b2120841 100644 --- a/docker/pulsar/pom.xml +++ b/docker/pulsar/pom.xml @@ -30,12 +30,6 @@ Apache Pulsar :: Docker Images :: Pulsar Latest Version pom - - x86_64 - false - false - - ${project.groupId} @@ -58,55 +52,11 @@ docker target/pulsar-server-distribution-${project.version}-bin.tar.gz + ${pulsar.client.python.version} ${env.UBUNTU_MIRROR} - - - org.codehaus.mojo - exec-maven-plugin - ${exec-maven-plugin.version} - - - build-pulsar-clients-python-35 - compile - - exec - - - ${skipBuildPythonClient} - ${project.basedir}/target - ${project.basedir}/../../pulsar-client-cpp/docker/build-wheels.sh - - - 3.8 cp38-cp38 manylinux2014 ${pythonClientBuildArch} - - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - copy-pulsar-clients-python - compile - - run - - - ${skipCopyPythonClients} - - copy python wheel file - - - - - - - io.fabric8 docker-maven-plugin diff --git a/docker/pulsar/scripts/install-pulsar-client.sh b/docker/pulsar/scripts/install-pulsar-client.sh index 355f5f6524a3a..0951b2aec1b60 100755 --- a/docker/pulsar/scripts/install-pulsar-client.sh +++ b/docker/pulsar/scripts/install-pulsar-client.sh @@ -27,6 +27,4 @@ if [ "${ARCH}" == "arm64" ]; then apt -y install build-essential python3-dev fi -PYTHON_MAJOR_MINOR=$(python3 -V | sed -E 's/.* ([[:digit:]]+)\.([[:digit:]]+).*/\1\2/') -WHEEL_FILE=$(ls /pulsar/pulsar-client | grep "cp${PYTHON_MAJOR_MINOR}") -pip3 install /pulsar/pulsar-client/${WHEEL_FILE}[all] +pip3 install pulsar-client[all]==${PULSAR_CLIENT_PYTHON_VERSION} diff --git a/pom.xml b/pom.xml index 8bd36cc63d263..8bb53b47943b3 100644 --- a/pom.xml +++ b/pom.xml @@ -82,6 +82,8 @@ flexible messaging model and an intuitive client API. 
${maven.compiler.target} 8 + 2.10.1 + **/Test*.java,**/*Test.java,**/*Tests.java,**/*TestCase.java diff --git a/pulsar-client-cpp/.clang-format b/pulsar-client-cpp/.clang-format deleted file mode 100644 index cb40b5069841e..0000000000000 --- a/pulsar-client-cpp/.clang-format +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- - -BasedOnStyle: Google -IndentWidth: 4 -ColumnLimit: 110 -SortIncludes: false -BreakBeforeBraces: Custom -BraceWrapping: - AfterEnum: true diff --git a/pulsar-client-cpp/.gitignore b/pulsar-client-cpp/.gitignore deleted file mode 100644 index 8c8c065e61935..0000000000000 --- a/pulsar-client-cpp/.gitignore +++ /dev/null @@ -1,91 +0,0 @@ -# Compiled Object files -*.slo -*.lo -*.o -*.obj -*.os -*.scons* - -# Compiled Dynamic libraries -*.so -lib*.so* -*.dylib -*.dll -*.so.1 - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -# Dependency file -*.d - -# Mac swap file -*.DS_Store - -# Linux swap file -*.swp - -# Exclude compiled executables -/examples/SampleProducer -/examples/SampleProducerCApi -/examples/SampleConsumer -/examples/SampleConsumerCApi -/examples/SampleAsyncProducer -/examples/SampleAsyncConsumerCApi -/examples/SampleConsumerListener -/examples/SampleConsumerListenerCApi -/examples/SampleReaderCApi -/examples/SampleFileLogger -/tests/main -/perf/perfProducer -/perf/perfConsumer -/system-test/SystemTest - -# Files generated from templates by CMAKE -include/pulsar/Version.h - -# IDE generated files -.csettings -.cproject -.project -.settings/ -.pydevproject -.idea/ -.vs/ -*.cbp -*.ninja* -.clangd/ -compile_commands.json - -# doxygen files -apidocs/ - -# generated files -generated/ - -# CMAKE -.cmake -Makefile -cmake_install.cmake -CMakeFiles -CMakeCache.txt - -pulsar-dist -install_manifest.txt -merged-library -python/venv - -# Visual Studio files -out/ -CMakeSettings.json - -# vcpkg dependencies directory -vcpkg_installed/ diff --git a/pulsar-client-cpp/CMakeLists.txt b/pulsar-client-cpp/CMakeLists.txt deleted file mode 100644 index 1975cd64bf7e1..0000000000000 --- a/pulsar-client-cpp/CMakeLists.txt +++ /dev/null @@ -1,465 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -cmake_minimum_required(VERSION 3.4) - -project (pulsar-cpp) -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake_modules") - -execute_process(COMMAND ${CMAKE_SOURCE_DIR}/../src/gen-pulsar-version-macro.py OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE PVM) -set(PVM_COMMENT "This is generated from Version.h.in by CMAKE. 
DO NOT EDIT DIRECTLY") -configure_file(templates/Version.h.in include/pulsar/Version.h @ONLY) - -if (VCPKG_TRIPLET) - message(STATUS "Use vcpkg, triplet is ${VCPKG_TRIPLET}") - set(CMAKE_PREFIX_PATH "${CMAKE_SOURCE_DIR}/vcpkg_installed/${VCPKG_TRIPLET}") - message(STATUS "Use CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH}") - set(PROTOC_PATH "${CMAKE_PREFIX_PATH}/tools/protobuf/protoc") - message(STATUS "Use protoc: ${PROTOC_PATH}") - set(VCPKG_DEBUG_ROOT "${CMAKE_SOURCE_DIR}/vcpkg_installed/${VCPKG_TRIPLET}/debug") - if (CMAKE_BUILD_TYPE STREQUAL "Debug") - set(ZLIB_ROOT ${VCPKG_DEBUG_ROOT}) - set(OPENSSL_ROOT_DIR ${VCPKG_DEBUG_ROOT}) - endif () -endif() - -find_program(CCACHE_PROGRAM ccache) -if(CCACHE_PROGRAM) - set(CMAKE_CXX_COMPILER_LAUNCHER "ccache") - MESSAGE(STATUS "Using CCache") -endif(CCACHE_PROGRAM) - -MESSAGE(STATUS "ARCHITECTURE: ${CMAKE_SYSTEM_PROCESSOR}") - -option(BUILD_DYNAMIC_LIB "Build dynamic lib" ON) -MESSAGE(STATUS "BUILD_DYNAMIC_LIB: " ${BUILD_DYNAMIC_LIB}) - -option(BUILD_STATIC_LIB "Build static lib" ON) -MESSAGE(STATUS "BUILD_STATIC_LIB: " ${BUILD_STATIC_LIB}) - -option(BUILD_TESTS "Build tests" ON) -MESSAGE(STATUS "BUILD_TESTS: " ${BUILD_TESTS}) - -option(BUILD_PYTHON_WRAPPER "Build Pulsar Python wrapper" ON) -MESSAGE(STATUS "BUILD_PYTHON_WRAPPER: " ${BUILD_PYTHON_WRAPPER}) - -option(BUILD_WIRESHARK "Build Pulsar Wireshark dissector" OFF) -MESSAGE(STATUS "BUILD_WIRESHARK: " ${BUILD_WIRESHARK}) - -option(BUILD_PERF_TOOLS "Build Pulsar CLI perf producer/consumer" OFF) -MESSAGE(STATUS "BUILD_PERF_TOOLS: " ${BUILD_PERF_TOOLS}) - -option(LINK_STATIC "Link against static libraries" OFF) -MESSAGE(STATUS "LINK_STATIC: " ${LINK_STATIC}) - -option(USE_LOG4CXX "Build with Log4cxx support" OFF) -MESSAGE(STATUS "USE_LOG4CXX: " ${USE_LOG4CXX}) - -IF (CMAKE_BUILD_TYPE STREQUAL "") - set(CMAKE_BUILD_TYPE RelWithDebInfo) -ENDIF () - -MESSAGE(STATUS "CMAKE_BUILD_TYPE: " ${CMAKE_BUILD_TYPE}) - -set(THREADS_PREFER_PTHREAD_FLAG TRUE) -find_package(Threads REQUIRED) 
-MESSAGE(STATUS "Threads library: " ${CMAKE_THREAD_LIBS_INIT}) - -set(Boost_NO_BOOST_CMAKE ON) -set(CMAKE_CXX_STANDARD 11) -set(CMAKE_C_STANDARD 11) - -# Compiler specific configuration: -# https://stackoverflow.com/questions/10046114/in-cmake-how-can-i-test-if-the-compiler-is-clang -if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - add_definitions(-DWIN32_LEAN_AND_MEAN -DNOGDI -D_WIN32_WINNT=0x0501 -D_CRT_SECURE_NO_WARNINGS) - add_compile_options(/wd4244 /wd4267 /wd4018 /wd4715 /wd4251 /wd4275) -elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - # ?? Don't have this to test with -else() # GCC or Clang are mostly compatible: - # Turn on warnings and enable warnings-as-errors: - add_compile_options(-Wall -Wformat-security -Wvla -Werror) - # Turn off certain warnings that are too much pain for too little gain: - add_compile_options(-Wno-sign-compare -Wno-deprecated-declarations -Wno-error=cpp) - if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR APPLE) - add_compile_options(-msse4.2 -mpclmul) - endif() - # Options unique to Clang or GCC: - if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Qunused-arguments) - elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.1)) - add_compile_options(-Wno-stringop-truncation) - endif() -endif() - -set(CMAKE_POSITION_INDEPENDENT_CODE ON) - -set(LOG_CATEGORY_NAME $ENV{LOG_CATEGORY_NAME}) - -if (NOT LOG_CATEGORY_NAME) - set(LOG_CATEGORY_NAME "\"pulsar.\"") -endif(NOT LOG_CATEGORY_NAME) - -add_definitions(-DLOG_CATEGORY_NAME=${LOG_CATEGORY_NAME} -DBUILDING_PULSAR -DBOOST_ALL_NO_LIB -DBOOST_ALLOW_DEPRECATED_HEADERS) - -set(OPENSSL_ROOT_DIR ${OPENSSL_ROOT_DIR} /usr/lib64/) - -### This part is to find and keep SSL dynamic libs in RECORD_OPENSSL_SSL_LIBRARY and RECORD_OPENSSL_CRYPTO_LIBRARY -### After find the libs, will unset related cache, and will not affect another same call to find_package. 
-if (APPLE) - set(OPENSSL_INCLUDE_DIR /usr/local/opt/openssl/include/ /opt/homebrew/opt/openssl/include) - set(OPENSSL_ROOT_DIR ${OPENSSL_ROOT_DIR} /usr/local/opt/openssl/ /opt/homebrew/opt/openssl) -endif () - -set(OPENSSL_USE_STATIC_LIBS FALSE) -find_package(OpenSSL REQUIRED) -set(RECORD_OPENSSL_SSL_LIBRARY ${OPENSSL_SSL_LIBRARY}) -set(RECORD_OPENSSL_CRYPTO_LIBRARY ${OPENSSL_CRYPTO_LIBRARY}) - -unset(OPENSSL_FOUND CACHE) -unset(OPENSSL_INCLUDE_DIR CACHE) -unset(OPENSSL_CRYPTO_LIBRARY CACHE) -unset(OPENSSL_CRYPTO_LIBRARIES CACHE) -unset(OPENSSL_SSL_LIBRARY CACHE) -unset(OPENSSL_SSL_LIBRARIES CACHE) -unset(OPENSSL_LIBRARIES CACHE) -unset(OPENSSL_VERSION CACHE) - -if (LINK_STATIC) - find_library(ZLIB_LIBRARIES REQUIRED NAMES libz.a z zlib) - message(STATUS "ZLIB_LIBRARIES: ${ZLIB_LIBRARIES}") - find_library(Protobuf_LIBRARIES NAMES libprotobuf.a libprotobuf) - message(STATUS "Protobuf: ${Protobuf_LIBRARIES}") - find_library(CURL_LIBRARIES NAMES libcurl.a curl curl_a libcurl_a) - message(STATUS "CURL_LIBRARIES: ${CURL_LIBRARIES}") - find_library(LIB_ZSTD NAMES libzstd.a) - message(STATUS "ZStd: ${LIB_ZSTD}") - find_library(LIB_SNAPPY NAMES libsnappy.a) - message(STATUS "LIB_SNAPPY: ${LIB_SNAPPY}") - set(COMMON_LIBS ${Protobuf_LIBRARIES} ${COMMON_LIBS}) - - if (USE_LOG4CXX) - if (LOG4CXX_USE_DYNAMIC_LIBS) - find_library(LOG4CXX_LIBRARY_PATH log4cxx) - else () - find_library(LOG4CXX_LIBRARY_PATH NAMES liblog4cxx.a) - - # Libraries needed by log4cxx to link statically with - find_library(APR_LIBRARY_PATH NAMES libapr-1 PATHS /usr/lib /usr/local/apr/lib /usr/local/opt/apr/libexec/lib/) - find_library(APR_UTIL_LIBRARY_PATH NAMES libaprutil-1 PATHS /usr/lib /usr/local/apr/lib /usr/local/opt/apr-util/libexec/lib/) - find_library(EXPAT_LIBRARY_PATH NAMES libexpat expat) - if (APPLE) - find_library(ICONV_LIBRARY_PATH NAMES libiconv iconv) - else () - set(ICONV_LIBRARY_PATH ) - endif (APPLE) - endif (LOG4CXX_USE_DYNAMIC_LIBS) - endif (USE_LOG4CXX) - - if (MSVC) - 
add_definitions(-DCURL_STATICLIB) - endif() - - if (UNIX AND NOT APPLE) - set(CMAKE_FIND_LIBRARY_SUFFIXES .a) - endif() - - SET(Boost_USE_STATIC_LIBS ON) - SET(OPENSSL_USE_STATIC_LIBS TRUE) -else() - # Link to shared libraries - find_package(ZLIB REQUIRED) - set(ZLIB_LIBRARIES ${ZLIB_LIBRARIES}) - # NOTE: The default MODULE mode may not find debug libraries so use CONFIG mode here - unset(Protobuf_INCLUDE_DIRS CACHE) - unset(Protobuf_LIBRARIES CACHE) - find_package(Protobuf QUIET CONFIG) - # NOTE: On Windows x86 platform, Protobuf_FOUND might be set false but Protobuf_INCLUDE_DIRS and - # Protobuf_LIBRARIES are both found. - if (Protobuf_INCLUDE_DIRS AND Protobuf_LIBRARIES AND NOT Protobuf_FOUND) - set(Protobuf_FOUND TRUE) - endif () - if (Protobuf_FOUND) - message("Found Protobuf in config mode") - message(STATUS "Protobuf_LIBRARIES: ${Protobuf_LIBRARIES}") - message(STATUS "Protobuf_INCLUDE_DIRS: ${Protobuf_INCLUDE_DIRS}") - else () - message("Failed to find Protobuf in config mode, try to find it from system path") - find_library(Protobuf_LIBRARIES protobuf libprotobuf) - find_path(Protobuf_INCLUDE_DIRS google/protobuf/stubs/common.h) - message(STATUS "Protobuf_LIBRARIES: ${Protobuf_LIBRARIES}") - message(STATUS "Protobuf_INCLUDE_DIRS: ${Protobuf_INCLUDE_DIRS}") - endif () - - if (${Protobuf_FOUND} AND (${CMAKE_VERSION} VERSION_GREATER 3.8)) - set(COMMON_LIBS protobuf::libprotobuf ${COMMON_LIBS}) - else () - set(COMMON_LIBS ${Protobuf_LIBRARIES} ${COMMON_LIBS}) - endif () - - if (MSVC AND (${CMAKE_BUILD_TYPE} STREQUAL Debug)) - find_library(LIB_ZSTD zstdd HINTS "${VCPKG_DEBUG_ROOT}/lib") - else () - find_library(LIB_ZSTD zstd) - endif () - if (MSVC AND (${CMAKE_BUILD_TYPE} STREQUAL Debug)) - find_library(LIB_SNAPPY NAMES snappyd HINTS "${VCPKG_DEBUG_ROOT}/lib") - else () - find_library(LIB_SNAPPY NAMES snappy libsnappy) - endif () - - find_package(CURL REQUIRED) - if (${CMAKE_VERSION} VERSION_GREATER "3.12") - set(COMMON_LIBS ${COMMON_LIBS} CURL::libcurl) - 
endif () - - if (USE_LOG4CXX) - find_library(LOG4CXX_LIBRARY_PATH log4cxx) - find_path(LOG4CXX_INCLUDE_PATH log4cxx/logger.h) - endif (USE_LOG4CXX) -endif (LINK_STATIC) - - -find_package(Boost) - -if (Boost_MAJOR_VERSION EQUAL 1 AND Boost_MINOR_VERSION LESS 69) - # Boost System does not require linking since 1.69 - set(BOOST_COMPONENTS ${BOOST_COMPONENTS} system) - MESSAGE(STATUS "Linking with Boost:System") -endif() - -if (MSVC) - set(BOOST_COMPONENTS ${BOOST_COMPONENTS} date_time) -endif() - -if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) - # GCC 4.8.2 implementation of std::regex is buggy - set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) - set(CMAKE_CXX_FLAGS " -DPULSAR_USE_BOOST_REGEX") - MESSAGE(STATUS "Using Boost::Regex") -else() - MESSAGE(STATUS "Using std::regex") - # Turn on color error messages and show additional help with errors (only available in GCC v4.9+): - add_compile_options(-fdiagnostics-show-option -fdiagnostics-color) -endif() - -if(BUILD_PERF_TOOLS) - set(BOOST_COMPONENTS ${BOOST_COMPONENTS} program_options) -endif() - -find_package(Boost REQUIRED COMPONENTS ${BOOST_COMPONENTS}) - -if (BUILD_PYTHON_WRAPPER) - find_package(PythonLibs REQUIRED) - MESSAGE(STATUS "PYTHON: " ${PYTHONLIBS_VERSION_STRING}) - - string(REPLACE "." 
";" PYTHONLIBS_VERSION_NO_LIST ${PYTHONLIBS_VERSION_STRING}) - list(GET PYTHONLIBS_VERSION_NO_LIST 0 PYTHONLIBS_VERSION_MAJOR) - list(GET PYTHONLIBS_VERSION_NO_LIST 1 PYTHONLIBS_VERSION_MINOR) - set(BOOST_PYTHON_NAME_POSTFIX ${PYTHONLIBS_VERSION_MAJOR}${PYTHONLIBS_VERSION_MINOR}) - # For python3 the lib name is boost_python3 - set(BOOST_PYTHON_NAME_LIST python37;python38;python39;python310;python3;python3-mt;python-py${BOOST_PYTHON_NAME_POSTFIX};python${BOOST_PYTHON_NAME_POSTFIX}-mt;python${BOOST_PYTHON_NAME_POSTFIX}) - - foreach (BOOST_PYTHON_NAME IN LISTS BOOST_PYTHON_NAME_LIST) - find_package(Boost QUIET COMPONENTS ${BOOST_PYTHON_NAME}) - if (${Boost_FOUND}) - set(BOOST_PYTHON_NAME_FOUND ${BOOST_PYTHON_NAME}) - break() - endif() - endforeach() - - if (NOT ${Boost_FOUND}) - MESSAGE(FATAL_ERROR "Could not find Boost Python library") - endif () - - MESSAGE(STATUS "BOOST_PYTHON_NAME_FOUND: " ${BOOST_PYTHON_NAME_FOUND}) - find_package(Boost REQUIRED COMPONENTS ${BOOST_PYTHON_NAME_FOUND}) -endif (BUILD_PYTHON_WRAPPER) - -find_package(OpenSSL REQUIRED) - -if (BUILD_TESTS) - find_path(GTEST_INCLUDE_PATH gtest/gtest.h) - find_path(GMOCK_INCLUDE_PATH gmock/gmock.h) -endif () - -if (USE_LOG4CXX) - set(CMAKE_CXX_FLAGS " -DUSE_LOG4CXX ${CMAKE_CXX_FLAGS}") - find_path(LOG4CXX_INCLUDE_PATH log4cxx/logger.h) -endif (USE_LOG4CXX) - -if (NOT APPLE AND NOT MSVC) - # we don't set options below to build _pulsar.so - set(CMAKE_CXX_FLAGS_PYTHON "${CMAKE_CXX_FLAGS}") - # Hide all non-exported symbols to avoid conflicts - add_compile_options(-fvisibility=hidden) - if (CMAKE_COMPILER_IS_GNUCC) - add_compile_options(-Wl,--exclude-libs,ALL) - endif () -endif () - -if (LIB_ZSTD) - set(HAS_ZSTD 1) -else () - set(HAS_ZSTD 0) -endif () -MESSAGE(STATUS "HAS_ZSTD: ${HAS_ZSTD}") - -if (LIB_SNAPPY) - set(HAS_SNAPPY 1) -else () - set(HAS_SNAPPY 0) -endif () -MESSAGE(STATUS "HAS_SNAPPY: ${HAS_SNAPPY}") - -set(ADDITIONAL_LIBRARIES $ENV{PULSAR_ADDITIONAL_LIBRARIES}) -link_directories( 
$ENV{PULSAR_ADDITIONAL_LIBRARY_PATH} ) - -set(AUTOGEN_DIR ${CMAKE_BINARY_DIR}/generated) -file(MAKE_DIRECTORY ${AUTOGEN_DIR}) - -include_directories( - ${CMAKE_SOURCE_DIR} - ${CMAKE_SOURCE_DIR}/include - ${CMAKE_BINARY_DIR}/include - ${AUTOGEN_DIR} - ${Boost_INCLUDE_DIR} - ${OPENSSL_INCLUDE_DIR} - ${ZLIB_INCLUDE_DIRS} - ${CURL_INCLUDE_DIRS} - ${Protobuf_INCLUDE_DIRS} - ${LOG4CXX_INCLUDE_PATH} - ${GTEST_INCLUDE_PATH} - ${GMOCK_INCLUDE_PATH} -) - -set(COMMON_LIBS - ${COMMON_LIBS} - Threads::Threads - ${Boost_REGEX_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${CURL_LIBRARIES} - ${OPENSSL_LIBRARIES} - ${ZLIB_LIBRARIES} - ${ADDITIONAL_LIBRARIES} - ${CMAKE_DL_LIBS} -) - -if (MSVC) - set(COMMON_LIBS - ${COMMON_LIBS} - ${Boost_DATE_TIME_LIBRARY} - ) -endif() - -if (NOT MSVC) - set(COMMON_LIBS ${COMMON_LIBS} m) -else() - set(COMMON_LIBS - ${COMMON_LIBS} - wldap32.lib - Normaliz.lib) -endif() - -if (USE_LOG4CXX) - set(COMMON_LIBS - ${COMMON_LIBS} - ${LOG4CXX_LIBRARY_PATH} - ${APR_LIBRARY_PATH} - ${APR_UTIL_LIBRARY_PATH} - ${EXPAT_LIBRARY_PATH} - ${ICONV_LIBRARY_PATH} - ) -endif () - -if (HAS_ZSTD) - set(COMMON_LIBS ${COMMON_LIBS} ${LIB_ZSTD} ) -endif () - -add_definitions(-DHAS_ZSTD=${HAS_ZSTD}) - -if (HAS_SNAPPY) - set(COMMON_LIBS ${COMMON_LIBS} ${LIB_SNAPPY} ) -endif () - -add_definitions(-DHAS_SNAPPY=${HAS_SNAPPY}) - -if(NOT APPLE AND NOT MSVC) - set(COMMON_LIBS ${COMMON_LIBS} rt) -endif () - -link_directories(${CMAKE_BINARY_DIR}/lib) - -set(LIB_NAME $ENV{PULSAR_LIBRARY_NAME}) -if (NOT LIB_NAME) - set(LIB_NAME pulsar) -endif(NOT LIB_NAME) - -set(CLIENT_LIBS - ${COMMON_LIBS} - ${LIB_NAME} -) - -add_subdirectory(lib) -if(BUILD_PERF_TOOLS) - add_subdirectory(perf) -endif(BUILD_PERF_TOOLS) - -if (BUILD_DYNAMIC_LIB) - add_subdirectory(examples) -endif() - -if (BUILD_TESTS) - add_subdirectory(tests) -endif() - -if (BUILD_PYTHON_WRAPPER) - add_subdirectory(python) -endif () - -if (BUILD_WIRESHARK) - add_subdirectory(wireshark) -endif() - -find_package(ClangTools) -set(BUILD_SUPPORT_DIR 
"${CMAKE_SOURCE_DIR}/build-support") -add_custom_target(format ${BUILD_SUPPORT_DIR}/run_clang_format.py - ${CLANG_FORMAT_BIN} - 0 - ${BUILD_SUPPORT_DIR}/clang_format_exclusions.txt - ${CMAKE_SOURCE_DIR}/lib - ${CMAKE_SOURCE_DIR}/perf - ${CMAKE_SOURCE_DIR}/examples - ${CMAKE_SOURCE_DIR}/tests - ${CMAKE_SOURCE_DIR}/include - ${CMAKE_SOURCE_DIR}/python/src - ${CMAKE_SOURCE_DIR}/wireshark) - -# `make check-format` option (for CI test) -add_custom_target(check-format ${BUILD_SUPPORT_DIR}/run_clang_format.py - ${CLANG_FORMAT_BIN} - 1 - ${BUILD_SUPPORT_DIR}/clang_format_exclusions.txt - ${CMAKE_SOURCE_DIR}/lib - ${CMAKE_SOURCE_DIR}/perf - ${CMAKE_SOURCE_DIR}/examples - ${CMAKE_SOURCE_DIR}/tests - ${CMAKE_SOURCE_DIR}/include - ${CMAKE_SOURCE_DIR}/python/src - ${CMAKE_SOURCE_DIR}/wireshark) diff --git a/pulsar-client-cpp/Doxyfile b/pulsar-client-cpp/Doxyfile deleted file mode 100644 index b3ac004d9a89b..0000000000000 --- a/pulsar-client-cpp/Doxyfile +++ /dev/null @@ -1,2500 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Doxyfile 1.8.14 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. 
-# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See -# https://www.gnu.org/software/libiconv/ for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "pulsar-client-cpp" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. 
The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = ../target/doxygen - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. 
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. 
- -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. 
Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. 
- -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. 
Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. 
- -MARKDOWN_SUPPORT = YES - -# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up -# to that level are automatically included in the table of contents, even if -# they do not have an id attribute. -# Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 0. -# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. - -TOC_INCLUDE_HEADINGS = 0 - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. 
Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. 
- -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. 
- -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. 
- -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. 
- -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. 
- -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. 
- -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. 
The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. 
You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. 
- -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. 
Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = include docs/MainPage.md - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, -# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. - -FILE_PATTERNS = *.c \ - *.cc \ - *.cxx \ - *.cpp \ - *.c++ \ - *.java \ - *.ii \ - *.ixx \ - *.ipp \ - *.i++ \ - *.inl \ - *.idl \ - *.ddl \ - *.odl \ - *.h \ - *.hh \ - *.hxx \ - *.hpp \ - *.h++ \ - *.cs \ - *.d \ - *.php \ - *.php4 \ - *.php5 \ - *.phtml \ - *.inc \ - *.m \ - *.markdown \ - *.md \ - *.mm \ - *.dox \ - *.py \ - *.pyw \ - *.f90 \ - *.f95 \ - *.f03 \ - *.f08 \ - *.f \ - *.for \ - *.tcl \ - *.vhd \ - *.vhdl \ - *.ucf \ - *.qsf - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. 
- -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. 
- -EXAMPLE_PATTERNS = * - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. 
-# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = docs/MainPage.md - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. 
- -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see https://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. 
-# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. 
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# https://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML -# documentation will contain a main index with vertical navigation menus that -# are dynamically created via Javascript. If disabled, the navigation index will -# consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have Javascript, -# like the Qt help browser. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_MENUS = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See https://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. 
- -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). 
-# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://doc.qt.io/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. 
- -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://doc.qt.io/qt-4.8/qthelpproject.html#virtual-folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://doc.qt.io/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. 
The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. 
As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANSPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. 
Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. 
-# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use + S -# (what the is depends on the OS and browser, but it is typically -# , /

- * The Reader provides a low-level abstraction that allows for manual positioning in the topic, without - * using a - * subscription. Reader can only work on non-partitioned topics. - *

- * The initial reader positioning is done by specifying a message id. The options are: - *

    - *
  • MessageId.earliest : Start reading from the earliest message available in the topic - *
  • MessageId.latest : Start reading from the end topic, only getting messages published - * after the - * reader was created - *
  • MessageId : When passing a particular message id, the reader will position itself on - * that - * specific position. The first message to be read will be the message next to the specified messageId. - *
- * - * @param topic - * The name of the topic where to read - * @param startMessageId - * The message id where the reader will position itself. The first message returned will be the - * one after - * the specified startMessageId - * @param conf - * The {@code ReaderConfiguration} object - * @return The {@code Reader} object - */ - Result createReader(const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf, Reader& reader); - - /** - * Asynchronously create a topic reader with the customized ReaderConfiguration for reading messages from - * the specified topic. - * - * The Reader provides a low-level abstraction that allows for manual positioning in the topic, without - * using a - * subscription. The reader can only work on non-partitioned topics. - * - * The initial reader positioning is done by specifying a message ID. The options are as below: - *
    - *
  • MessageId.earliest : start reading from the earliest message available in the topic - *
  • MessageId.latest : start reading from the latest topic, only getting messages - * published after the reader was created
  • MessageId : when passing a particular message - * ID, the reader positions itself on that is the message next to the specified messageId. - *
- * - * @param topic - * the name of the topic where to read - * @param startMessageId - * the message ID where the reader positions itself. The first message returned is the - * one after - * the specified startMessageId - * @param conf - * the ReaderConfiguration object - * @return the Reader object - */ - void createReaderAsync(const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf, ReaderCallback callback); - - /** - * Get the list of partitions for a given topic. - * - * If the topic is partitioned, this will return a list of partition names. If the topic is not - * partitioned, the returned list will contain the topic name itself. - * - * This can be used to discover the partitions and create Reader, Consumer or Producer - * instances directly on a particular partition. - * - * @param topic - * the topic name - * @since 2.3.0 - */ - Result getPartitionsForTopic(const std::string& topic, std::vector& partitions); - - /** - * Get the list of partitions for a given topic in asynchronous mode. - * - * If the topic is partitioned, this will return a list of partition names. If the topic is not - * partitioned, the returned list will contain the topic name itself. - * - * This can be used to discover the partitions and create Reader, Consumer or Producer - * instances directly on a particular partition. - * - * @param topic - * the topic name - * @param callback - * the callback that will be invoked when the list of partitions is available - * @since 2.3.0 - */ - void getPartitionsForTopicAsync(const std::string& topic, GetPartitionsCallback callback); - - /** - * - * @return - */ - Result close(); - - /** - * Asynchronously close the Pulsar client and release all resources. - * - * All producers, consumers, and readers are orderly closed. The client waits until all pending write - * requests are persisted. 
- * - * @param callback the callback that is triggered when the Pulsar client is asynchronously closed - * successfully or not - */ - void closeAsync(CloseCallback callback); - - /** - * Perform immediate shutdown of Pulsar client. - * - * Release all resources and close all producer, consumer, and readers without waiting - * for ongoing operations to complete. - */ - void shutdown(); - - /** - * @brief Get the number of alive producers on the current client. - * - * @return The number of alive producers on the current client. - */ - uint64_t getNumberOfProducers(); - - /** - * @brief Get the number of alive consumers on the current client. - * - * @return The number of alive consumers on the current client. - */ - uint64_t getNumberOfConsumers(); - - private: - Client(const std::string& serviceUrl, const ClientConfiguration& clientConfiguration, - bool poolConnections); - Client(const std::shared_ptr); - - friend class PulsarFriend; - friend class PulsarWrapper; - std::shared_ptr impl_; -}; -} // namespace pulsar - -#endif /* PULSAR_CLIENT_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/ClientConfiguration.h b/pulsar-client-cpp/include/pulsar/ClientConfiguration.h deleted file mode 100644 index 451ab4ef2180b..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ClientConfiguration.h +++ /dev/null @@ -1,272 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CLIENTCONFIGURATION_H_ -#define PULSAR_CLIENTCONFIGURATION_H_ - -#include -#include -#include - -namespace pulsar { -class PulsarWrapper; -struct ClientConfigurationImpl; -class PULSAR_PUBLIC ClientConfiguration { - public: - ClientConfiguration(); - ~ClientConfiguration(); - ClientConfiguration(const ClientConfiguration&); - ClientConfiguration& operator=(const ClientConfiguration&); - - /** - * Configure a limit on the amount of memory that will be allocated by this client instance. - * Setting this to 0 will disable the limit. By default this is disabled. - * - * @param memoryLimitBytes the memory limit - */ - ClientConfiguration& setMemoryLimit(uint64_t memoryLimitBytes); - - /** - * @return the client memory limit in bytes - */ - uint64_t getMemoryLimit() const; - - /** - * Set the authentication method to be used with the broker - * - * @param authentication the authentication data to use - */ - ClientConfiguration& setAuth(const AuthenticationPtr& authentication); - - /** - * @return the authentication data - */ - Authentication& getAuth() const; - - /** - * Set timeout on client operations (subscribe, create producer, close, unsubscribe) - * Default is 30 seconds. - * - * @param timeout the timeout after which the operation will be considered as failed - */ - ClientConfiguration& setOperationTimeoutSeconds(int timeout); - - /** - * @return the client operations timeout in seconds - */ - int getOperationTimeoutSeconds() const; - - /** - * Set the number of IO threads to be used by the Pulsar client. 
Default is 1 - * thread. - * - * @param threads number of threads - */ - ClientConfiguration& setIOThreads(int threads); - - /** - * @return the number of IO threads to use - */ - int getIOThreads() const; - - /** - * Set the number of threads to be used by the Pulsar client when delivering messages - * through message listener. Default is 1 thread per Pulsar client. - * - * If using more than 1 thread, messages for distinct MessageListener will be - * delivered in different threads, however a single MessageListener will always - * be assigned to the same thread. - * - * @param threads number of threads - */ - ClientConfiguration& setMessageListenerThreads(int threads); - - /** - * @return the number of IO threads to use - */ - int getMessageListenerThreads() const; - - /** - * Number of concurrent lookup-requests allowed on each broker-connection to prevent overload on broker. - * (default: 50000) It should be configured with higher value only in case of it requires to - * produce/subscribe on - * thousands of topic using created {@link PulsarClient} - * - * @param concurrentLookupRequest - */ - ClientConfiguration& setConcurrentLookupRequest(int concurrentLookupRequest); - - /** - * @return Get configured total allowed concurrent lookup-request. - */ - int getConcurrentLookupRequest() const; - - /** - * Initialize the log configuration - * - * @param logConfFilePath path of the configuration file - * @deprecated - */ - ClientConfiguration& setLogConfFilePath(const std::string& logConfFilePath); - - /** - * Get the path of log configuration file (log4cpp) - */ - const std::string& getLogConfFilePath() const; - - /** - * Configure a custom logger backend to route of Pulsar client library - * to a different logger implementation. - * - * By default, log messages are printed on standard output. - * - * When passed in, the configuration takes ownership of the loggerFactory object. - * The logger factory can only be set once per process. 
Any subsequent calls to - * set the logger factory will have no effect, though the logger factory object - * will be cleaned up. - */ - ClientConfiguration& setLogger(LoggerFactory* loggerFactory); - - /** - * Configure whether to use the TLS encryption on the connections. - * - * The default value is false. - * - * @param useTls - */ - ClientConfiguration& setUseTls(bool useTls); - - /** - * @return whether the TLS encryption is used on the connections - */ - bool isUseTls() const; - - /** - * Set the path to the trusted TLS certificate file. - * - * @param tlsTrustCertsFilePath - */ - ClientConfiguration& setTlsTrustCertsFilePath(const std::string& tlsTrustCertsFilePath); - - /** - * @return the path to the trusted TLS certificate file - */ - const std::string& getTlsTrustCertsFilePath() const; - - /** - * Configure whether the Pulsar client accepts untrusted TLS certificates from brokers. - * - * The default value is false. - * - * @param tlsAllowInsecureConnection - */ - ClientConfiguration& setTlsAllowInsecureConnection(bool allowInsecure); - - /** - * @return whether the Pulsar client accepts untrusted TLS certificates from brokers - */ - bool isTlsAllowInsecureConnection() const; - - /** - * Configure whether it allows validating hostname verification when a client connects to a broker over - * TLS. - * - * It validates the incoming x509 certificate and matches the provided hostname (CN/SAN) with the - * expected broker's hostname. It follows the server identity hostname verification in RFC 2818. - * - * The default value is false. - * - * @see [RFC 2818](https://tools.ietf.org/html/rfc2818). - * - * @param validateHostName whether to enable the TLS hostname verification - */ - ClientConfiguration& setValidateHostName(bool validateHostName); - - /** - * @return true if the TLS hostname verification is enabled - */ - bool isValidateHostName() const; - - /** - * Configure the listener name that the broker returns the corresponding `advertisedListener`. 
- * - * @param name the listener name - */ - ClientConfiguration& setListenerName(const std::string& listenerName); - - /** - * @return the listener name for the broker - */ - const std::string& getListenerName() const; - - /** - * Initialize stats interval in seconds. Stats are printed and reset after every `statsIntervalInSeconds`. - * - * Default: 600 - * - * Set to 0 means disabling stats collection. - */ - ClientConfiguration& setStatsIntervalInSeconds(const unsigned int&); - - /** - * @return the stats interval configured for the client - */ - const unsigned int& getStatsIntervalInSeconds() const; - - /** - * Set partitions update interval in seconds. - * If a partitioned topic is produced or subscribed and `intervalInSeconds` is not 0, every - * `intervalInSeconds` seconds the partition number will be retrieved by sending lookup requests. If - * partition number has been increased, more producer/consumer of increased partitions will be created. - * Default is 60 seconds. - * - * @param intervalInSeconds the seconds between two lookup request for partitioned topic's metadata - */ - ClientConfiguration& setPartititionsUpdateInterval(unsigned int intervalInSeconds); - - /** - * Get partitions update interval in seconds. - */ - unsigned int getPartitionsUpdateInterval() const; - - /** - * Set the duration of time to wait for a connection to a broker to be established. If the duration passes - * without a response from the broker, the connection attempt is dropped. - * - * Default: 10000 - * - * @param timeoutMs the duration in milliseconds - * @return - */ - ClientConfiguration& setConnectionTimeout(int timeoutMs); - - /** - * The getter associated with setConnectionTimeout(). 
- */ - int getConnectionTimeout() const; - - friend class ClientImpl; - friend class PulsarWrapper; - - private: - const AuthenticationPtr& getAuthPtr() const; - std::shared_ptr impl_; -}; -} // namespace pulsar - -#endif /* PULSAR_CLIENTCONFIGURATION_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/CompressionType.h b/pulsar-client-cpp/include/pulsar/CompressionType.h deleted file mode 100644 index 6fd663a5ba717..0000000000000 --- a/pulsar-client-cpp/include/pulsar/CompressionType.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_COMPRESSIONTYPE_H_ -#define PULSAR_COMPRESSIONTYPE_H_ - -namespace pulsar { -enum CompressionType -{ - CompressionNone = 0, - CompressionLZ4 = 1, - CompressionZLib = 2, - CompressionZSTD = 3, - CompressionSNAPPY = 4 -}; -} - -#endif /* PULSAR_COMPRESSIONTYPE_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/ConsoleLoggerFactory.h b/pulsar-client-cpp/include/pulsar/ConsoleLoggerFactory.h deleted file mode 100644 index bfb5e9e3a75e6..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ConsoleLoggerFactory.h +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -namespace pulsar { - -class ConsoleLoggerFactoryImpl; - -/** - * The default LoggerFactory of Client if `USE_LOG4CXX` macro was not defined during compilation. - * - * - * The log format is "yyyy-MM-dd HH:mm:ss,SSS Z : | ", like - * - * ``` - * 2021-03-24 17:35:46,571 +0800 INFO [0x10a951e00] ConnectionPool:85 | Created connection for ... - * ``` - * - * It uses `std::cout` to prints logs to standard output. You can use this factory class to change your log - * level simply. 
- * - * ```c++ - * #include - * - * ClientConfiguration conf; - * conf.setLogger(new ConsoleLoggerFactory(Logger::LEVEL_DEBUG)); - * Client client("pulsar://localhost:6650", conf); - * ``` - */ -class PULSAR_PUBLIC ConsoleLoggerFactory : public LoggerFactory { - public: - explicit ConsoleLoggerFactory(Logger::Level level = Logger::LEVEL_INFO); - - ~ConsoleLoggerFactory(); - - Logger* getLogger(const std::string& fileName) override; - - private: - std::unique_ptr impl_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/include/pulsar/Consumer.h b/pulsar-client-cpp/include/pulsar/Consumer.h deleted file mode 100644 index 6c0ab27b06c75..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Consumer.h +++ /dev/null @@ -1,417 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef CONSUMER_HPP_ -#define CONSUMER_HPP_ - -#include -#include -#include -#include - -namespace pulsar { -class PulsarWrapper; -class ConsumerImplBase; -class PulsarFriend; -typedef std::shared_ptr ConsumerImplBasePtr; -/** - * - */ -class PULSAR_PUBLIC Consumer { - public: - /** - * Construct an uninitialized consumer object - */ - Consumer(); - virtual ~Consumer() = default; - - /** - * @return the topic this consumer is subscribed to - */ - const std::string& getTopic() const; - - /** - * @return the consumer name - */ - const std::string& getSubscriptionName() const; - - /** - * Unsubscribe the current consumer from the topic. - * - * This method will block until the operation is completed. Once the consumer is - * unsubscribed, no more messages will be received and subsequent new messages - * will not be retained for this consumer. - * - * This consumer object cannot be reused. - * - * @see asyncUnsubscribe - * @return Result::ResultOk if the unsubscribe operation completed successfully - * @return Result::ResultError if the unsubscribe operation failed - */ - Result unsubscribe(); - - /** - * Asynchronously unsubscribe the current consumer from the topic. - * - * This method will block until the operation is completed. Once the consumer is - * unsubscribed, no more messages will be received and subsequent new messages - * will not be retained for this consumer. - * - * This consumer object cannot be reused. - * - * @param callback the callback to get notified when the operation is complete - */ - void unsubscribeAsync(ResultCallback callback); - - /** - * Receive a single message. - * - * If a message is not immediately available, this method will block until a new - * message is available. 
- * - * @param msg a non-const reference where the received message will be copied - * @return ResultOk when a message is received - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ - Result receive(Message& msg); - - /** - * - * @param msg a non-const reference where the received message will be copied - * @param timeoutMs the receive timeout in milliseconds - * @return ResultOk if a message was received - * @return ResultTimeout if the receive timeout was triggered - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ - Result receive(Message& msg, int timeoutMs); - - /** - * Receive a single message - *

- * Retrieves a message when it will be available and completes callback with received message. - *

- *

- * receiveAsync() should be called subsequently once callback gets completed with received message. - * Else it creates backlog of receive requests in the application. - *

- * @param ReceiveCallback will be completed when message is available - */ - void receiveAsync(ReceiveCallback callback); - - /** - * Acknowledge the reception of a single message. - * - * This method will block until an acknowledgement is sent to the broker. After - * that, the message will not be re-delivered to this consumer. - * - * @see asyncAcknowledge - * @param message the message to acknowledge - * @return ResultOk if the message was successfully acknowledged - * @return ResultError if there was a failure - */ - Result acknowledge(const Message& message); - - /** - * Acknowledge the reception of a single message. - * - * This method is blocked until an acknowledgement is sent to the broker. After that, the message is not - * re-delivered to the consumer. - * - * @see asyncAcknowledge - * @param messageId the MessageId to acknowledge - * @return ResultOk if the messageId is successfully acknowledged - */ - Result acknowledge(const MessageId& messageId); - - /** - * Asynchronously acknowledge the reception of a single message. - * - * This method will initiate the operation and return immediately. The provided callback - * will be triggered when the operation is complete. - * - * @param message the message to acknowledge - * @param callback callback that will be triggered when the message has been acknowledged - */ - void acknowledgeAsync(const Message& message, ResultCallback callback); - - /** - * Asynchronously acknowledge the reception of a single message. - * - * This method initiates the operation and returns the result immediately. The provided callback - * is triggered when the operation is completed. - * - * @param messageId the messageId to acknowledge - * @param callback the callback that is triggered when the message has been acknowledged or not - */ - void acknowledgeAsync(const MessageId& messageId, ResultCallback callback); - - /** - * Acknowledge the reception of all the messages in the stream up to (and including) - * the provided message. 
- * - * This method will block until an acknowledgement is sent to the broker. After - * that, the messages will not be re-delivered to this consumer. - * - * Cumulative acknowledge cannot be used when the consumer type is set to ConsumerShared. - * - * It's equivalent to calling asyncAcknowledgeCumulative(const Message&, ResultCallback) and - * waiting for the callback to be triggered. - * - * @param message the last message in the stream to acknowledge - * @return ResultOk if the message was successfully acknowledged. All previously delivered messages for - * this topic are also acknowledged. - * @return ResultError if there was a failure - */ - Result acknowledgeCumulative(const Message& message); - - /** - * Acknowledge the reception of all the messages in the stream up to (and including) - * the provided message. - * - * This method is blocked until an acknowledgement is sent to the broker. After - * that, the message is not re-delivered to this consumer. - * - * Cumulative acknowledge cannot be used when the consumer type is set to ConsumerShared. - * - * It is equivalent to calling the asyncAcknowledgeCumulative(const Message&, ResultCallback) method and - * waiting for the callback to be triggered. - * - * @param messageId the last messageId in the stream to acknowledge - * @return ResultOk if the message is successfully acknowledged. All previously delivered messages for - * this topic are also acknowledged. - */ - Result acknowledgeCumulative(const MessageId& messageId); - - /** - * Asynchronously acknowledge the reception of all the messages in the stream up to (and - * including) the provided message. - * - * This method will initiate the operation and return immediately. The provided callback - * will be triggered when the operation is complete. 
- * - * @param message the message to acknowledge - * @param callback callback that will be triggered when the message has been acknowledged - */ - void acknowledgeCumulativeAsync(const Message& message, ResultCallback callback); - - /** - * Asynchronously acknowledge the reception of all the messages in the stream up to (and - * including) the provided message. - * - * This method initiates the operation and returns the result immediately. The provided callback - * is triggered when the operation is completed. - * - * @param messageId the messageId to acknowledge - * @param callback the callback that is triggered when the message has been acknowledged or not - */ - void acknowledgeCumulativeAsync(const MessageId& messageId, ResultCallback callback); - - /** - * Acknowledge the failure to process a single message. - *

- * When a message is "negatively acked" it will be marked for redelivery after - * some fixed delay. The delay is configurable when constructing the consumer - * with {@link ConsumerConfiguration#setNegativeAckRedeliveryDelayMs}. - *

- * This call is not blocking. - * - *

- * Example of usage: - *


-     * while (true) {
-     *     Message msg;
-     *     consumer.receive(msg);
-     *
-     *     try {
-     *          // Process message...
-     *
-     *          consumer.acknowledge(msg);
-     *     } catch (Throwable t) {
-     *          log.warn("Failed to process message");
-     *          consumer.negativeAcknowledge(msg);
-     *     }
-     * }
-     * 
- * - * @param message - * The {@code Message} to be acknowledged - */ - void negativeAcknowledge(const Message& message); - - /** - * Acknowledge the failure to process a single message. - *

- * When a message is "negatively acked" it will be marked for redelivery after - * some fixed delay. The delay is configurable when constructing the consumer - * with {@link ConsumerConfiguration#setNegativeAckRedeliveryDelayMs}. - *

- * This call is not blocking. - * - *

- * Example of usage: - *


-     * while (true) {
-     *     Message msg;
-     *     consumer.receive(msg);
-     *
-     *     try {
-     *          // Process message...
-     *
-     *          consumer.acknowledge(msg);
-     *     } catch (Throwable t) {
-     *          log.warn("Failed to process message");
-     *          consumer.negativeAcknowledge(msg);
-     *     }
-     * }
-     * 
- * - * @param messageId - * The {@code MessageId} to be acknowledged - */ - void negativeAcknowledge(const MessageId& messageId); - - /** - * Close the consumer and stop the broker to push more messages - */ - Result close(); - - /** - * Asynchronously close the consumer and stop the broker to push more messages - * - */ - void closeAsync(ResultCallback callback); - - /** - * Pause receiving messages via the messageListener, till resumeMessageListener() is called. - */ - Result pauseMessageListener(); - - /** - * Resume receiving the messages via the messageListener. - * Asynchronously receive all the messages enqueued from time pauseMessageListener() was called. - */ - Result resumeMessageListener(); - - /** - * Redelivers all the unacknowledged messages. In Failover mode, the request is ignored if the consumer is - * not - * active for the given topic. In Shared mode, the consumers messages to be redelivered are distributed - * across all - * the connected consumers. This is a non blocking call and doesn't throw an exception. In case the - * connection - * breaks, the messages are redelivered after reconnect. - */ - void redeliverUnacknowledgedMessages(); - - /** - * Gets Consumer Stats from broker. - * The stats are cached for 30 seconds, if a call is made before the stats returned by the previous call - * expires - * then cached data will be returned. BrokerConsumerStats::isValid() function can be used to check if the - * stats are - * still valid. - * - * @param brokerConsumerStats - if the function returns ResultOk, this object will contain consumer stats - * - * @note This is a blocking call with timeout of thirty seconds. - */ - Result getBrokerConsumerStats(BrokerConsumerStats& brokerConsumerStats); - - /** - * Asynchronous call to gets Consumer Stats from broker. - * The stats are cached for 30 seconds, if a call is made before the stats returned by the previous call - * expires - * then cached data will be returned. 
BrokerConsumerStats::isValid() function can be used to check if the - * stats are - * still valid. - * - * @param callback - callback function to get the brokerConsumerStats, - * if result is ResultOk then the brokerConsumerStats will be populated - */ - void getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback); - - /** - * Reset the subscription associated with this consumer to a specific message id. - * The message id can either be a specific message or represent the first or last messages in the topic. - * - * Note: this operation can only be done on non-partitioned topics. For these, one can rather perform the - * seek() on the individual partitions. - * - * @param messageId - * the message id where to reposition the subscription - */ - Result seek(const MessageId& messageId); - - /** - * Reset the subscription associated with this consumer to a specific message publish time. - * - * @param timestamp - * the message publish time where to reposition the subscription - */ - Result seek(uint64_t timestamp); - - /** - * Asynchronously reset the subscription associated with this consumer to a specific message id. - * The message id can either be a specific message or represent the first or last messages in the topic. - * - * Note: this operation can only be done on non-partitioned topics. For these, one can rather perform the - * seek() on the individual partitions. - * - * @param messageId - * the message id where to reposition the subscription - */ - virtual void seekAsync(const MessageId& messageId, ResultCallback callback); - - /** - * Asynchronously reset the subscription associated with this consumer to a specific message publish time. 
- * - * @param timestamp - * the message publish time where to reposition the subscription - */ - virtual void seekAsync(uint64_t timestamp, ResultCallback callback); - - /** - * @return Whether the consumer is currently connected to the broker - */ - bool isConnected() const; - - /** - * Asynchronously get an ID of the last available message or a message ID with -1 as an entryId if the - * topic is empty. - */ - void getLastMessageIdAsync(GetLastMessageIdCallback callback); - - /** - * Get an ID of the last available message or a message ID with -1 as an entryId if the topic is empty. - */ - Result getLastMessageId(MessageId& messageId); - - private: - ConsumerImplBasePtr impl_; - explicit Consumer(ConsumerImplBasePtr); - - friend class PulsarFriend; - friend class PulsarWrapper; - friend class MultiTopicsConsumerImpl; - friend class ConsumerImpl; - friend class ClientImpl; - friend class ConsumerTest; -}; -} // namespace pulsar - -#endif /* CONSUMER_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h b/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h deleted file mode 100644 index 4347c3b2d5fc3..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ConsumerConfiguration.h +++ /dev/null @@ -1,522 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CONSUMERCONFIGURATION_H_ -#define PULSAR_CONSUMERCONFIGURATION_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { - -class Consumer; -class PulsarWrapper; - -/// Callback definition for non-data operation -typedef std::function ResultCallback; -typedef std::function ReceiveCallback; -typedef std::function GetLastMessageIdCallback; - -/// Callback definition for MessageListener -typedef std::function MessageListener; - -typedef std::shared_ptr ConsumerEventListenerPtr; - -struct ConsumerConfigurationImpl; - -/** - * Class specifying the configuration of a consumer. - */ -class PULSAR_PUBLIC ConsumerConfiguration { - public: - ConsumerConfiguration(); - ~ConsumerConfiguration(); - ConsumerConfiguration(const ConsumerConfiguration&); - ConsumerConfiguration& operator=(const ConsumerConfiguration&); - - /** - * Create a new instance of ConsumerConfiguration with the same - * initial settings as the current one. - */ - ConsumerConfiguration clone() const; - - /** - * Declare the schema of the data that this consumer will be accepting. - * - * The schema will be checked against the schema of the topic, and the - * consumer creation will fail if it's not compatible. - * - * @param schemaInfo the schema definition object - */ - ConsumerConfiguration& setSchema(const SchemaInfo& schemaInfo); - - /** - * @return the schema information declared for this consumer - */ - const SchemaInfo& getSchema() const; - - /** - * Specify the consumer type. The consumer type enables - * specifying the type of subscription. In Exclusive subscription, - * only a single consumer is allowed to attach to the subscription. Other consumers - * will get an error message. 
In Shared subscription, multiple consumers will be - * able to use the same subscription name and the messages will be dispatched in a - * round robin fashion. In Failover subscription, a primary-failover subscription model - * allows for multiple consumers to attach to a single subscription, though only one - * of them will be “master” at a given time. Only the primary consumer will receive - * messages. When the primary consumer gets disconnected, one among the failover - * consumers will be promoted to primary and will start getting messages. - */ - ConsumerConfiguration& setConsumerType(ConsumerType consumerType); - - /** - * @return the consumer type - */ - ConsumerType getConsumerType() const; - - /** - * Set KeyShared subscription policy for consumer. - * - * By default, KeyShared subscription use auto split hash range to maintain consumers. If you want to - * set a different KeyShared policy, you can set by following example: - * - * @param keySharedPolicy The {@link KeySharedPolicy} want to specify - */ - ConsumerConfiguration& setKeySharedPolicy(KeySharedPolicy keySharedPolicy); - - /** - * @return the KeyShared subscription policy - */ - KeySharedPolicy getKeySharedPolicy() const; - - /** - * A message listener enables your application to configure how to process - * and acknowledge messages delivered. A listener will be called in order - * for every message received. - */ - ConsumerConfiguration& setMessageListener(MessageListener messageListener); - - /** - * @return the message listener - */ - MessageListener getMessageListener() const; - - /** - * @return true if the message listener has been set - */ - bool hasMessageListener() const; - - /** - * A event listener enables your application to react the consumer state - * change event (active or inactive). 
- */ - ConsumerConfiguration& setConsumerEventListener(ConsumerEventListenerPtr eventListener); - - /** - * @return the consumer event listener - */ - ConsumerEventListenerPtr getConsumerEventListener() const; - - /** - * @return true if the consumer event listener has been set - */ - bool hasConsumerEventListener() const; - - /** - * Sets the size of the consumer receive queue. - * - * The consumer receive queue controls how many messages can be accumulated by the consumer before the - * application calls receive(). Using a higher value may potentially increase the consumer throughput - * at the expense of bigger memory utilization. - * - * Setting the consumer queue size to 0 decreases the throughput of the consumer by disabling - * pre-fetching of - * messages. This approach improves the message distribution on shared subscription by pushing messages - * only to - * the consumers that are ready to process them. Neither receive with timeout nor partitioned topics can - * be - * used if the consumer queue size is 0. The receive() function call should not be interrupted when - * the consumer queue size is 0. - * - * The default value is 1000 messages and it is appropriate for the most use cases. - * - * @param size the new receiver queue size value - * - */ - void setReceiverQueueSize(int size); - - /** - * @return the receiver queue size - */ - int getReceiverQueueSize() const; - - /** - * Set the max total receiver queue size across partitons. - * - * This setting is used to reduce the receiver queue size for individual partitions - * {@link #setReceiverQueueSize(int)} if the total exceeds this value (default: 50000). - * - * @param maxTotalReceiverQueueSizeAcrossPartitions - */ - void setMaxTotalReceiverQueueSizeAcrossPartitions(int maxTotalReceiverQueueSizeAcrossPartitions); - - /** - * @return the configured max total receiver queue size across partitions - */ - int getMaxTotalReceiverQueueSizeAcrossPartitions() const; - - /** - * Set the consumer name. 
- * - * @param consumerName - */ - void setConsumerName(const std::string& consumerName); - - /** - * @return the consumer name - */ - const std::string& getConsumerName() const; - - /** - * Set the timeout in milliseconds for unacknowledged messages, the timeout needs to be greater than - * 10 seconds. An Exception is thrown if the given value is less than 10000 (10 seconds). - * If a successful acknowledgement is not sent within the timeout all the unacknowledged messages are - * redelivered. - * - * Default: 0, which means the the tracker for unacknowledged messages is disabled. - * - * @param timeout in milliseconds - */ - void setUnAckedMessagesTimeoutMs(const uint64_t milliSeconds); - - /** - * @return the configured timeout in milliseconds for unacked messages. - */ - long getUnAckedMessagesTimeoutMs() const; - - /** - * Set the tick duration time that defines the granularity of the ack-timeout redelivery (in - * milliseconds). - * - * The default value is 1000, which means 1 second. - * - * Using a higher tick time reduces - * the memory overhead to track messages when the ack-timeout is set to a bigger value. - * - * @param milliSeconds the tick duration time (in milliseconds) - */ - void setTickDurationInMs(const uint64_t milliSeconds); - - /** - * @return the tick duration time (in milliseconds) - */ - long getTickDurationInMs() const; - - /** - * Set the delay to wait before re-delivering messages that have failed to be process. - * - * When application uses {@link Consumer#negativeAcknowledge(Message)}, the failed message - * will be redelivered after a fixed timeout. The default is 1 min. - * - * @param redeliveryDelay - * redelivery delay for failed messages - * @param timeUnit - * unit in which the timeout is provided. - * @return the consumer builder instance - */ - void setNegativeAckRedeliveryDelayMs(long redeliveryDelayMillis); - - /** - * Get the configured delay to wait before re-delivering messages that have failed to be process. 
- * - * @return redelivery delay for failed messages - */ - long getNegativeAckRedeliveryDelayMs() const; - - /** - * Set time window in milliseconds for grouping message ACK requests. An ACK request is not sent - * to broker until the time window reaches its end, or the number of grouped messages reaches - * limit. Default is 100 milliseconds. If it's set to a non-positive value, ACK requests will be - * directly sent to broker without grouping. - * - * @param ackGroupMillis time of ACK grouping window in milliseconds. - */ - void setAckGroupingTimeMs(long ackGroupingMillis); - - /** - * Get grouping time window in milliseconds. - * - * @return grouping time window in milliseconds. - */ - long getAckGroupingTimeMs() const; - - /** - * Set max number of grouped messages within one grouping time window. If it's set to a - * non-positive value, number of grouped messages is not limited. Default is 1000. - * - * @param maxGroupingSize max number of grouped messages with in one grouping time window. - */ - void setAckGroupingMaxSize(long maxGroupingSize); - - /** - * Get max number of grouped messages within one grouping time window. - * - * @return max number of grouped messages within one grouping time window. - */ - long getAckGroupingMaxSize() const; - - /** - * Set the time duration for which the broker side consumer stats will be cached in the client. - * - * Default: 30000, which means 30 seconds. - * - * @param cacheTimeInMs in milliseconds - */ - void setBrokerConsumerStatsCacheTimeInMs(const long cacheTimeInMs); - - /** - * @return the configured timeout in milliseconds caching BrokerConsumerStats. - */ - long getBrokerConsumerStatsCacheTimeInMs() const; - - /** - * @return true if encryption keys are added - */ - bool isEncryptionEnabled() const; - - /** - * @return the shared pointer to CryptoKeyReader. - */ - const CryptoKeyReaderPtr getCryptoKeyReader() const; - - /** - * Set the shared pointer to CryptoKeyReader. 
- * - * @param the shared pointer to CryptoKeyReader - */ - ConsumerConfiguration& setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader); - - /** - * @return the ConsumerCryptoFailureAction - */ - ConsumerCryptoFailureAction getCryptoFailureAction() const; - - /** - * Set the ConsumerCryptoFailureAction. - */ - ConsumerConfiguration& setCryptoFailureAction(ConsumerCryptoFailureAction action); - - /** - * @return true if readCompacted is enabled - */ - bool isReadCompacted() const; - - /** - * If enabled, the consumer reads messages from the compacted topics rather than reading the full message - * backlog of the topic. This means that if the topic has been compacted, the consumer only sees the - * latest value for each key in the topic, up until the point in the topic message backlog that has been - * compacted. Beyond that point, message is sent as normal. - * - * `readCompacted` can only be enabled subscriptions to persistent topics, which have a single active - * consumer (for example, failure or exclusive subscriptions). Attempting to enable it on subscriptions to - * a non-persistent topics or on a shared subscription leads to the subscription call failure. - * - * @param readCompacted - * whether to read from the compacted topic - */ - void setReadCompacted(bool compacted); - - /** - * Set the time duration in minutes, for which the PatternMultiTopicsConsumer will do a pattern auto - * discovery. - * The default value is 60 seconds. less than 0 will disable auto discovery. - * - * @param periodInSeconds period in seconds to do an auto discovery - */ - void setPatternAutoDiscoveryPeriod(int periodInSeconds); - - /** - * @return the time duration for the PatternMultiTopicsConsumer performs a pattern auto discovery - */ - int getPatternAutoDiscoveryPeriod() const; - - /** - * The default value is `InitialPositionLatest`. 
- * - * @param subscriptionInitialPosition the initial position at which to set - * the cursor when subscribing to the topic for the first time - */ - void setSubscriptionInitialPosition(InitialPosition subscriptionInitialPosition); - - /** - * @return the configured `InitialPosition` for the consumer - */ - InitialPosition getSubscriptionInitialPosition() const; - - /** - * Set whether the subscription status should be replicated. - * The default value is `false`. - * - * @param replicateSubscriptionState whether the subscription status should be replicated - */ - void setReplicateSubscriptionStateEnabled(bool enabled); - - /** - * @return whether the subscription status should be replicated - */ - bool isReplicateSubscriptionStateEnabled() const; - - /** - * Check whether the message has a specific property attached. - * - * @param name the name of the property to check - * @return true if the message has the specified property - * @return false if the property is not defined - */ - bool hasProperty(const std::string& name) const; - - /** - * Get the value of a specific property - * - * @param name the name of the property - * @return the value of the property or null if the property was not defined - */ - const std::string& getProperty(const std::string& name) const; - - /** - * Get all the properties attached to this producer. - */ - std::map& getProperties() const; - - /** - * Sets a new property on a message. - * @param name the name of the property - * @param value the associated value - */ - ConsumerConfiguration& setProperty(const std::string& name, const std::string& value); - - /** - * Add all the properties in the provided map - */ - ConsumerConfiguration& setProperties(const std::map& properties); - - /** - * Get all the subscription properties attached to this subscription. - */ - std::map& getSubscriptionProperties() const; - - /** - * Sets a new subscription properties for this subscription. 
- * Notice: SubscriptionProperties are immutable, and consumers under the same subscription will fail to - * create a subscription if they use different properties. - * - * @param subscriptionProperties all the subscription properties in the provided map - */ - ConsumerConfiguration& setSubscriptionProperties( - const std::map& subscriptionProperties); - - /** - * Set the Priority Level for consumer (0 is the default value and means the highest priority). - * - * @param priorityLevel the priority of this consumer - * @return the ConsumerConfiguration instance - */ - ConsumerConfiguration& setPriorityLevel(int priorityLevel); - - /** - * @return the configured priority for the consumer - */ - int getPriorityLevel() const; - - /** - * Consumer buffers chunk messages into memory until it receives all the chunks of the original message. - * While consuming chunk-messages, chunks from same message might not be contiguous in the stream and they - * might be mixed with other messages' chunks. so, consumer has to maintain multiple buffers to manage - * chunks coming from different messages. This mainly happens when multiple publishers are publishing - * messages on the topic concurrently or publisher failed to publish all chunks of the messages. - * - * eg: M1-C1, M2-C1, M1-C2, M2-C2 - * Here, Messages M1-C1 and M1-C2 belong to original message M1, M2-C1 and M2-C2 belong to M2 message. - * - * Buffering large number of outstanding uncompleted chunked messages can create memory pressure and it - * can be guarded by providing this maxPendingChunkedMessage threshold. Once, consumer reaches this - * threshold, it drops the outstanding unchunked-messages by silently acking or asking broker to redeliver - * later by marking it unacked. See setAutoAckOldestChunkedMessageOnQueueFull. - * - * If it's zero, the pending chunked messages will not be limited. 
- * - * Default: 10 - * - * @param maxPendingChunkedMessage the number of max pending chunked messages - */ - ConsumerConfiguration& setMaxPendingChunkedMessage(size_t maxPendingChunkedMessage); - - /** - * The associated getter of setMaxPendingChunkedMessage - */ - size_t getMaxPendingChunkedMessage() const; - - /** - * Buffering large number of outstanding uncompleted chunked messages can create memory pressure and it - * can be guarded by providing the maxPendingChunkedMessage threshold. See setMaxPendingChunkedMessage. - * Once, consumer reaches this threshold, it drops the outstanding unchunked-messages by silently acking - * if autoAckOldestChunkedMessageOnQueueFull is true else it marks them for redelivery. - * - * Default: false - * - * @param autoAckOldestChunkedMessageOnQueueFull whether to ack the discarded chunked message - */ - ConsumerConfiguration& setAutoAckOldestChunkedMessageOnQueueFull( - bool autoAckOldestChunkedMessageOnQueueFull); - - /** - * The associated getter of setAutoAckOldestChunkedMessageOnQueueFull - */ - bool isAutoAckOldestChunkedMessageOnQueueFull() const; - - /** - * Set the consumer to include the given position of any reset operation like Consumer::seek. 
- * - * Default: false - * - * @param startMessageIdInclusive whether to include the reset position - */ - ConsumerConfiguration& setStartMessageIdInclusive(bool startMessageIdInclusive); - - /** - * The associated getter of setStartMessageIdInclusive - */ - bool isStartMessageIdInclusive() const; - - friend class PulsarWrapper; - - private: - std::shared_ptr impl_; -}; -} // namespace pulsar -#endif /* PULSAR_CONSUMERCONFIGURATION_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/ConsumerCryptoFailureAction.h b/pulsar-client-cpp/include/pulsar/ConsumerCryptoFailureAction.h deleted file mode 100644 index b061d7bf65e2e..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ConsumerCryptoFailureAction.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef CONSUMERCRYPTOFAILUREACTION_H_ -#define CONSUMERCRYPTOFAILUREACTION_H_ - -namespace pulsar { - -enum class ConsumerCryptoFailureAction -{ - FAIL, // This is the default option to fail consume until crypto succeeds - DISCARD, // Message is silently acknowledged and not delivered to the application - CONSUME // Deliver the encrypted message to the application. 
It's the application's - // responsibility to decrypt the message. If message is also compressed, - // decompression will fail. If message contain batch messages, client will - // not be able to retrieve individual messages in the batch -}; - -} /* namespace pulsar */ - -#endif /* CONSUMERCRYPTOFAILUREACTION_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/ConsumerEventListener.h b/pulsar-client-cpp/include/pulsar/ConsumerEventListener.h deleted file mode 100644 index 467dce178cb3c..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ConsumerEventListener.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CONSUMEREVENTLISTENER_H_ -#define PULSAR_CONSUMEREVENTLISTENER_H_ - -#include - -namespace pulsar { - -class Consumer; - -class PULSAR_PUBLIC ConsumerEventListener { - public: - virtual ~ConsumerEventListener(){}; - /** - * @brief Notified when the consumer group is changed, and the consumer becomes active. - * - * @param consumer the consumer that originated the event - * @param partitionId the id of the partition that beconmes active. 
- */ - virtual void becameActive(Consumer consumer, int partitionId) = 0; - - /** - * @brief Notified when the consumer group is changed, and the consumer is still inactive or becomes - * inactive. - * - * @param consumer the consumer that originated the event - * @param partitionId the id of the partition that is still inactive or becomes inactive. - */ - virtual void becameInactive(Consumer consumer, int partitionId) = 0; -}; -} // namespace pulsar -#endif /* PULSAR_CONSUMEREVENTLISTENER_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/ConsumerType.h b/pulsar-client-cpp/include/pulsar/ConsumerType.h deleted file mode 100644 index 8068a5c2c8407..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ConsumerType.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_CPP_CONSUMERTYPE_H -#define PULSAR_CPP_CONSUMERTYPE_H - -namespace pulsar { -enum ConsumerType -{ - /** - * There can be only 1 consumer on the same topic with the same consumerName - */ - ConsumerExclusive, - - /** - * Multiple consumers will be able to use the same consumerName and the messages - * will be dispatched according to a round-robin rotation between the connected consumers - */ - ConsumerShared, - - /** Only one consumer is active on the subscription; Subscription can have N consumers - * connected one of which will get promoted to master if the current master becomes inactive - */ - ConsumerFailover, - - /** - * Multiple consumer will be able to use the same subscription and all messages with the same key - * will be dispatched to only one consumer - */ - ConsumerKeyShared -}; -} - -#endif // PULSAR_CPP_CONSUMERTYPE_H diff --git a/pulsar-client-cpp/include/pulsar/CryptoKeyReader.h b/pulsar-client-cpp/include/pulsar/CryptoKeyReader.h deleted file mode 100644 index e0b2a7781dca7..0000000000000 --- a/pulsar-client-cpp/include/pulsar/CryptoKeyReader.h +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef CRYPTOKEYREADER_H_ -#define CRYPTOKEYREADER_H_ - -#include -#include -#include - -namespace pulsar { - -/** - * The abstract class that abstracts the access to a key store - */ -class PULSAR_PUBLIC CryptoKeyReader { - public: - CryptoKeyReader(); - virtual ~CryptoKeyReader(); - - /** - * Return the encryption key corresponding to the key name in the argument - *

- * This method should be implemented to return the EncryptionKeyInfo. This method will be - * called at the time of producer creation as well as consumer receiving messages. - * Hence, application should not make any blocking calls within the implementation. - *

- * - * @param keyName - * Unique name to identify the key - * @param metadata - * Additional information needed to identify the key - * @param encKeyInfo updated with details about the public key - * @return Result ResultOk is returned for success - * - */ - virtual Result getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const = 0; - - /** - * @param keyName - * Unique name to identify the key - * @param metadata - * Additional information needed to identify the key - * @param encKeyInfo updated with details about the private key - * @return Result ResultOk is returned for success - */ - virtual Result getPrivateKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const = 0; - -}; /* namespace pulsar */ - -typedef std::shared_ptr CryptoKeyReaderPtr; - -class PULSAR_PUBLIC DefaultCryptoKeyReader : public CryptoKeyReader { - private: - std::string publicKeyPath_; - std::string privateKeyPath_; - void readFile(std::string fileName, std::string& fileContents) const; - - public: - /** - * The constructor of {@link #CryptoKeyReader} - * - * Configure the key reader to be used to decrypt the message payloads - * - * @param publicKeyPath the path to the public key - * @param privateKeyPath the path to the private key - * @since 2.8.0 - */ - DefaultCryptoKeyReader(const std::string& publicKeyPath, const std::string& privateKeyPath); - ~DefaultCryptoKeyReader(); - - /** - * Return the encryption key corresponding to the key name in the argument. - * - * This method should be implemented to return the EncryptionKeyInfo. This method is called when creating - * producers as well as allowing consumers to receive messages. Consequently, the application should not - * make any blocking calls within the implementation. 
- * - * @param[in] keyName - * the unique name to identify the key - * @param[in] metadata - * the additional information needed to identify the key - * @param[out] encKeyInfo the EncryptionKeyInfo with details about the public key - * @return ResultOk - */ - Result getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const; - - /** - * Return the encryption key corresponding to the key name in the argument. - * - * @param[in] keyName - * the unique name to identify the key - * @param[in] metadata - * the additional information needed to identify the key - * @param[out] encKeyInfo the EncryptionKeyInfo with details about the private key - * @return ResultOk - */ - Result getPrivateKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const; - static CryptoKeyReaderPtr create(const std::string& publicKeyPath, const std::string& privateKeyPath); -}; /* namespace pulsar */ - -} // namespace pulsar - -#endif /* CRYPTOKEYREADER_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/DeprecatedException.h b/pulsar-client-cpp/include/pulsar/DeprecatedException.h deleted file mode 100644 index 9680591b99abe..0000000000000 --- a/pulsar-client-cpp/include/pulsar/DeprecatedException.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef DEPRECATED_EXCEPTION_HPP_ -#define DEPRECATED_EXCEPTION_HPP_ - -#include -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC DeprecatedException : public std::runtime_error { - public: - explicit DeprecatedException(const std::string& __arg); - - private: - static const std::string message_prefix; -}; -} // namespace pulsar - -#endif // DEPRECATED_EXCEPTION_HPP_ diff --git a/pulsar-client-cpp/include/pulsar/EncryptionKeyInfo.h b/pulsar-client-cpp/include/pulsar/EncryptionKeyInfo.h deleted file mode 100644 index 0357622f036b2..0000000000000 --- a/pulsar-client-cpp/include/pulsar/EncryptionKeyInfo.h +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef ENCRYPTIONKEYINFO_H_ -#define ENCRYPTIONKEYINFO_H_ - -#include -#include -#include -#include - -namespace pulsar { - -class EncryptionKeyInfoImpl; -class PulsarWrapper; - -typedef std::shared_ptr EncryptionKeyInfoImplPtr; - -class PULSAR_PUBLIC EncryptionKeyInfo { - /* - * This object contains the encryption key and corresponding metadata which contains - * additional information about the key such as version, timestammp - */ - - public: - typedef std::map StringMap; - - EncryptionKeyInfo(); - - /** - * EncryptionKeyInfo contains the encryption key and corresponding metadata which contains additional - * information about the key, such as version and timestamp. - */ - EncryptionKeyInfo(std::string key, StringMap& metadata); - - /** - * @return the key of the message - */ - std::string& getKey(); - - /** - * Set the key of the message for routing policy - * - * @param Key the key of the message for routing policy - */ - void setKey(std::string key); - - /** - * @return the metadata information - */ - StringMap& getMetadata(void); - - /** - * Set metadata information - * - * @param Metadata the information of metadata - */ - void setMetadata(StringMap& metadata); - - private: - explicit EncryptionKeyInfo(EncryptionKeyInfoImplPtr); - - EncryptionKeyInfoImplPtr impl_; - - friend class PulsarWrapper; -}; - -} /* namespace pulsar */ - -#endif /* ENCRYPTIONKEYINFO_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/FileLoggerFactory.h b/pulsar-client-cpp/include/pulsar/FileLoggerFactory.h deleted file mode 100644 index 92cd1806adcbf..0000000000000 --- a/pulsar-client-cpp/include/pulsar/FileLoggerFactory.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -namespace pulsar { - -class FileLoggerFactoryImpl; - -/** - * A logger factory that is appending logs to a single file. - * - * The log format is "yyyy-MM-dd HH:mm:ss,SSS Z : | ", like - * - * ``` - * 2021-03-24 17:35:46,571 +0800 INFO [0x10a951e00] ConnectionPool:85 | Created connection for ... - * ``` - * - * Example: - * - * ```c++ - * #include - * - * ClientConfiguration conf; - * conf.setLogger(new FileLoggerFactory(Logger::LEVEL_DEBUG, "pulsar-client-cpp.log")); - * Client client("pulsar://localhost:6650", conf); - * ``` - */ -class PULSAR_PUBLIC FileLoggerFactory : public pulsar::LoggerFactory { - public: - /** - * Create a FileLoggerFactory instance. - * - * @param level the log level - * @param logFilePath the log file's path - */ - FileLoggerFactory(Logger::Level level, const std::string& logFilePath); - - ~FileLoggerFactory(); - - pulsar::Logger* getLogger(const std::string& filename) override; - - private: - std::unique_ptr impl_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/include/pulsar/InitialPosition.h b/pulsar-client-cpp/include/pulsar/InitialPosition.h deleted file mode 100644 index f5ac19eb1f4e5..0000000000000 --- a/pulsar-client-cpp/include/pulsar/InitialPosition.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_INITIAL_POSITION_H -#define PULSAR_CPP_INITIAL_POSITION_H - -namespace pulsar { -enum InitialPosition -{ - InitialPositionLatest, - InitialPositionEarliest -}; -} - -#endif // PULSAR_CPP_INITIAL_POSITION_H diff --git a/pulsar-client-cpp/include/pulsar/KeySharedPolicy.h b/pulsar-client-cpp/include/pulsar/KeySharedPolicy.h deleted file mode 100644 index 53efc4c780f95..0000000000000 --- a/pulsar-client-cpp/include/pulsar/KeySharedPolicy.h +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include - -#include - -#include -#include - -namespace pulsar { - -/** - * KeyShared mode of KeyShared subscription. - */ -enum KeySharedMode -{ - - /** - * Auto split while new consumer connected. - */ - AUTO_SPLIT = 0, - - /** - * New consumer with fixed hash range to attach the topic, if new consumer use conflict hash range with - * exits consumers, new consumer will be rejected. - */ - STICKY = 1 -}; - -struct KeySharedPolicyImpl; - -typedef std::pair StickyRange; -typedef std::vector StickyRanges; - -class PULSAR_PUBLIC KeySharedPolicy { - public: - KeySharedPolicy(); - ~KeySharedPolicy(); - - KeySharedPolicy(const KeySharedPolicy&); - KeySharedPolicy& operator=(const KeySharedPolicy&); - - /** - * Create a new instance of KeySharedPolicy with the same - * initial settings as the current one. - */ - KeySharedPolicy clone() const; - - /** - * Configure the KeyShared mode of KeyShared subscription - * - * @param KeyShared mode - * @see {@link #KeySharedMode} - */ - KeySharedPolicy& setKeySharedMode(KeySharedMode keySharedMode); - - /** - * @return the KeySharedMode of KeyShared subscription - */ - KeySharedMode getKeySharedMode() const; - - /** - * If it is enabled, it relaxes the ordering requirement and allows the broker to send out-of-order - * messages in case of failures. This makes it faster for new consumers to join without being stalled by - * an existing slow consumer. - * - * In this case, a single consumer still receives all keys, but they may come in different orders. 
- * - * @param allowOutOfOrderDelivery - * whether to allow for out of order delivery - */ - KeySharedPolicy& setAllowOutOfOrderDelivery(bool allowOutOfOrderDelivery); - - /** - * @return true if out of order delivery is enabled - */ - bool isAllowOutOfOrderDelivery() const; - - /** - * @param ranges used with sticky mode - */ - KeySharedPolicy& setStickyRanges(std::initializer_list ranges); - - /** - * @return ranges used with sticky mode - */ - StickyRanges getStickyRanges() const; - - private: - std::shared_ptr impl_; -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/include/pulsar/Logger.h b/pulsar-client-cpp/include/pulsar/Logger.h deleted file mode 100644 index e9487a716844b..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Logger.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include - -namespace pulsar { - -class PULSAR_PUBLIC Logger { - public: - enum Level - { - LEVEL_DEBUG = 0, - LEVEL_INFO = 1, - LEVEL_WARN = 2, - LEVEL_ERROR = 3 - }; - - virtual ~Logger() {} - - /** - * Check whether the log level is enabled - * - * @param level the Logger::Level - * @return true if log is enabled - */ - virtual bool isEnabled(Level level) = 0; - - /** - * Log the message with related metadata - * - * @param level the Logger::Level - * @param line the line number of this log - * @param message the message to log - */ - virtual void log(Level level, int line, const std::string& message) = 0; -}; - -class PULSAR_PUBLIC LoggerFactory { - public: - virtual ~LoggerFactory() {} - - /** - * Create a Logger that is created from the filename - * - * @param fileName the filename that is used to construct the Logger - * @return a pointer to the created Logger instance - * @note the pointer must be allocated with the `new` keyword in C++ - */ - virtual Logger* getLogger(const std::string& fileName) = 0; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/include/pulsar/Message.h b/pulsar-client-cpp/include/pulsar/Message.h deleted file mode 100644 index 935236bd5bb5b..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Message.h +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef MESSAGE_HPP_ -#define MESSAGE_HPP_ - -#include -#include - -#include - -#include -#include "MessageId.h" - -namespace pulsar { -namespace proto { -class CommandMessage; -class MessageMetadata; -class SingleMessageMetadata; -} // namespace proto - -class SharedBuffer; -class MessageBuilder; -class MessageImpl; -class PulsarWrapper; - -class PULSAR_PUBLIC Message { - public: - typedef std::map StringMap; - - Message(); - - /** - * Return the properties attached to the message. - * Properties are application defined key/value pairs that will be attached to the message - * - * @return an unmodifiable view of the properties map - */ - const StringMap& getProperties() const; - - /** - * Check whether the message has a specific property attached. 
- * - * @param name the name of the property to check - * @return true if the message has the specified property - * @return false if the property is not defined - */ - bool hasProperty(const std::string& name) const; - - /** - * Get the value of a specific property - * - * @param name the name of the property - * @return the value of the property or null if the property was not defined - */ - const std::string& getProperty(const std::string& name) const; - - /** - * Get the content of the message - * - * - * @return the pointer to the message payload - */ - const void* getData() const; - - /** - * Get the length of the message - * - * @return the length of the message payload - */ - std::size_t getLength() const; - - /** - * Get string representation of the message - * - * @return the string representation of the message payload - */ - std::string getDataAsString() const; - - /** - * Get the unique message ID associated with this message. - * - * The message id can be used to univocally refer to a message without having to keep the entire payload - * in memory. - * - * Only messages received from the consumer will have a message id assigned. - * - */ - const MessageId& getMessageId() const; - - /** - * Set the unique message ID. 
- * - */ - void setMessageId(const MessageId& messageId) const; - - /** - * Get the partition key for this message - * @return key string that is hashed to determine message's topic partition - */ - const std::string& getPartitionKey() const; - - /** - * @return true if the message has a partition key - */ - bool hasPartitionKey() const; - - /** - * Get the ordering key of the message - * - * @return the ordering key of the message - */ - const std::string& getOrderingKey() const; - - /** - * Check whether the message has a ordering key - * - * @return true if the ordering key was set while creating the message - * false if the ordering key was not set while creating the message - */ - bool hasOrderingKey() const; - - /** - * Get the UTC based timestamp in milliseconds referring to when the message was published by the client - * producer - */ - uint64_t getPublishTimestamp() const; - - /** - * Get the event timestamp associated with this message. It is set by the client producer. - */ - uint64_t getEventTimestamp() const; - - /** - * Get the topic Name from which this message originated from - */ - const std::string& getTopicName() const; - - /** - * Get the redelivery count for this message - */ - const int getRedeliveryCount() const; - - /** - * Check if schema version exists - */ - bool hasSchemaVersion() const; - - /** - * Get the schema version - */ - const std::string& getSchemaVersion() const; - - bool operator==(const Message& msg) const; - - private: - typedef std::shared_ptr MessageImplPtr; - MessageImplPtr impl_; - - Message(MessageImplPtr& impl); - Message(const proto::CommandMessage& msg, proto::MessageMetadata& data, SharedBuffer& payload, - int32_t partition); - /// Used for Batch Messages - Message(const MessageId& messageId, proto::MessageMetadata& metadata, SharedBuffer& payload, - proto::SingleMessageMetadata& singleMetadata, const std::string& topicName); - friend class PartitionedProducerImpl; - friend class MultiTopicsConsumerImpl; - friend 
class MessageBuilder; - friend class ConsumerImpl; - friend class ProducerImpl; - friend class Commands; - friend class BatchMessageContainerBase; - friend class BatchAcknowledgementTracker; - friend class PulsarWrapper; - friend class MessageBatch; - friend struct OpSendMsg; - - friend PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, const StringMap& map); - friend PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, const Message& msg); -}; -} // namespace pulsar - -#endif /* MESSAGE_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/MessageBatch.h b/pulsar-client-cpp/include/pulsar/MessageBatch.h deleted file mode 100644 index be94358870112..0000000000000 --- a/pulsar-client-cpp/include/pulsar/MessageBatch.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#ifndef LIB_MESSAGE_BATCH_H -#define LIB_MESSAGE_BATCH_H -#include - -#include -#include - -namespace pulsar { - -class PULSAR_PUBLIC MessageBatch { - public: - MessageBatch(); - - MessageBatch& withMessageId(const MessageId& messageId); - - MessageBatch& parseFrom(const std::string& payload, uint32_t batchSize); - - MessageBatch& parseFrom(const SharedBuffer& payload, uint32_t batchSize); - - const std::vector& messages(); - - private: - typedef std::shared_ptr MessageImplPtr; - MessageImplPtr impl_; - Message batchMessage_; - - std::vector batch_; -}; -} // namespace pulsar -#endif // LIB_MESSAGE_BATCH_H diff --git a/pulsar-client-cpp/include/pulsar/MessageBuilder.h b/pulsar-client-cpp/include/pulsar/MessageBuilder.h deleted file mode 100644 index 71dafaaec0b08..0000000000000 --- a/pulsar-client-cpp/include/pulsar/MessageBuilder.h +++ /dev/null @@ -1,167 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef MESSAGE_BUILDER_H -#define MESSAGE_BUILDER_H - -#include -#include -#include - -#include -#include - -namespace pulsar { -class PulsarWrapper; - -class PULSAR_PUBLIC MessageBuilder { - public: - MessageBuilder(); - - typedef std::map StringMap; - - /** - * Finalize the immutable message - */ - Message build(); - - /** - * Set content of the message. The given data is copied into message. - */ - MessageBuilder& setContent(const void* data, size_t size); - - /** - * Set the content of the message - * - * @param data the content of the message. - * @see setContent(const void*, size_t) - */ - MessageBuilder& setContent(const std::string& data); - - /** - * Set the content of the message - * - * @param data the content of the message. The given data is moved into message. - */ - MessageBuilder& setContent(std::string&& data); - - /** - * Set content of the message to a buffer already allocated by the caller. No copies of - * this buffer will be made. The caller is responsible to ensure the memory buffer is - * valid until the message has been persisted (or an error is returned). - */ - MessageBuilder& setAllocatedContent(void* data, size_t size); - - /** - * Sets a new property on a message. - * @param name the name of the property - * @param value the associated value - */ - MessageBuilder& setProperty(const std::string& name, const std::string& value); - - /** - * Add all the properties in the provided map - */ - MessageBuilder& setProperties(const StringMap& properties); - - /** - * set partition key for message routing and topic compaction - * @param hash of this key is used to determine message's topic partition - */ - MessageBuilder& setPartitionKey(const std::string& partitionKey); - - /** - * set ordering key used for key_shared subscriptions - * @param the ordering key for the message - */ - MessageBuilder& setOrderingKey(const std::string& orderingKey); - - /** - * Specify a delay for the delivery of the messages. 
- * - * @param delay the delay in milliseconds - */ - MessageBuilder& setDeliverAfter(const std::chrono::milliseconds delay); - - /** - * Specify the this message should not be delivered earlier than the - * specified timestamp. - * - * @param deliveryTimestamp UTC based timestamp in milliseconds - */ - MessageBuilder& setDeliverAt(uint64_t deliveryTimestamp); - - /** - * Set the event timestamp for the message. - */ - MessageBuilder& setEventTimestamp(uint64_t eventTimestamp); - - /** - * Specify a custom sequence id for the message being published. - *

- * The sequence id can be used for deduplication purposes and it needs to follow these rules: - *

    - *
  1. sequenceId >= 0 - *
  2. Sequence id for a message needs to be greater than sequence id for earlier messages: - * sequenceId(N+1) > sequenceId(N) - *
  3. It's not necessary for sequence ids to be consecutive. There can be holes between - * messages. Eg. the sequenceId could represent an offset or a cumulative size. - *
- * - * @param sequenceId - * the sequence id to assign to the current message - * @since 1.20.0 - */ - MessageBuilder& setSequenceId(int64_t sequenceId); - - /** - * override namespace replication clusters. note that it is the - * caller's responsibility to provide valid cluster names, and that - * all clusters have been previously configured as topics. - * - * given an empty list, the message will replicate per the namespace - * configuration. - * - * @param clusters where to send this message. - */ - MessageBuilder& setReplicationClusters(const std::vector& clusters); - - /** - * Do not replicate this message - * @param flag if true, disable replication, otherwise use default - * replication - */ - MessageBuilder& disableReplication(bool flag); - - /** - * create a empty message, with no properties or data - * - */ - MessageBuilder& create(); - - private: - MessageBuilder(const MessageBuilder&); - void checkMetadata(); - static std::shared_ptr createMessageImpl(); - Message::MessageImplPtr impl_; - - friend class PulsarWrapper; -}; -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/include/pulsar/MessageId.h b/pulsar-client-cpp/include/pulsar/MessageId.h deleted file mode 100644 index 06be790c1ea4b..0000000000000 --- a/pulsar-client-cpp/include/pulsar/MessageId.h +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef MESSAGE_ID_H -#define MESSAGE_ID_H - -#include -#include -#include -#include -#include - -namespace pulsar { - -class MessageIdImpl; - -class PULSAR_PUBLIC MessageId { - public: - MessageId& operator=(const MessageId&); - MessageId(); - - /** - * Construct the MessageId - * - * @param partition the partition number of a topic - * @param ledgerId the ledger id - * @param entryId the entry id - * @param batchIndex the batch index of a single message in a batch - */ - explicit MessageId(int32_t partition, int64_t ledgerId, int64_t entryId, int32_t batchIndex); - - /** - * MessageId representing the "earliest" or "oldest available" message stored in the topic - */ - static const MessageId& earliest(); - - /** - * MessageId representing the "latest" or "last published" message in the topic - */ - static const MessageId& latest(); - - /** - * Serialize the message id into a binary string for storing - */ - void serialize(std::string& result) const; - - /** - * Get the topic Name from which this message originated from - */ - const std::string& getTopicName() const; - - /** - * Set the topicName - */ - void setTopicName(const std::string& topicName); - - /** - * Deserialize a message id from a binary string - */ - static MessageId deserialize(const std::string& serializedMessageId); - - // These functions compare the message order as stored in bookkeeper - bool operator<(const MessageId& other) const; - bool operator<=(const MessageId& other) const; - bool operator>(const MessageId& other) const; - bool operator>=(const MessageId& other) 
const; - bool operator==(const MessageId& other) const; - bool operator!=(const MessageId& other) const; - - int64_t ledgerId() const; - int64_t entryId() const; - int32_t batchIndex() const; - int32_t partition() const; - - private: - friend class ConsumerImpl; - friend class ReaderImpl; - friend class Message; - friend class MessageImpl; - friend class Commands; - friend class PartitionedProducerImpl; - friend class MultiTopicsConsumerImpl; - friend class UnAckedMessageTrackerEnabled; - friend class BatchAcknowledgementTracker; - friend class PulsarWrapper; - friend class PulsarFriend; - friend class NegativeAcksTracker; - - friend PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, const MessageId& messageId); - - typedef std::shared_ptr MessageIdImplPtr; - MessageIdImplPtr impl_; -}; -} // namespace pulsar - -#endif // MESSAGE_ID_H diff --git a/pulsar-client-cpp/include/pulsar/MessageRoutingPolicy.h b/pulsar-client-cpp/include/pulsar/MessageRoutingPolicy.h deleted file mode 100644 index bc76259bfad00..0000000000000 --- a/pulsar-client-cpp/include/pulsar/MessageRoutingPolicy.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_MESSAGE_ROUTING_POLICY_HEADER_ -#define PULSAR_MESSAGE_ROUTING_POLICY_HEADER_ - -#include -#include -#include -#include -#include - -/* - * Implement this interface to define custom policy giving message to - * partition mapping. - */ -namespace pulsar { - -class PULSAR_PUBLIC MessageRoutingPolicy { - public: - virtual ~MessageRoutingPolicy() {} - - /** @deprecated - Use int getPartition(const Message& msg, const TopicMetadata& topicMetadata) - */ - virtual int getPartition(const Message& msg) { - throw DeprecatedException( - "Use int getPartition(const Message& msg," - " const TopicMetadata& topicMetadata)"); - } - - /** - * Choose the partition from the message and topic metadata - * - * @param message the Message - * @param topicMetadata the TopicMetadata that contains the partition number - * @return the partition number - */ - virtual int getPartition(const Message& msg, const TopicMetadata& topicMetadata) { - return getPartition(msg); - } -}; - -typedef std::shared_ptr MessageRoutingPolicyPtr; -} // namespace pulsar - -#endif // PULSAR_MESSAGE_ROUTING_POLICY_HEADER_ diff --git a/pulsar-client-cpp/include/pulsar/Producer.h b/pulsar-client-cpp/include/pulsar/Producer.h deleted file mode 100644 index f414b76e1e56a..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Producer.h +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PRODUCER_HPP_ -#define PRODUCER_HPP_ - -#include -#include -#include -#include - -namespace pulsar { -class ProducerImplBase; -class PulsarWrapper; -class PulsarFriend; - -typedef std::function FlushCallback; -typedef std::shared_ptr ProducerImplBasePtr; - -class PULSAR_PUBLIC Producer { - public: - /** - * Construct an uninitialized Producer. - */ - Producer(); - - /** - * @return the topic to which producer is publishing to - */ - const std::string& getTopic() const; - - /** - * @return the producer name which could have been assigned by the system or specified by the client - */ - const std::string& getProducerName() const; - - /** - * @deprecated - * It's the same with send(const Message& msg, MessageId& messageId) except that MessageId will be stored - * in `msg` though `msg` is `const`. - */ - Result send(const Message& msg); - - /** - * Publish a message on the topic associated with this Producer and get the associated MessageId. - * - * This method will block until the message will be accepted and persisted - * by the broker. In case of errors, the client library will try to - * automatically recover and use a different broker. - * - * If it wasn't possible to successfully publish the message within the sendTimeout, - * an error will be returned. - * - * This method is equivalent to asyncSend() and wait until the callback is triggered. 
- * - * @param [in] msg message to publish - * @param [out] messageId the message id assigned to the published message - * @return ResultOk if the message was published successfully - * @return ResultTimeout if message was not sent successfully in ProducerConfiguration#getSendTimeout - * @return ResultProducerQueueIsFull if the outgoing messsage queue is full when - * ProducerConfiguration::getBlockIfQueueFull was false - * @return ResultMessageTooBig if message size is bigger than the maximum message size - * @return ResultAlreadyClosed if Producer was already closed when message was sent - * @return ResultCryptoError if ProducerConfiguration::isEncryptionEnabled returns true but the message - * was failed to encrypt - * @return ResultInvalidMessage if message's invalid, it's usually caused by resending the same Message - */ - Result send(const Message& msg, MessageId& messageId); - - /** - * Asynchronously publish a message on the topic associated with this Producer. - * - * This method will initiate the publish operation and return immediately. The - * provided callback will be triggered when the message has been be accepted and persisted - * by the broker. In case of errors, the client library will try to - * automatically recover and use a different broker. - * - * If it wasn't possible to successfully publish the message within the sendTimeout, the - * callback will be triggered with a Result::WriteError code. - * - * @param msg message to publish - * @param callback the callback to get notification of the completion - */ - void sendAsync(const Message& msg, SendCallback callback); - - /** - * Flush all the messages buffered in the client and wait until all messages have been successfully - * persisted. - */ - Result flush(); - - /** - * Flush all the messages buffered in the client and wait until all messages have been successfully - * persisted. 
- */ - void flushAsync(FlushCallback callback); - - /** - * Get the last sequence id that was published by this producer. - * - * This represent either the automatically assigned or custom sequence id (set on the MessageBuilder) that - * was published and acknowledged by the broker. - * - * After recreating a producer with the same producer name, this will return the last message that was - * published in - * the previous producer session, or -1 if there no message was ever published. - * - * @return the last sequence id published by this producer - */ - int64_t getLastSequenceId() const; - - /** - * Return an identifier for the schema version that this producer was created with. - * - * When the producer is created, if a schema info was passed, the broker will - * determine the version of the passed schema. This identifier should be treated - * as an opaque identifier. In particular, even though this is represented as a string, the - * version might not be ascii printable. - */ - const std::string& getSchemaVersion() const; - - /** - * Close the producer and release resources allocated. - * - * No more writes will be accepted from this producer. Waits until - * all pending write requests are persisted. In case of errors, - * pending writes will not be retried. - * - * @return an error code to indicate the success or failure - */ - Result close(); - - /** - * Close the producer and release resources allocated. - * - * No more writes will be accepted from this producer. The provided callback will be - * triggered when all pending write requests are persisted. In case of errors, - * pending writes will not be retried. 
- */ - void closeAsync(CloseCallback callback); - - /** - * @return Whether the producer is currently connected to the broker - */ - bool isConnected() const; - - private: - explicit Producer(ProducerImplBasePtr); - - friend class ClientImpl; - friend class PulsarFriend; - friend class PulsarWrapper; - - ProducerImplBasePtr impl_; - - // For unit test case BatchMessageTest::producerFailureResult only - void producerFailMessages(Result result); -}; -} // namespace pulsar - -#endif /* PRODUCER_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/ProducerConfiguration.h b/pulsar-client-cpp/include/pulsar/ProducerConfiguration.h deleted file mode 100644 index fb331ea828c72..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ProducerConfiguration.h +++ /dev/null @@ -1,537 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_PRODUCERCONFIGURATION_H_ -#define PULSAR_PRODUCERCONFIGURATION_H_ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace pulsar { - -typedef std::function SendCallback; -typedef std::function CloseCallback; - -struct ProducerConfigurationImpl; -class PulsarWrapper; - -/** - * Class that holds the configuration for a producer - */ -class PULSAR_PUBLIC ProducerConfiguration { - public: - enum PartitionsRoutingMode - { - UseSinglePartition, - RoundRobinDistribution, - CustomPartition - }; - enum HashingScheme - { - Murmur3_32Hash, - BoostHash, - JavaStringHash - }; - enum BatchingType - { - /** - * Default batching. - * - *

incoming single messages: - * (k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3) - * - *

batched into single batch message: - * [(k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3)] - */ - DefaultBatching, - - /** - * Key based batching. - * - *

incoming single messages: - * (k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3) - * - *

batched into single batch message: - * [(k1, v1), (k1, v2), (k1, v3)], [(k2, v1), (k2, v2), (k2, v3)], [(k3, v1), (k3, v2), (k3, v3)] - */ - KeyBasedBatching - }; - enum ProducerAccessMode - { - /** - * By default multiple producers can publish on a topic. - */ - Shared = 0, - - /** - * Require exclusive access for producer. Fail immediately if there's already a producer connected. - */ - Exclusive = 1 - }; - - ProducerConfiguration(); - ~ProducerConfiguration(); - ProducerConfiguration(const ProducerConfiguration&); - ProducerConfiguration& operator=(const ProducerConfiguration&); - - /** - * Set the producer name which could be assigned by the system or specified by the client. - * - * @param producerName producer name. - * @return - */ - ProducerConfiguration& setProducerName(const std::string& producerName); - - /** - * The getter associated with setProducerName(). - */ - const std::string& getProducerName() const; - - /** - * Declare the schema of the data that will be published by this producer. - * - * The schema will be checked against the schema of the topic, and it - * will fail if it's not compatible, though the client library will - * not perform any validation that the actual message payload are - * conforming to the specified schema. - * - * For all purposes, this - * @param schemaInfo - * @return - */ - ProducerConfiguration& setSchema(const SchemaInfo& schemaInfo); - - /** - * @return the schema information declared for this producer - */ - const SchemaInfo& getSchema() const; - - /** - * The getter associated with getSendTimeout() - */ - ProducerConfiguration& setSendTimeout(int sendTimeoutMs); - - /** - * Get the send timeout is milliseconds. - * - * If a message is not acknowledged by the server before the sendTimeout expires, an error will be - * reported. - * - * If the timeout is zero, there will be no timeout. 
- * - * @return the send timeout in milliseconds (Default: 30000) - */ - int getSendTimeout() const; - - /** - * Set the baseline of the sequence ID for messages published by the producer. - *

- * The first message uses (initialSequenceId + 1) as its sequence ID and subsequent messages are assigned - * incremental sequence IDs. - * - * Default: -1, which means the first message's sequence ID is 0. - * - * @param initialSequenceId the initial sequence ID for the producer. - * @return - */ - ProducerConfiguration& setInitialSequenceId(int64_t initialSequenceId); - - /** - * The getter associated with setInitialSequenceId(). - */ - int64_t getInitialSequenceId() const; - - /** - * Set the compression type for the producer. - *

- * By default, message payloads are not compressed. Supported compression types are: - *

    - * - *
  • {@link CompressionNone}: No compression
  • - *
  • {@link CompressionLZ4}: LZ4 Compression https://lz4.github.io/lz4/ - *
  • {@link CompressionZLib}: ZLib Compression http://zlib.net/
  • - *
  • {@link CompressionZSTD}: Zstandard Compression https://facebook.github.io/zstd/ (Since Pulsar 2.3. - * Zstd cannot be used if consumer applications are not in version >= 2.3 as well)
  • - *
  • {@link CompressionSNAPPY}: Snappy Compression https://google.github.io/snappy/ (Since Pulsar 2.4. - * Snappy cannot be used if consumer applications are not in version >= 2.4 as well)
  • - *
- */ - ProducerConfiguration& setCompressionType(CompressionType compressionType); - - /** - * The getter associated with setCompressionType(). - */ - CompressionType getCompressionType() const; - - /** - * Set the max size of the queue holding the messages pending to receive an acknowledgment from the - * broker.

When the queue is full, by default, all calls to Producer::send and Producer::sendAsync - * would fail unless blockIfQueueFull is set to true. Use {@link #setBlockIfQueueFull} to change the - * blocking behavior. - * - * Default: 1000 - * - * @param maxPendingMessages max number of pending messages. - * @return - */ - ProducerConfiguration& setMaxPendingMessages(int maxPendingMessages); - - /** - * The getter associated with setMaxPendingMessages(). - */ - int getMaxPendingMessages() const; - - /** - * Set the number of max pending messages across all the partitions - *

- * This setting will be used to lower the max pending messages for each partition - * ({@link #setMaxPendingMessages(int)}), if the total exceeds the configured value. - * - * Default: 50000 - * - * @param maxPendingMessagesAcrossPartitions - */ - ProducerConfiguration& setMaxPendingMessagesAcrossPartitions(int maxPendingMessagesAcrossPartitions); - - /** - * @return the maximum number of pending messages allowed across all the partitions - */ - int getMaxPendingMessagesAcrossPartitions() const; - - /** - * Set the message routing modes for partitioned topics. - * - * Default: UseSinglePartition - * - * @param PartitionsRoutingMode partition routing mode. - * @return - */ - ProducerConfiguration& setPartitionsRoutingMode(const PartitionsRoutingMode& mode); - - /** - * The getter associated with setPartitionsRoutingMode(). - */ - PartitionsRoutingMode getPartitionsRoutingMode() const; - - /** - * Set a custom message routing policy by passing an implementation of MessageRouter. - * - * @param messageRouter message router. - * @return - */ - ProducerConfiguration& setMessageRouter(const MessageRoutingPolicyPtr& router); - - /** - * The getter associated with setMessageRouter(). - */ - const MessageRoutingPolicyPtr& getMessageRouterPtr() const; - - /** - * Set the hashing scheme, which is a standard hashing function available when choosing the partition - * used for a particular message. - * - * Default: HashingScheme::BoostHash - * - *

Standard hashing functions available are: - *

    - *
  • {@link HashingScheme::JavaStringHash}: Java {@code String.hashCode()} (Default). - *
  • {@link HashingScheme::BoostHash}: Use [Boost hashing - * function](https://www.boost.org/doc/libs/1_72_0/doc/html/boost/hash.html). - *
  • {@link HashingScheme::Murmur3_32Hash}: Use [Murmur3 hashing - * function](https://en.wikipedia.org/wiki/MurmurHash"). - *
- * - * @param scheme hashing scheme. - * @return - */ - ProducerConfiguration& setHashingScheme(const HashingScheme& scheme); - - /** - * The getter associated with setHashingScheme(). - */ - HashingScheme getHashingScheme() const; - - /** - * This config affects producers of partitioned topics only. It controls whether - * producers register and connect immediately to the owner broker of each partition - * or start lazily on demand. The internal producer of one partition is always - * started eagerly, chosen by the routing policy, but the internal producers of - * any additional partitions are started on demand, upon receiving their first - * message. - * Using this mode can reduce the strain on brokers for topics with large numbers of - * partitions and when the SinglePartition routing policy is used without keyed messages. - * Because producer connection can be on demand, this can produce extra send latency - * for the first messages of a given partition. - * @param true/false as to whether to start partition producers lazily - * @return - */ - ProducerConfiguration& setLazyStartPartitionedProducers(bool); - - /** - * The getter associated with setLazyStartPartitionedProducers() - */ - bool getLazyStartPartitionedProducers() const; - - /** - * The setter associated with getBlockIfQueueFull() - */ - ProducerConfiguration& setBlockIfQueueFull(bool); - - /** - * @return whether Producer::send or Producer::sendAsync operations should block when the outgoing message - * queue is full. (Default: false) - */ - bool getBlockIfQueueFull() const; - - // Zero queue size feature will not be supported on consumer end if batching is enabled - - /** - * Control whether automatic batching of messages is enabled or not for the producer. - * - * Default: true - * - * When automatic batching is enabled, multiple calls to Producer::sendAsync can result in a single batch - * to be sent to the broker, leading to better throughput, especially when publishing small messages. 
If - * compression is enabled, messages are compressed at the batch level, leading to a much better - * compression ratio for similar headers or contents. - * - * When the default batch delay is set to 10 ms and the default batch size is 1000 messages. - * - * @see ProducerConfiguration::setBatchingMaxPublishDelayMs - * - */ - ProducerConfiguration& setBatchingEnabled(const bool& batchingEnabled); - - /** - * Return the flag whether automatic message batching is enabled or not for the producer. - * - * @return true if automatic message batching is enabled. Otherwise it returns false. - * @since 2.0.0
- * It is enabled by default. - */ - const bool& getBatchingEnabled() const; - - /** - * Set the max number of messages permitted in a batch. Default value: 1000. If you set this option - * to a value greater than 1, messages are queued until this threshold is reached or batch interval has - * elapsed. - * - * All messages in a batch are published as - * a single batch message. The consumer is delivered individual messages in the batch in the same - * order they are enqueued. - * @param batchMessagesMaxMessagesPerBatch max number of messages permitted in a batch - * @return - */ - ProducerConfiguration& setBatchingMaxMessages(const unsigned int& batchingMaxMessages); - - /** - * The getter associated with setBatchingMaxMessages(). - */ - const unsigned int& getBatchingMaxMessages() const; - - /** - * Set the max size of messages permitted in a batch. - * Default value: 128 KB. If you set this option to a value greater than 1, - * messages are queued until this threshold is reached or - * batch interval has elapsed. - * - *

All messages in a batch are published as a single batch message. - * The consumer is delivered individual - * messages in the batch in the same order they are enqueued. - * - * @param batchingMaxAllowedSizeInBytes - */ - ProducerConfiguration& setBatchingMaxAllowedSizeInBytes( - const unsigned long& batchingMaxAllowedSizeInBytes); - - /** - * The getter associated with setBatchingMaxAllowedSizeInBytes(). - */ - const unsigned long& getBatchingMaxAllowedSizeInBytes() const; - - /** - * Set the max time for message publish delay permitted in a batch. - * Default value: 10 ms. - * - * @param batchingMaxPublishDelayMs max time for message publish delay permitted in a batch. - * @return - */ - ProducerConfiguration& setBatchingMaxPublishDelayMs(const unsigned long& batchingMaxPublishDelayMs); - - /** - * The getter associated with setBatchingMaxPublishDelayMs(). - */ - const unsigned long& getBatchingMaxPublishDelayMs() const; - - /** - * Default: DefaultBatching - * - * @see BatchingType - */ - ProducerConfiguration& setBatchingType(BatchingType batchingType); - - /** - * @return batching type. - * @see BatchingType. - */ - BatchingType getBatchingType() const; - - /** - * The getter associated with setCryptoKeyReader(). - */ - const CryptoKeyReaderPtr getCryptoKeyReader() const; - - /** - * Set the shared pointer to CryptoKeyReader. - * - * @param shared pointer to CryptoKeyReader. - * @return - */ - ProducerConfiguration& setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader); - - /** - * The getter associated with setCryptoFailureAction(). - */ - ProducerCryptoFailureAction getCryptoFailureAction() const; - - /** - * Sets the ProducerCryptoFailureAction to the value specified. - * - * @param action - * the action taken by the producer in case of encryption failures. 
- * @return - */ - ProducerConfiguration& setCryptoFailureAction(ProducerCryptoFailureAction action); - - /** - * @return all the encryption keys added - */ - const std::set& getEncryptionKeys() const; - - /** - * @return true if encryption keys are added - */ - bool isEncryptionEnabled() const; - - /** - * Add public encryption key, used by producer to encrypt the data key. - * - * At the time of producer creation, Pulsar client checks if there are keys added to encryptionKeys. If - * keys are found, a callback getKey(String keyName) is invoked against each key to load the values of the - * key. Application should implement this callback to return the key in pkcs8 format. If compression is - * enabled, message is encrypted after compression. If batch messaging is enabled, the batched message is - * encrypted. - * - * @key the encryption key to add - * @return the ProducerConfiguration self - */ - ProducerConfiguration& addEncryptionKey(std::string key); - - /** - * Check whether the producer has a specific property attached. - * - * @param name the name of the property to check - * @return true if the message has the specified property - * @return false if the property is not defined - */ - bool hasProperty(const std::string& name) const; - - /** - * Get the value of a specific property - * - * @param name the name of the property - * @return the value of the property or null if the property was not defined - */ - const std::string& getProperty(const std::string& name) const; - - /** - * Get all the properties attached to this producer. - */ - std::map& getProperties() const; - - /** - * Sets a new property on the producer - * . 
- * @param name the name of the property - * @param value the associated value - */ - ProducerConfiguration& setProperty(const std::string& name, const std::string& value); - - /** - * Add all the properties in the provided map - */ - ProducerConfiguration& setProperties(const std::map& properties); - - /** - * If message size is higher than allowed max publish-payload size by broker then enableChunking helps - * producer to split message into multiple chunks and publish them to broker separately in order. So, it - * allows client to successfully publish large size of messages in pulsar. - * - * Set it true to enable this feature. If so, you must disable batching (see setBatchingEnabled), - * otherwise the producer creation will fail. - * - * There are some other recommendations when it's enabled: - * 1. This features is right now only supported for non-shared subscription and persistent-topic. - * 2. It's better to reduce setMaxPendingMessages to avoid producer accupying large amount of memory by - * buffered messages. - * 3. Set message-ttl on the namespace to cleanup chunked messages. Sometimes due to broker-restart or - * publish time, producer might fail to publish entire large message. So, consumer will not be able to - * consume and ack those messages. - * - * Default: false - * - * @param chunkingEnabled whether chunking is enabled - * @return the ProducerConfiguration self - */ - ProducerConfiguration& setChunkingEnabled(bool chunkingEnabled); - - /** - * The getter associated with setChunkingEnabled(). - */ - bool isChunkingEnabled() const; - - /** - * Set the type of access mode that the producer requires on the topic. - * - * @see ProducerAccessMode - * @param accessMode - * The type of access to the topic that the producer requires - */ - ProducerConfiguration& setAccessMode(const ProducerAccessMode& accessMode); - - /** - * Get the type of access mode that the producer requires on the topic. 
- */ - ProducerAccessMode getAccessMode() const; - - friend class PulsarWrapper; - - private: - struct Impl; - std::shared_ptr impl_; -}; -} // namespace pulsar -#endif /* PULSAR_PRODUCERCONFIGURATION_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/ProducerCryptoFailureAction.h b/pulsar-client-cpp/include/pulsar/ProducerCryptoFailureAction.h deleted file mode 100644 index 7693956440674..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ProducerCryptoFailureAction.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PRODUCERCRYPTOFAILUREACTION_H_ -#define PRODUCERCRYPTOFAILUREACTION_H_ - -namespace pulsar { - -enum class ProducerCryptoFailureAction -{ - FAIL, // This is the default option to fail send if crypto operation fails - SEND // Ignore crypto failure and proceed with sending unencrypted messages -}; - -} /* namespace pulsar */ - -#endif /* PRODUCERCRYPTOFAILUREACTION_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/ProtobufNativeSchema.h b/pulsar-client-cpp/include/pulsar/ProtobufNativeSchema.h deleted file mode 100644 index ef9a7b1c0485c..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ProtobufNativeSchema.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include - -namespace pulsar { - -/** - * Create a protobuf native schema using a descriptor. 
- * - * @param descriptor the Descriptor object of the target class - * @return the protobuf native schema - * @throw std::invalid_argument if descriptor is nullptr - */ -PULSAR_PUBLIC SchemaInfo createProtobufNativeSchema(const google::protobuf::Descriptor* descriptor); - -} // namespace pulsar diff --git a/pulsar-client-cpp/include/pulsar/Reader.h b/pulsar-client-cpp/include/pulsar/Reader.h deleted file mode 100644 index 554788e8cd654..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Reader.h +++ /dev/null @@ -1,163 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_READER_HPP_ -#define PULSAR_READER_HPP_ - -#include -#include -#include - -namespace pulsar { -class PulsarWrapper; -class PulsarFriend; -class ReaderImpl; - -typedef std::function HasMessageAvailableCallback; - -/** - * A Reader can be used to scan through all the messages currently available in a topic. - */ -class PULSAR_PUBLIC Reader { - public: - /** - * Construct an uninitialized reader object - */ - Reader(); - - /** - * @return the topic this reader is reading from - */ - const std::string& getTopic() const; - - /** - * Read a single message. 
- * - * If a message is not immediately available, this method will block until a new - * message is available. - * - * @param msg a non-const reference where the received message will be copied - * @return ResultOk when a message is received - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ - Result readNext(Message& msg); - - /** - * Read a single message - * - * @param msg a non-const reference where the received message will be copied - * @param timeoutMs the receive timeout in milliseconds - * @return ResultOk if a message was received - * @return ResultTimeout if the receive timeout was triggered - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ - Result readNext(Message& msg, int timeoutMs); - - /** - * Close the reader and stop the broker to push more messages - * - * @return ResultOk if the reader is closed successfully - */ - Result close(); - - /** - * Asynchronously close the reader and stop the broker to push more messages - * - * @param callback the callback that is triggered when the reader is closed - */ - void closeAsync(ResultCallback callback); - - /** - * Asynchronously check if there is any message available to read from the current position. - */ - void hasMessageAvailableAsync(HasMessageAvailableCallback callback); - - /** - * Check if there is any message available to read from the current position. - */ - Result hasMessageAvailable(bool& hasMessageAvailable); - - /** - * Reset the this reader to a specific message id. - * The message id can either be a specific message or represent the first or last messages in the topic. - * - * Note: this operation can only be done on non-partitioned topics. For these, one can rather perform the - * seek() on the individual partitions. 
- * - * @param messageId - * the message id where to reposition the subscription - */ - Result seek(const MessageId& msgId); - - /** - * Reset this reader to a specific message publish time. - * - * @param timestamp - * the message publish time where to reposition the subscription - */ - Result seek(uint64_t timestamp); - - /** - * Asynchronously reset this reader to a specific message id. - * The message id can either be a specific message or represent the first or last messages in the topic. - * - * Note: this operation can only be done on non-partitioned topics. For these, one can rather perform the - * seek() on the individual partitions. - * - * @param messageId - * the message id where to reposition the subscription - */ - void seekAsync(const MessageId& msgId, ResultCallback callback); - - /** - * Asynchronously reset this reader to a specific message publish time. - * - * @param timestamp - * the message publish time where to reposition the subscription - */ - void seekAsync(uint64_t timestamp, ResultCallback callback); - - /** - * @return Whether the reader is currently connected to the broker - */ - bool isConnected() const; - - /** - * Asynchronously get an ID of the last available message or a message ID with -1 as an entryId if the - * topic is empty. - */ - void getLastMessageIdAsync(GetLastMessageIdCallback callback); - - /** - * Get an ID of the last available message or a message ID with -1 as an entryId if the topic is empty. 
- */ - Result getLastMessageId(MessageId& messageId); - - private: - typedef std::shared_ptr ReaderImplPtr; - ReaderImplPtr impl_; - explicit Reader(ReaderImplPtr); - - friend class PulsarFriend; - friend class PulsarWrapper; - friend class ReaderImpl; - friend class ReaderTest; -}; -} // namespace pulsar - -#endif /* PULSAR_READER_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/ReaderConfiguration.h b/pulsar-client-cpp/include/pulsar/ReaderConfiguration.h deleted file mode 100644 index 5b88553534a6f..0000000000000 --- a/pulsar-client-cpp/include/pulsar/ReaderConfiguration.h +++ /dev/null @@ -1,302 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_READER_CONFIGURATION_H_ -#define PULSAR_READER_CONFIGURATION_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { - -class Reader; -class PulsarWrapper; - -/// Callback definition for non-data operation -typedef std::function ResultCallback; -typedef std::function GetLastMessageIdCallback; - -/// Callback definition for MessageListener -typedef std::function ReaderListener; - -struct ReaderConfigurationImpl; - -/** - * Class specifying the configuration of a consumer. 
- */ -class PULSAR_PUBLIC ReaderConfiguration { - public: - ReaderConfiguration(); - ~ReaderConfiguration(); - ReaderConfiguration(const ReaderConfiguration&); - ReaderConfiguration& operator=(const ReaderConfiguration&); - - /** - * Declare the schema of the data that this reader will be accepting. - * - * The schema will be checked against the schema of the topic, and the - * reader creation will fail if it's not compatible. - * - * @param schemaInfo the schema definition object - */ - ReaderConfiguration& setSchema(const SchemaInfo& schemaInfo); - - /** - * @return the schema information declared for this consumer - */ - const SchemaInfo& getSchema() const; - - /** - * A message listener enables your application to configure how to process - * messages. A listener will be called in order for every message received. - */ - ReaderConfiguration& setReaderListener(ReaderListener listener); - - /** - * @return the configured {@link ReaderListener} for the reader - */ - ReaderListener getReaderListener() const; - - /** - * @return true if {@link ReaderListener} has been set - */ - bool hasReaderListener() const; - - /** - * Sets the size of the reader receive queue. - * - * The consumer receive queue controls how many messages can be accumulated by the consumer before the - * application calls receive(). Using a higher value may potentially increase the consumer throughput - * at the expense of bigger memory utilization. - * - * Setting the consumer queue size to 0 decreases the throughput of the consumer by disabling - * pre-fetching of - * messages. This approach improves the message distribution on shared subscription by pushing messages - * only to - * the consumers that are ready to process them. Neither receive with timeout nor partitioned topics can - * be - * used if the consumer queue size is 0. The receive() function call should not be interrupted when - * the consumer queue size is 0. 
- * - * The default value is 1000 messages and it is appropriate for most use cases. - * - * @param size - * the new receiver queue size value - */ - void setReceiverQueueSize(int size); - - /** - * @return the receiver queue size - */ - int getReceiverQueueSize() const; - - /** - * Set the reader name. - * - * @param readerName - */ - void setReaderName(const std::string& readerName); - - /** - * @return the reader name - */ - const std::string& getReaderName() const; - - /** - * Set the subscription role prefix. - * - * The default prefix is an empty string. - * - * @param subscriptionRolePrefix - */ - void setSubscriptionRolePrefix(const std::string& subscriptionRolePrefix); - - /** - * @return the subscription role prefix - */ - const std::string& getSubscriptionRolePrefix() const; - - /** - * If enabled, the consumer reads messages from the compacted topics rather than reading the full message - * backlog of the topic. This means that if the topic has been compacted, the consumer only sees the - * latest value for each key in the topic, up until the point in the topic message backlog that has been - * compacted. Beyond that point, message is sent as normal. - * - * readCompacted can only be enabled subscriptions to persistent topics, which have a single active - * consumer (for example, failure or exclusive subscriptions). Attempting to enable it on subscriptions to - * a non-persistent topics or on a shared subscription leads to the subscription call failure. - * - * @param readCompacted - * whether to read from the compacted topic - */ - void setReadCompacted(bool compacted); - - /** - * @return true if readCompacted is enabled - */ - bool isReadCompacted() const; - - /** - * Set the internal subscription name. 
- * - * @param internal subscriptionName - */ - void setInternalSubscriptionName(std::string internalSubscriptionName); - - /** - * @return the internal subscription name - */ - const std::string& getInternalSubscriptionName() const; - - /** - * Set the timeout in milliseconds for unacknowledged messages, the timeout needs to be greater than - * 10 seconds. An Exception is thrown if the given value is less than 10000 (10 seconds). - * If a successful acknowledgement is not sent within the timeout all the unacknowledged messages are - * redelivered. - * @param timeout in milliseconds - */ - void setUnAckedMessagesTimeoutMs(const uint64_t milliSeconds); - - /** - * @return the configured timeout in milliseconds for unacked messages. - */ - long getUnAckedMessagesTimeoutMs() const; - - /** - * Set the tick duration time that defines the granularity of the ack-timeout redelivery (in - * milliseconds). - * - * The default value is 1000, which means 1 second. - * - * Using a higher tick time - * reduces the memory overhead to track messages when the ack-timeout is set to a bigger value. - * - * @param milliSeconds the tick duration time (in milliseconds) - * - */ - void setTickDurationInMs(const uint64_t milliSeconds); - - /** - * @return the tick duration time (in milliseconds) - */ - long getTickDurationInMs() const; - - /** - * Set time window in milliseconds for grouping message ACK requests. An ACK request is not sent - * to broker until the time window reaches its end, or the number of grouped messages reaches - * limit. Default is 100 milliseconds. If it's set to a non-positive value, ACK requests will be - * directly sent to broker without grouping. - * - * @param ackGroupMillis time of ACK grouping window in milliseconds. - */ - void setAckGroupingTimeMs(long ackGroupingMillis); - - /** - * Get grouping time window in milliseconds. - * - * @return grouping time window in milliseconds. 
- */ - long getAckGroupingTimeMs() const; - - /** - * Set max number of grouped messages within one grouping time window. If it's set to a - * non-positive value, number of grouped messages is not limited. Default is 1000. - * - * @param maxGroupingSize max number of grouped messages with in one grouping time window. - */ - void setAckGroupingMaxSize(long maxGroupingSize); - - /** - * Get max number of grouped messages within one grouping time window. - * - * @return max number of grouped messages within one grouping time window. - */ - long getAckGroupingMaxSize() const; - - /** - * @return true if encryption keys are added - */ - bool isEncryptionEnabled() const; - - /** - * @return the shared pointer to CryptoKeyReader - */ - const CryptoKeyReaderPtr getCryptoKeyReader() const; - - /** - * Set the shared pointer to CryptoKeyReader. - * - * @param the shared pointer to CryptoKeyReader - */ - ReaderConfiguration& setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader); - - /** - * @return the ConsumerCryptoFailureAction - */ - ConsumerCryptoFailureAction getCryptoFailureAction() const; - - /** - * Set the CryptoFailureAction for the reader. - */ - ReaderConfiguration& setCryptoFailureAction(ConsumerCryptoFailureAction action); - - /** - * Check whether the message has a specific property attached. - * - * @param name the name of the property to check - * @return true if the message has the specified property - * @return false if the property is not defined - */ - bool hasProperty(const std::string& name) const; - - /** - * Get the value of a specific property - * - * @param name the name of the property - * @return the value of the property or null if the property was not defined - */ - const std::string& getProperty(const std::string& name) const; - - /** - * Get all the properties attached to this producer. - */ - std::map& getProperties() const; - - /** - * Sets a new property on a message. 
- * @param name the name of the property - * @param value the associated value - */ - ReaderConfiguration& setProperty(const std::string& name, const std::string& value); - - /** - * Add all the properties in the provided map - */ - ReaderConfiguration& setProperties(const std::map& properties); - - private: - std::shared_ptr impl_; -}; -} // namespace pulsar -#endif /* PULSAR_READER_CONFIGURATION_H_ */ diff --git a/pulsar-client-cpp/include/pulsar/Result.h b/pulsar-client-cpp/include/pulsar/Result.h deleted file mode 100644 index cc7b457528e1f..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Result.h +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef ERROR_HPP_ -#define ERROR_HPP_ - -#include -#include - -namespace pulsar { - -/** - * Collection of return codes - */ -enum Result -{ - ResultRetryable = -1, /// An internal error code used for retry - ResultOk = 0, /// Operation successful - - ResultUnknownError, /// Unknown error happened on broker - - ResultInvalidConfiguration, /// Invalid configuration - - ResultTimeout, /// Operation timed out - ResultLookupError, /// Broker lookup failed - ResultConnectError, /// Failed to connect to broker - ResultReadError, /// Failed to read from socket - - ResultAuthenticationError, /// Authentication failed on broker - ResultAuthorizationError, /// Client is not authorized to create producer/consumer - ResultErrorGettingAuthenticationData, /// Client cannot find authorization data - - ResultBrokerMetadataError, /// Broker failed in updating metadata - ResultBrokerPersistenceError, /// Broker failed to persist entry - ResultChecksumError, /// Corrupt message checksum failure - - ResultConsumerBusy, /// Exclusive consumer is already connected - ResultNotConnected, /// Producer/Consumer is not currently connected to broker - ResultAlreadyClosed, /// Producer/Consumer is already closed and not accepting any operation - - ResultInvalidMessage, /// Error in publishing an already used message - - ResultConsumerNotInitialized, /// Consumer is not initialized - ResultProducerNotInitialized, /// Producer is not initialized - ResultProducerBusy, /// Producer with same name is already connected - ResultTooManyLookupRequestException, /// Too Many concurrent LookupRequest - - ResultInvalidTopicName, /// Invalid topic name - ResultInvalidUrl, /// Client Initialized with Invalid Broker Url (VIP Url passed to Client Constructor) - ResultServiceUnitNotReady, /// Service Unit unloaded between client did lookup and producer/consumer got - /// created - ResultOperationNotSupported, - ResultProducerBlockedQuotaExceededError, /// Producer is blocked - 
ResultProducerBlockedQuotaExceededException, /// Producer is getting exception - ResultProducerQueueIsFull, /// Producer queue is full - ResultMessageTooBig, /// Trying to send a messages exceeding the max size - ResultTopicNotFound, /// Topic not found - ResultSubscriptionNotFound, /// Subscription not found - ResultConsumerNotFound, /// Consumer not found - ResultUnsupportedVersionError, /// Error when an older client/version doesn't support a required feature - ResultTopicTerminated, /// Topic was already terminated - ResultCryptoError, /// Error when crypto operation fails - - ResultIncompatibleSchema, /// Specified schema is incompatible with the topic's schema - ResultConsumerAssignError, /// Error when a new consumer connected but can't assign messages to this - /// consumer - ResultCumulativeAcknowledgementNotAllowedError, /// Not allowed to call cumulativeAcknowledgement in - /// Shared and Key_Shared subscription mode - ResultTransactionCoordinatorNotFoundError, /// Transaction coordinator not found - ResultInvalidTxnStatusError, /// Invalid txn status error - ResultNotAllowedError, /// Not allowed - ResultTransactionConflict, /// Transaction ack conflict - ResultTransactionNotFound, /// Transaction not found - ResultProducerFenced, /// Producer was fenced by broker - - ResultMemoryBufferIsFull, /// Client-wide memory limit has been reached - - ResultInterrupted, /// Interrupted while waiting to dequeue -}; - -// Return string representation of result code -PULSAR_PUBLIC const char* strResult(Result result); - -PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, pulsar::Result result); -} // namespace pulsar - -#endif /* ERROR_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/Schema.h b/pulsar-client-cpp/include/pulsar/Schema.h deleted file mode 100644 index 7e7a5aedbbb63..0000000000000 --- a/pulsar-client-cpp/include/pulsar/Schema.h +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include - -#include -#include -#include -#include - -namespace pulsar { - -enum SchemaType -{ - /** - * No schema defined - */ - NONE = 0, - - /** - * Simple String encoding with UTF-8 - */ - STRING = 1, - - /** - * JSON object encoding and validation - */ - JSON = 2, - - /** - * Protobuf message encoding and decoding - */ - PROTOBUF = 3, - - /** - * Serialize and deserialize via Avro - */ - AVRO = 4, - - /** - * A 8-byte integer. - */ - INT8 = 6, - - /** - * A 16-byte integer. - */ - INT16 = 7, - - /** - * A 32-byte integer. - */ - INT32 = 8, - - /** - * A 64-byte integer. - */ - INT64 = 9, - - /** - * A float number. - */ - FLOAT = 10, - - /** - * A double number - */ - DOUBLE = 11, - - /** - * A Schema that contains Key Schema and Value Schema. - */ - KEY_VALUE = 15, - - /** - * Protobuf native schema based on Descriptor. - */ - PROTOBUF_NATIVE = 20, - - /** - * A bytes array. - */ - BYTES = -1, - - /** - * Auto Consume Type. - */ - AUTO_CONSUME = -3, - - /** - * Auto Publish Type. 
- */ - AUTO_PUBLISH = -4, -}; - -// Return string representation of result code -PULSAR_PUBLIC const char *strSchemaType(SchemaType schemaType); - -class SchemaInfoImpl; - -typedef std::map StringMap; - -/** - * Encapsulates data around the schema definition - */ -class PULSAR_PUBLIC SchemaInfo { - public: - /** - * The default constructor with following configs: - * - schemaType: SchemaType::BYTES - * - name: "BYTES" - * - schema: "" - * - properties: {} - * - * @see SchemaInfo(SchemaType schemaType, const std::string& name, const std::string& schema, const - * StringMap& properties) - */ - SchemaInfo(); - - /** - * @param schemaType the schema type - * @param name the name of the schema definition - * @param schema the schema definition as a JSON string - * @param properties a map of custom defined properties attached to the schema - */ - SchemaInfo(SchemaType schemaType, const std::string &name, const std::string &schema, - const StringMap &properties = StringMap()); - - /** - * @return the schema type - */ - SchemaType getSchemaType() const; - - /** - * @return the name of the schema definition - */ - const std::string &getName() const; - - /** - * @return the schema definition as a JSON string - */ - const std::string &getSchema() const; - - /** - * @return a map of custom defined properties attached to the schema - */ - const StringMap &getProperties() const; - - private: - typedef std::shared_ptr SchemaInfoImplPtr; - SchemaInfoImplPtr impl_; -}; - -} // namespace pulsar - -PULSAR_PUBLIC std::ostream &operator<<(std::ostream &s, pulsar::SchemaType schemaType); diff --git a/pulsar-client-cpp/include/pulsar/TopicMetadata.h b/pulsar-client-cpp/include/pulsar/TopicMetadata.h deleted file mode 100644 index c57e56a26ccf7..0000000000000 --- a/pulsar-client-cpp/include/pulsar/TopicMetadata.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef TOPIC_METADATA_HPP_ -#define TOPIC_METADATA_HPP_ - -#include - -namespace pulsar { -/** - * Metadata of a topic that can be used for message routing. - */ -class PULSAR_PUBLIC TopicMetadata { - public: - virtual ~TopicMetadata() {} - - /** - * @return the number of partitions - */ - virtual int getNumPartitions() const = 0; -}; -} // namespace pulsar - -#endif /* TOPIC_METADATA_HPP_ */ diff --git a/pulsar-client-cpp/include/pulsar/c/authentication.h b/pulsar-client-cpp/include/pulsar/c/authentication.h deleted file mode 100644 index 9712e7158a6c8..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/authentication.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_authentication pulsar_authentication_t; - -typedef char *(*token_supplier)(void *); - -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_create(const char *dynamicLibPath, - const char *authParamsString); - -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_tls_create(const char *certificatePath, - const char *privateKeyPath); - -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_token_create(const char *token); -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_token_create_with_supplier( - token_supplier tokenSupplier, void *ctx); - -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_basic_create(const char *username, - const char *password); - -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_athenz_create(const char *authParamsString); - -PULSAR_PUBLIC pulsar_authentication_t *pulsar_authentication_oauth2_create(const char *authParamsString); - -PULSAR_PUBLIC void pulsar_authentication_free(pulsar_authentication_t *authentication); - -#ifdef __cplusplus -} -#endif \ No newline at end of file diff --git a/pulsar-client-cpp/include/pulsar/c/client.h b/pulsar-client-cpp/include/pulsar/c/client.h deleted file mode 100644 index f5da826663274..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/client.h +++ /dev/null @@ -1,192 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_client pulsar_client_t; -typedef struct _pulsar_producer pulsar_producer_t; -typedef struct _pulsar_string_list pulsar_string_list_t; - -typedef struct _pulsar_client_configuration pulsar_client_configuration_t; -typedef struct _pulsar_producer_configuration pulsar_producer_configuration_t; - -typedef void (*pulsar_create_producer_callback)(pulsar_result result, pulsar_producer_t *producer, void *ctx); - -typedef void (*pulsar_subscribe_callback)(pulsar_result result, pulsar_consumer_t *consumer, void *ctx); -typedef void (*pulsar_reader_callback)(pulsar_result result, pulsar_reader_t *reader, void *ctx); -typedef void (*pulsar_get_partitions_callback)(pulsar_result result, pulsar_string_list_t *partitions, - void *ctx); - -typedef void (*pulsar_close_callback)(pulsar_result result, void *ctx); - -/** - * Create a Pulsar client object connecting to the specified cluster address and using the specified - * configuration. 
- * - * @param serviceUrl the Pulsar endpoint to use (eg: pulsar://broker-example.com:6650) - * @param clientConfiguration the client configuration to use - */ -PULSAR_PUBLIC pulsar_client_t *pulsar_client_create(const char *serviceUrl, - const pulsar_client_configuration_t *clientConfiguration); - -/** - * Create a producer with default configuration - * - * @see createProducer(const std::string&, const ProducerConfiguration&, Producer&) - * - * @param topic the topic where the new producer will publish - * @param producer a non-const reference where the new producer will be copied - * @return ResultOk if the producer has been successfully created - * @return ResultError if there was an error - */ -PULSAR_PUBLIC pulsar_result pulsar_client_create_producer(pulsar_client_t *client, const char *topic, - const pulsar_producer_configuration_t *conf, - pulsar_producer_t **producer); - -PULSAR_PUBLIC void pulsar_client_create_producer_async(pulsar_client_t *client, const char *topic, - const pulsar_producer_configuration_t *conf, - pulsar_create_producer_callback callback, void *ctx); - -PULSAR_PUBLIC pulsar_result pulsar_client_subscribe(pulsar_client_t *client, const char *topic, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_consumer_t **consumer); - -PULSAR_PUBLIC void pulsar_client_subscribe_async(pulsar_client_t *client, const char *topic, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_subscribe_callback callback, void *ctx); - -/** - * Create a consumer to multiple topics under the same namespace with default configuration - * - * @see subscribe(const std::vector&, const std::string&, Consumer& consumer) - * - * @param topics a list of topic names to subscribe to - * @param topicsCount the number of topics - * @param subscriptionName the subscription name - * @param consumer a non-const reference where the new consumer will be copied - * @return ResultOk if the consumer has been 
successfully created - * @return ResultError if there was an error - */ -PULSAR_PUBLIC pulsar_result pulsar_client_subscribe_multi_topics(pulsar_client_t *client, const char **topics, - int topicsCount, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_consumer_t **consumer); - -PULSAR_PUBLIC void pulsar_client_subscribe_multi_topics_async(pulsar_client_t *client, const char **topics, - int topicsCount, const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_subscribe_callback callback, void *ctx); - -/** - * Create a consumer to multiple (which match given topicPattern) with default configuration - * - * @see subscribeWithRegex(const std::string&, const std::string&, Consumer& consumer) - * - * @param topicPattern topic regex topics should match to subscribe to - * @param subscriptionName the subscription name - * @param consumer a non-const reference where the new consumer will be copied - * @return ResultOk if the consumer has been successfully created - * @return ResultError if there was an error - */ -PULSAR_PUBLIC pulsar_result pulsar_client_subscribe_pattern(pulsar_client_t *client, const char *topicPattern, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_consumer_t **consumer); - -PULSAR_PUBLIC void pulsar_client_subscribe_pattern_async(pulsar_client_t *client, const char *topicPattern, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_subscribe_callback callback, void *ctx); - -/** - * Create a topic reader with given {@code ReaderConfiguration} for reading messages from the specified - * topic. - *

- * The Reader provides a low-level abstraction that allows for manual positioning in the topic, without - * using a - * subscription. Reader can only work on non-partitioned topics. - *

- * The initial reader positioning is done by specifying a message id. The options are: - *

    - *
  • MessageId.earliest : Start reading from the earliest message available in the topic - *
  • MessageId.latest : Start reading from the end topic, only getting messages published - * after the - * reader was created - *
  • MessageId : When passing a particular message id, the reader will position itself on - * that - * specific position. The first message to be read will be the message next to the specified messageId. - *
- * - * @param topic - * The name of the topic where to read - * @param startMessageId - * The message id where the reader will position itself. The first message returned will be the - * one after - * the specified startMessageId - * @param conf - * The {@code ReaderConfiguration} object - * @return The {@code Reader} object - */ -PULSAR_PUBLIC pulsar_result pulsar_client_create_reader(pulsar_client_t *client, const char *topic, - const pulsar_message_id_t *startMessageId, - pulsar_reader_configuration_t *conf, - pulsar_reader_t **reader); - -PULSAR_PUBLIC void pulsar_client_create_reader_async(pulsar_client_t *client, const char *topic, - const pulsar_message_id_t *startMessageId, - pulsar_reader_configuration_t *conf, - pulsar_reader_callback callback, void *ctx); - -PULSAR_PUBLIC pulsar_result pulsar_client_get_topic_partitions(pulsar_client_t *client, const char *topic, - pulsar_string_list_t **partitions); - -PULSAR_PUBLIC void pulsar_client_get_topic_partitions_async(pulsar_client_t *client, const char *topic, - pulsar_get_partitions_callback callback, - void *ctx); - -PULSAR_PUBLIC pulsar_result pulsar_client_close(pulsar_client_t *client); - -PULSAR_PUBLIC void pulsar_client_close_async(pulsar_client_t *client, pulsar_close_callback callback, - void *ctx); - -PULSAR_PUBLIC void pulsar_client_free(pulsar_client_t *client); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/client_configuration.h b/pulsar-client-cpp/include/pulsar/c/client_configuration.h deleted file mode 100644 index 3bf9432264107..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/client_configuration.h +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum -{ - pulsar_DEBUG = 0, - pulsar_INFO = 1, - pulsar_WARN = 2, - pulsar_ERROR = 3 -} pulsar_logger_level_t; - -typedef void (*pulsar_logger)(pulsar_logger_level_t level, const char *file, int line, const char *message, - void *ctx); - -typedef struct _pulsar_client_configuration pulsar_client_configuration_t; -typedef struct _pulsar_authentication pulsar_authentication_t; - -PULSAR_PUBLIC pulsar_client_configuration_t *pulsar_client_configuration_create(); - -PULSAR_PUBLIC void pulsar_client_configuration_free(pulsar_client_configuration_t *conf); - -/** - * Set the authentication method to be used with the broker - * - * @param authentication the authentication data to use - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_auth(pulsar_client_configuration_t *conf, - pulsar_authentication_t *authentication); - -/** - * Configure a limit on the amount of memory that will be allocated by this client instance. - * Setting this to 0 will disable the limit. By default this is disabled. 
- * - * @param memoryLimitBytes the memory limit - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_memory_limit(pulsar_client_configuration_t *conf, - unsigned long long memoryLimitBytes); - -/** - * @return the client memory limit in bytes - */ -PULSAR_PUBLIC unsigned long long pulsar_client_configuration_get_memory_limit( - pulsar_client_configuration_t *conf); - -/** - * Set timeout on client operations (subscribe, create producer, close, unsubscribe) - * Default is 30 seconds. - * - * @param timeout the timeout after which the operation will be considered as failed - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_operation_timeout_seconds( - pulsar_client_configuration_t *conf, int timeout); - -/** - * @return the client operations timeout in seconds - */ -PULSAR_PUBLIC int pulsar_client_configuration_get_operation_timeout_seconds( - pulsar_client_configuration_t *conf); - -/** - * Set the number of IO threads to be used by the Pulsar client. Default is 1 - * thread. - * - * @param threads number of threads - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_io_threads(pulsar_client_configuration_t *conf, - int threads); - -/** - * @return the number of IO threads to use - */ -PULSAR_PUBLIC int pulsar_client_configuration_get_io_threads(pulsar_client_configuration_t *conf); - -/** - * Set the number of threads to be used by the Pulsar client when delivering messages - * through message listener. Default is 1 thread per Pulsar client. - * - * If using more than 1 thread, messages for distinct MessageListener will be - * delivered in different threads, however a single MessageListener will always - * be assigned to the same thread. 
- * - * @param threads number of threads - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_message_listener_threads( - pulsar_client_configuration_t *conf, int threads); - -/** - * @return the number of IO threads to use - */ -PULSAR_PUBLIC int pulsar_client_configuration_get_message_listener_threads( - pulsar_client_configuration_t *conf); - -/** - * Number of concurrent lookup-requests allowed on each broker-connection to prevent overload on broker. - * (default: 50000) It should be configured with higher value only in case of it requires to - * produce/subscribe on - * thousands of topic using created {@link PulsarClient} - * - * @param concurrentLookupRequest - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_concurrent_lookup_request( - pulsar_client_configuration_t *conf, int concurrentLookupRequest); - -/** - * @return Get configured total allowed concurrent lookup-request. - */ -PULSAR_PUBLIC int pulsar_client_configuration_get_concurrent_lookup_request( - pulsar_client_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_client_configuration_set_logger(pulsar_client_configuration_t *conf, - pulsar_logger logger, void *ctx); - -PULSAR_PUBLIC void pulsar_client_configuration_set_use_tls(pulsar_client_configuration_t *conf, int useTls); - -PULSAR_PUBLIC int pulsar_client_configuration_is_use_tls(pulsar_client_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_client_configuration_set_tls_trust_certs_file_path( - pulsar_client_configuration_t *conf, const char *tlsTrustCertsFilePath); - -PULSAR_PUBLIC const char *pulsar_client_configuration_get_tls_trust_certs_file_path( - pulsar_client_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_client_configuration_set_tls_allow_insecure_connection( - pulsar_client_configuration_t *conf, int allowInsecure); - -PULSAR_PUBLIC int pulsar_client_configuration_is_tls_allow_insecure_connection( - pulsar_client_configuration_t *conf); - -/* - * Initialize stats interval in seconds. 
Stats are printed and reset after every 'statsIntervalInSeconds'. - * Set to 0 in order to disable stats collection. - */ -PULSAR_PUBLIC void pulsar_client_configuration_set_stats_interval_in_seconds( - pulsar_client_configuration_t *conf, const unsigned int interval); - -PULSAR_PUBLIC int pulsar_client_configuration_is_validate_hostname(pulsar_client_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_client_configuration_set_validate_hostname(pulsar_client_configuration_t *conf, - int validateHostName); - -/* - * Get the stats interval set in the client. - */ -PULSAR_PUBLIC const unsigned int pulsar_client_configuration_get_stats_interval_in_seconds( - pulsar_client_configuration_t *conf); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/consumer.h b/pulsar-client-cpp/include/pulsar/c/consumer.h deleted file mode 100644 index 52610d2df59bd..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/consumer.h +++ /dev/null @@ -1,257 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#include - -typedef struct _pulsar_consumer pulsar_consumer_t; - -typedef void (*pulsar_result_callback)(pulsar_result, void *); - -typedef void (*pulsar_receive_callback)(pulsar_result result, pulsar_message_t *msg, void *ctx); - -/** - * @return the topic this consumer is subscribed to - */ -PULSAR_PUBLIC const char *pulsar_consumer_get_topic(pulsar_consumer_t *consumer); - -/** - * @return the consumer name - */ -PULSAR_PUBLIC const char *pulsar_consumer_get_subscription_name(pulsar_consumer_t *consumer); - -/** - * Unsubscribe the current consumer from the topic. - * - * This method will block until the operation is completed. Once the consumer is - * unsubscribed, no more messages will be received and subsequent new messages - * will not be retained for this consumer. - * - * This consumer object cannot be reused. - * - * @see asyncUnsubscribe - * @return Result::ResultOk if the unsubscribe operation completed successfully - * @return Result::ResultError if the unsubscribe operation failed - */ -PULSAR_PUBLIC pulsar_result pulsar_consumer_unsubscribe(pulsar_consumer_t *consumer); - -/** - * Asynchronously unsubscribe the current consumer from the topic. - * - * This method will block until the operation is completed. Once the consumer is - * unsubscribed, no more messages will be received and subsequent new messages - * will not be retained for this consumer. - * - * This consumer object cannot be reused. - * - * @param callback the callback to get notified when the operation is complete - */ -PULSAR_PUBLIC void pulsar_consumer_unsubscribe_async(pulsar_consumer_t *consumer, - pulsar_result_callback callback, void *ctx); - -/** - * Receive a single message. - * - * If a message is not immediately available, this method will block until a new - * message is available. 
- * - * @param msg a non-const reference where the received message will be copied - * @return ResultOk when a message is received - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ -PULSAR_PUBLIC pulsar_result pulsar_consumer_receive(pulsar_consumer_t *consumer, pulsar_message_t **msg); - -/** - * - * @param msg a non-const reference where the received message will be copied - * @param timeoutMs the receive timeout in milliseconds - * @return ResultOk if a message was received - * @return ResultTimeout if the receive timeout was triggered - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ -PULSAR_PUBLIC pulsar_result pulsar_consumer_receive_with_timeout(pulsar_consumer_t *consumer, - pulsar_message_t **msg, int timeoutMs); - -/** - * Asynchronously receive a single message. - * - * This method will initiate the operation and return immediately. The provided callback - * will be triggered when the operation is complete. - * - * @param callback callback that will be triggered when the message is available - */ -PULSAR_PUBLIC void pulsar_consumer_receive_async(pulsar_consumer_t *consumer, - pulsar_receive_callback callback, void *ctx); - -/** - * Acknowledge the reception of a single message. - * - * This method will block until an acknowledgement is sent to the broker. After - * that, the message will not be re-delivered to this consumer. - * - * @see asyncAcknowledge - * @param message the message to acknowledge - * @return ResultOk if the message was successfully acknowledged - * @return ResultError if there was a failure - */ -PULSAR_PUBLIC pulsar_result pulsar_consumer_acknowledge(pulsar_consumer_t *consumer, - pulsar_message_t *message); - -PULSAR_PUBLIC pulsar_result pulsar_consumer_acknowledge_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId); - -/** - * Asynchronously acknowledge the reception of a single message. 
- * - * This method will initiate the operation and return immediately. The provided callback - * will be triggered when the operation is complete. - * - * @param message the message to acknowledge - * @param callback callback that will be triggered when the message has been acknowledged - */ -PULSAR_PUBLIC void pulsar_consumer_acknowledge_async(pulsar_consumer_t *consumer, pulsar_message_t *message, - pulsar_result_callback callback, void *ctx); - -PULSAR_PUBLIC void pulsar_consumer_acknowledge_async_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId, - pulsar_result_callback callback, void *ctx); - -/** - * Acknowledge the reception of all the messages in the stream up to (and including) - * the provided message. - * - * This method will block until an acknowledgement is sent to the broker. After - * that, the messages will not be re-delivered to this consumer. - * - * Cumulative acknowledge cannot be used when the consumer type is set to ConsumerShared. - * - * It's equivalent to calling asyncAcknowledgeCumulative(const Message&, ResultCallback) and - * waiting for the callback to be triggered. - * - * @param message the last message in the stream to acknowledge - * @return ResultOk if the message was successfully acknowledged. All previously delivered messages for - * this topic are also acknowledged. - * @return ResultError if there was a failure - */ -PULSAR_PUBLIC pulsar_result pulsar_consumer_acknowledge_cumulative(pulsar_consumer_t *consumer, - pulsar_message_t *message); - -PULSAR_PUBLIC pulsar_result pulsar_consumer_acknowledge_cumulative_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId); - -/** - * Asynchronously acknowledge the reception of all the messages in the stream up to (and - * including) the provided message. - * - * This method will initiate the operation and return immediately. The provided callback - * will be triggered when the operation is complete. 
- * - * @param message the message to acknowledge - * @param callback callback that will be triggered when the message has been acknowledged - */ -PULSAR_PUBLIC void pulsar_consumer_acknowledge_cumulative_async(pulsar_consumer_t *consumer, - pulsar_message_t *message, - pulsar_result_callback callback, void *ctx); - -PULSAR_PUBLIC void pulsar_consumer_acknowledge_cumulative_async_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId, - pulsar_result_callback callback, - void *ctx); - -/** - * Acknowledge the failure to process a single message. - *

- * When a message is "negatively acked" it will be marked for redelivery after - * some fixed delay. The delay is configurable when constructing the consumer - * with {@link ConsumerConfiguration#setNegativeAckRedeliveryDelayMs}. - *

- * This call is not blocking. - * - * @param message - * The {@code Message} to be acknowledged - */ -PULSAR_PUBLIC void pulsar_consumer_negative_acknowledge(pulsar_consumer_t *consumer, - pulsar_message_t *message); - -/** - * Acknowledge the failure to process a single message through its message id - *

- * When a message is "negatively acked" it will be marked for redelivery after - * some fixed delay. The delay is configurable when constructing the consumer - * with {@link ConsumerConfiguration#setNegativeAckRedeliveryDelayMs}. - *

- * This call is not blocking. - * - * @param message - * The message id to be acknowledged - */ -PULSAR_PUBLIC void pulsar_consumer_negative_acknowledge_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId); - -PULSAR_PUBLIC pulsar_result pulsar_consumer_close(pulsar_consumer_t *consumer); - -PULSAR_PUBLIC void pulsar_consumer_close_async(pulsar_consumer_t *consumer, pulsar_result_callback callback, - void *ctx); - -PULSAR_PUBLIC void pulsar_consumer_free(pulsar_consumer_t *consumer); - -/* - * Pause receiving messages via the messageListener, till resumeMessageListener() is called. - */ -PULSAR_PUBLIC pulsar_result pulsar_consumer_pause_message_listener(pulsar_consumer_t *consumer); - -/* - * Resume receiving the messages via the messageListener. - * Asynchronously receive all the messages enqueued from time pauseMessageListener() was called. - */ -PULSAR_PUBLIC pulsar_result resume_message_listener(pulsar_consumer_t *consumer); - -/** - * Redelivers all the unacknowledged messages. In Failover mode, the request is ignored if the consumer is - * not - * active for the given topic. In Shared mode, the consumers messages to be redelivered are distributed - * across all - * the connected consumers. This is a non blocking call and doesn't throw an exception. In case the - * connection - * breaks, the messages are redelivered after reconnect. 
- */ -PULSAR_PUBLIC void pulsar_consumer_redeliver_unacknowledged_messages(pulsar_consumer_t *consumer); - -PULSAR_PUBLIC void pulsar_consumer_seek_async(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId, - pulsar_result_callback callback, void *ctx); - -PULSAR_PUBLIC pulsar_result pulsar_consumer_seek(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId); - -PULSAR_PUBLIC int pulsar_consumer_is_connected(pulsar_consumer_t *consumer); - -PULSAR_PUBLIC pulsar_result pulsar_consumer_get_last_message_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h b/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h deleted file mode 100644 index fc3ccec640bed..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h +++ /dev/null @@ -1,316 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include "consumer.h" -#include "producer_configuration.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_consumer_configuration pulsar_consumer_configuration_t; - -typedef enum -{ - /** - * There can be only 1 consumer on the same topic with the same consumerName - */ - pulsar_ConsumerExclusive, - - /** - * Multiple consumers will be able to use the same consumerName and the messages - * will be dispatched according to a round-robin rotation between the connected consumers - */ - pulsar_ConsumerShared, - - /** Only one consumer is active on the subscription; Subscription can have N consumers - * connected one of which will get promoted to master if the current master becomes inactive - */ - pulsar_ConsumerFailover, - - /** - * Multiple consumer will be able to use the same subscription and all messages with the same key - * will be dispatched to only one consumer - */ - pulsar_ConsumerKeyShared -} pulsar_consumer_type; - -typedef enum -{ - /** - * the latest position which means the start consuming position will be the last message - */ - initial_position_latest, - /** - * the earliest position which means the start consuming position will be the first message - */ - initial_position_earliest -} initial_position; - -typedef enum -{ - // This is the default option to fail consume until crypto succeeds - pulsar_ConsumerFail, - // Message is silently acknowledged and not delivered to the application - pulsar_ConsumerDiscard, - // Deliver the encrypted message to the application. It's the application's - // responsibility to decrypt the message. If message is also compressed, - // decompression will fail. 
If message contain batch messages, client will - // not be able to retrieve individual messages in the batch - pulsar_ConsumerConsume -} pulsar_consumer_crypto_failure_action; - -/// Callback definition for MessageListener -typedef void (*pulsar_message_listener)(pulsar_consumer_t *consumer, pulsar_message_t *msg, void *ctx); - -PULSAR_PUBLIC pulsar_consumer_configuration_t *pulsar_consumer_configuration_create(); - -PULSAR_PUBLIC void pulsar_consumer_configuration_free( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Specify the consumer type. The consumer type enables - * specifying the type of subscription. In Exclusive subscription, - * only a single consumer is allowed to attach to the subscription. Other consumers - * will get an error message. In Shared subscription, multiple consumers will be - * able to use the same subscription name and the messages will be dispatched in a - * round robin fashion. In Failover subscription, a primary-failover subscription model - * allows for multiple consumers to attach to a single subscription, though only one - * of them will be “master” at a given time. Only the primary consumer will receive - * messages. When the primary consumer gets disconnected, one among the failover - * consumers will be promoted to primary and will start getting messages. 
- */ -PULSAR_PUBLIC void pulsar_consumer_configuration_set_consumer_type( - pulsar_consumer_configuration_t *consumer_configuration, pulsar_consumer_type consumerType); - -PULSAR_PUBLIC pulsar_consumer_type -pulsar_consumer_configuration_get_consumer_type(pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_configuration_set_schema_info( - pulsar_consumer_configuration_t *consumer_configuration, pulsar_schema_type schemaType, const char *name, - const char *schema, pulsar_string_map_t *properties); - -/** - * A message listener enables your application to configure how to process - * and acknowledge messages delivered. A listener will be called in order - * for every message received. - */ -PULSAR_PUBLIC void pulsar_consumer_configuration_set_message_listener( - pulsar_consumer_configuration_t *consumer_configuration, pulsar_message_listener messageListener, - void *ctx); - -PULSAR_PUBLIC int pulsar_consumer_configuration_has_message_listener( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Sets the size of the consumer receive queue. - * - * The consumer receive queue controls how many messages can be accumulated by the Consumer before the - * application calls receive(). Using a higher value could potentially increase the consumer throughput - * at the expense of bigger memory utilization. - * - * Setting the consumer queue size as zero decreases the throughput of the consumer, by disabling - * pre-fetching of - * messages. This approach improves the message distribution on shared subscription, by pushing messages - * only to - * the consumers that are ready to process them. Neither receive with timeout nor Partitioned Topics can - * be - * used if the consumer queue size is zero. The receive() function call should not be interrupted when - * the consumer queue size is zero. - * - * Default value is 1000 messages and should be good for most use cases. 
- * - * @param size - * the new receiver queue size value - */ -PULSAR_PUBLIC void pulsar_consumer_configuration_set_receiver_queue_size( - pulsar_consumer_configuration_t *consumer_configuration, int size); - -PULSAR_PUBLIC int pulsar_consumer_configuration_get_receiver_queue_size( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Set the max total receiver queue size across partitons. - *

- * This setting will be used to reduce the receiver queue size for individual partitions - * {@link #setReceiverQueueSize(int)} if the total exceeds this value (default: 50000). - * - * @param maxTotalReceiverQueueSizeAcrossPartitions - */ -PULSAR_PUBLIC void pulsar_consumer_set_max_total_receiver_queue_size_across_partitions( - pulsar_consumer_configuration_t *consumer_configuration, int maxTotalReceiverQueueSizeAcrossPartitions); - -/** - * @return the configured max total receiver queue size across partitions - */ -PULSAR_PUBLIC int pulsar_consumer_get_max_total_receiver_queue_size_across_partitions( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_set_consumer_name(pulsar_consumer_configuration_t *consumer_configuration, - const char *consumerName); - -PULSAR_PUBLIC const char *pulsar_consumer_get_consumer_name( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Set the timeout in milliseconds for unacknowledged messages, the timeout needs to be greater than - * 10 seconds. An Exception is thrown if the given value is less than 10000 (10 seconds). - * If a successful acknowledgement is not sent within the timeout all the unacknowledged messages are - * redelivered. - * @param timeout in milliseconds - */ -PULSAR_PUBLIC void pulsar_consumer_set_unacked_messages_timeout_ms( - pulsar_consumer_configuration_t *consumer_configuration, const uint64_t milliSeconds); - -/** - * @return the configured timeout in milliseconds for unacked messages. - */ -PULSAR_PUBLIC long pulsar_consumer_get_unacked_messages_timeout_ms( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Set the delay to wait before re-delivering messages that have failed to be process. - *

- * When application uses {@link Consumer#negativeAcknowledge(Message)}, the failed message - * will be redelivered after a fixed timeout. The default is 1 min. - * - * @param redeliveryDelay - * redelivery delay for failed messages - * @param timeUnit - * unit in which the timeout is provided. - * @return the consumer builder instance - */ -PULSAR_PUBLIC void pulsar_configure_set_negative_ack_redelivery_delay_ms( - pulsar_consumer_configuration_t *consumer_configuration, long redeliveryDelayMillis); - -/** - * Get the configured delay to wait before re-delivering messages that have failed to be process. - * - * @param consumer_configuration the consumer conf object - * @return redelivery delay for failed messages - */ -PULSAR_PUBLIC long pulsar_configure_get_negative_ack_redelivery_delay_ms( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Set time window in milliseconds for grouping message ACK requests. An ACK request is not sent - * to broker until the time window reaches its end, or the number of grouped messages reaches - * limit. Default is 100 milliseconds. If it's set to a non-positive value, ACK requests will be - * directly sent to broker without grouping. - * - * @param consumer_configuration the consumer conf object - * @param ackGroupMillis time of ACK grouping window in milliseconds. - */ -PULSAR_PUBLIC void pulsar_configure_set_ack_grouping_time_ms( - pulsar_consumer_configuration_t *consumer_configuration, long ackGroupingMillis); - -/** - * Get grouping time window in milliseconds. - * - * @param consumer_configuration the consumer conf object - * @return grouping time window in milliseconds. - */ -PULSAR_PUBLIC long pulsar_configure_get_ack_grouping_time_ms( - pulsar_consumer_configuration_t *consumer_configuration); - -/** - * Set max number of grouped messages within one grouping time window. If it's set to a - * non-positive value, number of grouped messages is not limited. Default is 1000. 
- * - * @param consumer_configuration the consumer conf object - * @param maxGroupingSize max number of grouped messages with in one grouping time window. - */ -PULSAR_PUBLIC void pulsar_configure_set_ack_grouping_max_size( - pulsar_consumer_configuration_t *consumer_configuration, long maxGroupingSize); - -/** - * Get max number of grouped messages within one grouping time window. - * - * @param consumer_configuration the consumer conf object - * @return max number of grouped messages within one grouping time window. - */ -PULSAR_PUBLIC long pulsar_configure_get_ack_grouping_max_size( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC int pulsar_consumer_is_encryption_enabled( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_configuration_set_default_crypto_key_reader( - pulsar_consumer_configuration_t *consumer_configuration, const char *public_key_path, - const char *private_key_path); - -PULSAR_PUBLIC pulsar_consumer_crypto_failure_action pulsar_consumer_configuration_get_crypto_failure_action( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_configuration_set_crypto_failure_action( - pulsar_consumer_configuration_t *consumer_configuration, - pulsar_consumer_crypto_failure_action cryptoFailureAction); - -PULSAR_PUBLIC int pulsar_consumer_is_read_compacted(pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_set_read_compacted(pulsar_consumer_configuration_t *consumer_configuration, - int compacted); - -PULSAR_PUBLIC int pulsar_consumer_get_subscription_initial_position( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_set_subscription_initial_position( - pulsar_consumer_configuration_t *consumer_configuration, initial_position subscriptionInitialPosition); - -PULSAR_PUBLIC void 
pulsar_consumer_configuration_set_property(pulsar_consumer_configuration_t *conf, - const char *name, const char *value); - -PULSAR_PUBLIC void pulsar_consumer_configuration_set_priority_level( - pulsar_consumer_configuration_t *consumer_configuration, int priority_level); - -PULSAR_PUBLIC int pulsar_consumer_configuration_get_priority_level( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_configuration_set_max_pending_chunked_message( - pulsar_consumer_configuration_t *consumer_configuration, int max_pending_chunked_message); - -PULSAR_PUBLIC int pulsar_consumer_configuration_get_max_pending_chunked_message( - pulsar_consumer_configuration_t *consumer_configuration); - -PULSAR_PUBLIC void pulsar_consumer_configuration_set_auto_ack_oldest_chunked_message_on_queue_full( - pulsar_consumer_configuration_t *consumer_configuration, - int auto_ack_oldest_chunked_message_on_queue_full); - -PULSAR_PUBLIC int pulsar_consumer_configuration_is_auto_ack_oldest_chunked_message_on_queue_full( - pulsar_consumer_configuration_t *consumer_configuration); - -// const CryptoKeyReaderPtr getCryptoKeyReader() -// -// const; -// ConsumerConfiguration& -// setCryptoKeyReader(CryptoKeyReaderPtr -// cryptoKeyReader); -// -// ConsumerCryptoFailureAction getCryptoFailureAction() -// -// const; -// ConsumerConfiguration& -// setCryptoFailureAction(ConsumerCryptoFailureAction -// action); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/message.h b/pulsar-client-cpp/include/pulsar/c/message.h deleted file mode 100644 index f54d0254e7895..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/message.h +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#include -#include "string_map.h" - -typedef struct _pulsar_message pulsar_message_t; -typedef struct _pulsar_message_id pulsar_message_id_t; - -PULSAR_PUBLIC pulsar_message_t *pulsar_message_create(); -PULSAR_PUBLIC void pulsar_message_free(pulsar_message_t *message); - -/// Builder - -PULSAR_PUBLIC void pulsar_message_set_content(pulsar_message_t *message, const void *data, size_t size); - -/** - * Set content of the message to a buffer already allocated by the caller. No copies of - * this buffer will be made. The caller is responsible to ensure the memory buffer is - * valid until the message has been persisted (or an error is returned). - */ -PULSAR_PUBLIC void pulsar_message_set_allocated_content(pulsar_message_t *message, void *data, size_t size); - -PULSAR_PUBLIC void pulsar_message_set_property(pulsar_message_t *message, const char *name, - const char *value); - -/** - * set partition key for the message routing - * @param hash of this key is used to determine message's topic partition - */ -PULSAR_PUBLIC void pulsar_message_set_partition_key(pulsar_message_t *message, const char *partitionKey); - -/** - * Sets the ordering key of the message for message dispatch in Key_Shared mode. 
- * @param the ordering key for the message - */ -PULSAR_PUBLIC void pulsar_message_set_ordering_key(pulsar_message_t *message, const char *orderingKey); - -/** - * Set the event timestamp for the message. - */ -PULSAR_PUBLIC void pulsar_message_set_event_timestamp(pulsar_message_t *message, uint64_t eventTimestamp); - -/** - * Specify a custom sequence id for the message being published. - *

- * The sequence id can be used for deduplication purposes and it needs to follow these rules: - *

    - *
  1. sequenceId >= 0 - *
  2. Sequence id for a message needs to be greater than sequence id for earlier messages: - * sequenceId(N+1) > sequenceId(N) - *
  3. It's not necessary for sequence ids to be consecutive. There can be holes between messages. Eg. the - * sequenceId could represent an offset or a cumulative size. - *
- * - * @param sequenceId - * the sequence id to assign to the current message - */ -PULSAR_PUBLIC void pulsar_message_set_sequence_id(pulsar_message_t *message, int64_t sequenceId); - -/** - * Specify a delay for the delivery of the messages. - * - * @param delay the delay in milliseconds - */ -PULSAR_PUBLIC void pulsar_message_set_deliver_after(pulsar_message_t *message, uint64_t delayMillis); - -/** - * Specify the this message should not be delivered earlier than the - * specified timestamp. - * - * @param deliveryTimestamp UTC based timestamp in milliseconds - */ -PULSAR_PUBLIC void pulsar_message_set_deliver_at(pulsar_message_t *message, uint64_t deliveryTimestampMillis); - -/** - * override namespace replication clusters. note that it is the - * caller's responsibility to provide valid cluster names, and that - * all clusters have been previously configured as topics. - * - * given an empty list, the message will replicate per the namespace - * configuration. - * - * @param clusters where to send this message. - */ -PULSAR_PUBLIC void pulsar_message_set_replication_clusters(pulsar_message_t *message, const char **clusters, - size_t size); - -/** - * Do not replicate this message - * @param flag if true, disable replication, otherwise use default - * replication - */ -PULSAR_PUBLIC void pulsar_message_disable_replication(pulsar_message_t *message, int flag); - -/// Accessor for built messages - -/** - * Return the properties attached to the message. - * Properties are application defined key/value pairs that will be attached to the message - * - * @return an unmodifiable view of the properties map - */ -PULSAR_PUBLIC pulsar_string_map_t *pulsar_message_get_properties(pulsar_message_t *message); - -/** - * Check whether the message has a specific property attached. 
- * - * @param name the name of the property to check - * @return true if the message has the specified property - * @return false if the property is not defined - */ -PULSAR_PUBLIC int pulsar_message_has_property(pulsar_message_t *message, const char *name); - -/** - * Get the value of a specific property - * - * @param name the name of the property - * @return the value of the property or null if the property was not defined - */ -PULSAR_PUBLIC const char *pulsar_message_get_property(pulsar_message_t *message, const char *name); - -/** - * Get the content of the message - * - * - * @return the pointer to the message payload - */ -PULSAR_PUBLIC const void *pulsar_message_get_data(pulsar_message_t *message); - -/** - * Get the length of the message - * - * @return the length of the message payload - */ -PULSAR_PUBLIC uint32_t pulsar_message_get_length(pulsar_message_t *message); - -/** - * Get the unique message ID associated with this message. - * - * The message id can be used to univocally refer to a message without having to keep the entire payload - * in memory. - * - * Only messages received from the consumer will have a message id assigned. - * - */ -PULSAR_PUBLIC pulsar_message_id_t *pulsar_message_get_message_id(pulsar_message_t *message); - -/** - * Get the partition key for this message - * @return key string that is hashed to determine message's topic partition - */ -PULSAR_PUBLIC const char *pulsar_message_get_partitionKey(pulsar_message_t *message); -PULSAR_PUBLIC int pulsar_message_has_partition_key(pulsar_message_t *message); - -/** - * Get the ordering key of the message for message dispatch in Key_Shared mode. 
- * Partition key Will be used if ordering key not specified - */ -PULSAR_PUBLIC const char *pulsar_message_get_orderingKey(pulsar_message_t *message); -PULSAR_PUBLIC int pulsar_message_has_ordering_key(pulsar_message_t *message); - -/** - * Get the UTC based timestamp in milliseconds referring to when the message was published by the client - * producer - */ -PULSAR_PUBLIC uint64_t pulsar_message_get_publish_timestamp(pulsar_message_t *message); - -/** - * Get the event timestamp associated with this message. It is set by the client producer. - */ -PULSAR_PUBLIC uint64_t pulsar_message_get_event_timestamp(pulsar_message_t *message); - -PULSAR_PUBLIC const char *pulsar_message_get_topic_name(pulsar_message_t *message); - -PULSAR_PUBLIC int pulsar_message_get_redelivery_count(pulsar_message_t *message); - -PULSAR_PUBLIC int pulsar_message_has_schema_version(pulsar_message_t *message); - -PULSAR_PUBLIC const char *pulsar_message_get_schemaVersion(pulsar_message_t *message); - -PULSAR_PUBLIC void pulsar_message_set_schema_version(pulsar_message_t *message, const char *schemaVersion); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/message_id.h b/pulsar-client-cpp/include/pulsar/c/message_id.h deleted file mode 100644 index 289c3bdc5fbd7..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/message_id.h +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -typedef struct _pulsar_message_id pulsar_message_id_t; - -/** - * MessageId representing the "earliest" or "oldest available" message stored in the topic - */ -PULSAR_PUBLIC const pulsar_message_id_t *pulsar_message_id_earliest(); - -/** - * MessageId representing the "latest" or "last published" message in the topic - */ -PULSAR_PUBLIC const pulsar_message_id_t *pulsar_message_id_latest(); - -/** - * Serialize the message id into a binary string for storing - */ -PULSAR_PUBLIC void *pulsar_message_id_serialize(pulsar_message_id_t *messageId, int *len); - -/** - * Deserialize a message id from a binary string - */ -PULSAR_PUBLIC pulsar_message_id_t *pulsar_message_id_deserialize(const void *buffer, uint32_t len); - -PULSAR_PUBLIC char *pulsar_message_id_str(pulsar_message_id_t *messageId); - -PULSAR_PUBLIC void pulsar_message_id_free(pulsar_message_id_t *messageId); - -#ifdef __cplusplus -} -#endif \ No newline at end of file diff --git a/pulsar-client-cpp/include/pulsar/c/message_router.h b/pulsar-client-cpp/include/pulsar/c/message_router.h deleted file mode 100644 index ed74f07014242..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/message_router.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_topic_metadata pulsar_topic_metadata_t; - -typedef int (*pulsar_message_router)(pulsar_message_t *msg, pulsar_topic_metadata_t *topicMetadata, - void *ctx); - -PULSAR_PUBLIC int pulsar_topic_metadata_get_num_partitions(pulsar_topic_metadata_t *topicMetadata); - -#ifdef __cplusplus -} -#endif \ No newline at end of file diff --git a/pulsar-client-cpp/include/pulsar/c/producer.h b/pulsar-client-cpp/include/pulsar/c/producer.h deleted file mode 100644 index bf51f5629d90a..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/producer.h +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -#include - -typedef struct _pulsar_producer pulsar_producer_t; - -typedef void (*pulsar_send_callback)(pulsar_result, pulsar_message_id_t *msgId, void *ctx); -typedef void (*pulsar_close_callback)(pulsar_result, void *ctx); -typedef void (*pulsar_flush_callback)(pulsar_result, void *ctx); - -/** - * @return the topic to which producer is publishing to - */ -PULSAR_PUBLIC const char *pulsar_producer_get_topic(pulsar_producer_t *producer); - -/** - * @return the producer name which could have been assigned by the system or specified by the client - */ -PULSAR_PUBLIC const char *pulsar_producer_get_producer_name(pulsar_producer_t *producer); - -/** - * Publish a message on the topic associated with this Producer. - * - * This method will block until the message will be accepted and persisted - * by the broker. In case of errors, the client library will try to - * automatically recover and use a different broker. - * - * If it wasn't possible to successfully publish the message within the sendTimeout, - * an error will be returned. - * - * This method is equivalent to asyncSend() and wait until the callback is triggered. - * - * @param msg message to publish - * @return ResultOk if the message was published successfully - * @return ResultWriteError if it wasn't possible to publish the message - */ -PULSAR_PUBLIC pulsar_result pulsar_producer_send(pulsar_producer_t *producer, pulsar_message_t *msg); - -/** - * Asynchronously publish a message on the topic associated with this Producer. - * - * This method will initiate the publish operation and return immediately. The - * provided callback will be triggered when the message has been be accepted and persisted - * by the broker. 
In case of errors, the client library will try to - * automatically recover and use a different broker. - * - * If it wasn't possible to successfully publish the message within the sendTimeout, the - * callback will be triggered with a Result::WriteError code. - * - * @param msg message to publish - * @param callback the callback to get notification of the completion - */ -PULSAR_PUBLIC void pulsar_producer_send_async(pulsar_producer_t *producer, pulsar_message_t *msg, - pulsar_send_callback callback, void *ctx); - -/** - * Get the last sequence id that was published by this producer. - * - * This represent either the automatically assigned or custom sequence id (set on the MessageBuilder) that - * was published and acknowledged by the broker. - * - * After recreating a producer with the same producer name, this will return the last message that was - * published in - * the previous producer session, or -1 if there no message was ever published. - * - * @return the last sequence id published by this producer - */ -PULSAR_PUBLIC int64_t pulsar_producer_get_last_sequence_id(pulsar_producer_t *producer); - -/** - * Close the producer and release resources allocated. - * - * No more writes will be accepted from this producer. Waits until - * all pending write requests are persisted. In case of errors, - * pending writes will not be retried. - * - * @return an error code to indicate the success or failure - */ -PULSAR_PUBLIC pulsar_result pulsar_producer_close(pulsar_producer_t *producer); - -/** - * Close the producer and release resources allocated. - * - * No more writes will be accepted from this producer. The provided callback will be - * triggered when all pending write requests are persisted. In case of errors, - * pending writes will not be retried. 
- */ -PULSAR_PUBLIC void pulsar_producer_close_async(pulsar_producer_t *producer, pulsar_close_callback callback, - void *ctx); - -// Flush all the messages buffered in the client and wait until all messages have been successfully persisted. -PULSAR_PUBLIC pulsar_result pulsar_producer_flush(pulsar_producer_t *producer); - -PULSAR_PUBLIC void pulsar_producer_flush_async(pulsar_producer_t *producer, pulsar_flush_callback callback, - void *ctx); - -PULSAR_PUBLIC void pulsar_producer_free(pulsar_producer_t *producer); - -PULSAR_PUBLIC int pulsar_producer_is_connected(pulsar_producer_t *producer); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/producer_configuration.h b/pulsar-client-cpp/include/pulsar/c/producer_configuration.h deleted file mode 100644 index 9e5e5b0d0ef2e..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/producer_configuration.h +++ /dev/null @@ -1,226 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum -{ - pulsar_UseSinglePartition, - pulsar_RoundRobinDistribution, - pulsar_CustomPartition -} pulsar_partitions_routing_mode; - -typedef enum -{ - pulsar_Murmur3_32Hash, - pulsar_BoostHash, - pulsar_JavaStringHash -} pulsar_hashing_scheme; - -typedef enum -{ - pulsar_CompressionNone = 0, - pulsar_CompressionLZ4 = 1, - pulsar_CompressionZLib = 2, - pulsar_CompressionZSTD = 3, - pulsar_CompressionSNAPPY = 4 -} pulsar_compression_type; - -typedef enum -{ - pulsar_None = 0, - pulsar_String = 1, - pulsar_Json = 2, - pulsar_Protobuf = 3, - pulsar_Avro = 4, - pulsar_Boolean = 5, - pulsar_Int8 = 6, - pulsar_Int16 = 7, - pulsar_Int32 = 8, - pulsar_Int64 = 9, - pulsar_Float32 = 10, - pulsar_Float64 = 11, - pulsar_KeyValue = 15, - pulsar_Bytes = -1, - pulsar_AutoConsume = -3, - pulsar_AutoPublish = -4, -} pulsar_schema_type; - -typedef enum -{ - // This is the default option to fail send if crypto operation fails - pulsar_ProducerFail, - // Ignore crypto failure and proceed with sending unencrypted messages - pulsar_ProducerSend -} pulsar_producer_crypto_failure_action; - -typedef struct _pulsar_producer_configuration pulsar_producer_configuration_t; - -typedef struct _pulsar_crypto_key_reader pulsar_crypto_key_reader; - -PULSAR_PUBLIC pulsar_producer_configuration_t *pulsar_producer_configuration_create(); - -PULSAR_PUBLIC void pulsar_producer_configuration_free(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_producer_name(pulsar_producer_configuration_t *conf, - const char *producerName); - -PULSAR_PUBLIC const char *pulsar_producer_configuration_get_producer_name( - pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_send_timeout(pulsar_producer_configuration_t *conf, - int sendTimeoutMs); - -PULSAR_PUBLIC int 
pulsar_producer_configuration_get_send_timeout(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_initial_sequence_id( - pulsar_producer_configuration_t *conf, int64_t initialSequenceId); - -PULSAR_PUBLIC int64_t -pulsar_producer_configuration_get_initial_sequence_id(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_compression_type( - pulsar_producer_configuration_t *conf, pulsar_compression_type compressionType); - -PULSAR_PUBLIC pulsar_compression_type -pulsar_producer_configuration_get_compression_type(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_schema_info(pulsar_producer_configuration_t *conf, - pulsar_schema_type schemaType, - const char *name, const char *schema, - pulsar_string_map_t *properties); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_max_pending_messages( - pulsar_producer_configuration_t *conf, int maxPendingMessages); -PULSAR_PUBLIC int pulsar_producer_configuration_get_max_pending_messages( - pulsar_producer_configuration_t *conf); - -/** - * Set the number of max pending messages across all the partitions - *

- * This setting will be used to lower the max pending messages for each partition - * ({@link #setMaxPendingMessages(int)}), if the total exceeds the configured value. - * - * @param maxPendingMessagesAcrossPartitions - */ -PULSAR_PUBLIC void pulsar_producer_configuration_set_max_pending_messages_across_partitions( - pulsar_producer_configuration_t *conf, int maxPendingMessagesAcrossPartitions); - -/** - * - * @return the maximum number of pending messages allowed across all the partitions - */ -PULSAR_PUBLIC int pulsar_producer_configuration_get_max_pending_messages_across_partitions( - pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_partitions_routing_mode( - pulsar_producer_configuration_t *conf, pulsar_partitions_routing_mode mode); - -PULSAR_PUBLIC pulsar_partitions_routing_mode -pulsar_producer_configuration_get_partitions_routing_mode(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_message_router(pulsar_producer_configuration_t *conf, - pulsar_message_router router, void *ctx); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_hashing_scheme(pulsar_producer_configuration_t *conf, - pulsar_hashing_scheme scheme); - -PULSAR_PUBLIC pulsar_hashing_scheme -pulsar_producer_configuration_get_hashing_scheme(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_lazy_start_partitioned_producers( - pulsar_producer_configuration_t *conf, int useLazyStartPartitionedProducers); - -PULSAR_PUBLIC int pulsar_producer_configuration_get_lazy_start_partitioned_producers( - pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_block_if_queue_full( - pulsar_producer_configuration_t *conf, int blockIfQueueFull); - -PULSAR_PUBLIC int pulsar_producer_configuration_get_block_if_queue_full( - pulsar_producer_configuration_t *conf); - -// Zero queue size feature will not be supported on consumer end if 
batching is enabled -PULSAR_PUBLIC void pulsar_producer_configuration_set_batching_enabled(pulsar_producer_configuration_t *conf, - int batchingEnabled); - -PULSAR_PUBLIC int pulsar_producer_configuration_get_batching_enabled(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_batching_max_messages( - pulsar_producer_configuration_t *conf, unsigned int batchingMaxMessages); - -PULSAR_PUBLIC unsigned int pulsar_producer_configuration_get_batching_max_messages( - pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_batching_max_allowed_size_in_bytes( - pulsar_producer_configuration_t *conf, unsigned long batchingMaxAllowedSizeInBytes); - -PULSAR_PUBLIC unsigned long pulsar_producer_configuration_get_batching_max_allowed_size_in_bytes( - pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_batching_max_publish_delay_ms( - pulsar_producer_configuration_t *conf, unsigned long batchingMaxPublishDelayMs); - -PULSAR_PUBLIC unsigned long pulsar_producer_configuration_get_batching_max_publish_delay_ms( - pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_property(pulsar_producer_configuration_t *conf, - const char *name, const char *value); - -PULSAR_PUBLIC int pulsar_producer_is_encryption_enabled(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_default_crypto_key_reader( - pulsar_producer_configuration_t *conf, const char *public_key_path, const char *private_key_path); - -PULSAR_PUBLIC pulsar_producer_crypto_failure_action -pulsar_producer_configuration_get_crypto_failure_action(pulsar_producer_configuration_t *conf); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_crypto_failure_action( - pulsar_producer_configuration_t *conf, pulsar_producer_crypto_failure_action cryptoFailureAction); - -PULSAR_PUBLIC void 
pulsar_producer_configuration_set_encryption_key(pulsar_producer_configuration_t *conf, - const char *key); - -PULSAR_PUBLIC void pulsar_producer_configuration_set_chunking_enabled(pulsar_producer_configuration_t *conf, - int chunkingEnabled); - -PULSAR_PUBLIC int pulsar_producer_configuration_is_chunking_enabled(pulsar_producer_configuration_t *conf); - -// const CryptoKeyReaderPtr getCryptoKeyReader() const; -// ProducerConfiguration &setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader); -// -// ProducerCryptoFailureAction getCryptoFailureAction() const; -// ProducerConfiguration &setCryptoFailureAction(ProducerCryptoFailureAction action); -// -// std::set &getEncryptionKeys(); -// int isEncryptionEnabled() const; -// ProducerConfiguration &addEncryptionKey(std::string key); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/reader.h b/pulsar-client-cpp/include/pulsar/c/reader.h deleted file mode 100644 index 4c09ff5de9fae..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/reader.h +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_reader pulsar_reader_t; - -typedef void (*pulsar_result_callback)(pulsar_result, void *); - -/** - * @return the topic this reader is reading from - */ -PULSAR_PUBLIC const char *pulsar_reader_get_topic(pulsar_reader_t *reader); - -/** - * Read a single message. - * - * If a message is not immediately available, this method will block until a new - * message is available. - * - * @param msg a non-const reference where the received message will be copied - * @return ResultOk when a message is received - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ -PULSAR_PUBLIC pulsar_result pulsar_reader_read_next(pulsar_reader_t *reader, pulsar_message_t **msg); - -/** - * Read a single message - * - * @param msg a non-const reference where the received message will be copied - * @param timeoutMs the receive timeout in milliseconds - * @return ResultOk if a message was received - * @return ResultTimeout if the receive timeout was triggered - * @return ResultInvalidConfiguration if a message listener had been set in the configuration - */ -PULSAR_PUBLIC pulsar_result pulsar_reader_read_next_with_timeout(pulsar_reader_t *reader, - pulsar_message_t **msg, int timeoutMs); - -PULSAR_PUBLIC pulsar_result pulsar_reader_close(pulsar_reader_t *reader); - -PULSAR_PUBLIC void pulsar_reader_close_async(pulsar_reader_t *reader, pulsar_result_callback callback, - void *ctx); - -PULSAR_PUBLIC void pulsar_reader_free(pulsar_reader_t *reader); - -PULSAR_PUBLIC pulsar_result pulsar_reader_has_message_available(pulsar_reader_t *reader, int *available); - -PULSAR_PUBLIC int pulsar_reader_is_connected(pulsar_reader_t *reader); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/reader_configuration.h b/pulsar-client-cpp/include/pulsar/c/reader_configuration.h deleted file mode 100644 index 
cc8436cdc46b1..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/reader_configuration.h +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_reader_configuration pulsar_reader_configuration_t; - -typedef void (*pulsar_reader_listener)(pulsar_reader_t *reader, pulsar_message_t *msg, void *ctx); - -PULSAR_PUBLIC pulsar_reader_configuration_t *pulsar_reader_configuration_create(); - -PULSAR_PUBLIC void pulsar_reader_configuration_free(pulsar_reader_configuration_t *configuration); - -/** - * A message listener enables your application to configure how to process - * messages. A listener will be called in order for every message received. - */ -PULSAR_PUBLIC void pulsar_reader_configuration_set_reader_listener( - pulsar_reader_configuration_t *configuration, pulsar_reader_listener listener, void *ctx); - -PULSAR_PUBLIC int pulsar_reader_configuration_has_reader_listener( - pulsar_reader_configuration_t *configuration); - -/** - * Sets the size of the reader receive queue. 
- * - * The consumer receive queue controls how many messages can be accumulated by the Consumer before the - * application calls receive(). Using a higher value could potentially increase the consumer throughput - * at the expense of bigger memory utilization. - * - * Setting the consumer queue size as zero decreases the throughput of the consumer, by disabling - * pre-fetching of - * messages. This approach improves the message distribution on shared subscription, by pushing messages - * only to - * the consumers that are ready to process them. Neither receive with timeout nor Partitioned Topics can - * be - * used if the consumer queue size is zero. The receive() function call should not be interrupted when - * the consumer queue size is zero. - * - * Default value is 1000 messages and should be good for most use cases. - * - * @param size - * the new receiver queue size value - */ -PULSAR_PUBLIC void pulsar_reader_configuration_set_receiver_queue_size( - pulsar_reader_configuration_t *configuration, int size); - -PULSAR_PUBLIC int pulsar_reader_configuration_get_receiver_queue_size( - pulsar_reader_configuration_t *configuration); - -PULSAR_PUBLIC void pulsar_reader_configuration_set_reader_name(pulsar_reader_configuration_t *configuration, - const char *readerName); - -PULSAR_PUBLIC const char *pulsar_reader_configuration_get_reader_name( - pulsar_reader_configuration_t *configuration); - -PULSAR_PUBLIC void pulsar_reader_configuration_set_subscription_role_prefix( - pulsar_reader_configuration_t *configuration, const char *subscriptionRolePrefix); - -PULSAR_PUBLIC const char *pulsar_reader_configuration_get_subscription_role_prefix( - pulsar_reader_configuration_t *configuration); - -PULSAR_PUBLIC void pulsar_reader_configuration_set_read_compacted( - pulsar_reader_configuration_t *configuration, int readCompacted); - -PULSAR_PUBLIC int pulsar_reader_configuration_is_read_compacted(pulsar_reader_configuration_t *configuration); - -#ifdef __cplusplus -} 
-#endif diff --git a/pulsar-client-cpp/include/pulsar/c/result.h b/pulsar-client-cpp/include/pulsar/c/result.h deleted file mode 100644 index a95d1e5862fd9..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/result.h +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum -{ - pulsar_result_Ok, /// Operation successful - - pulsar_result_UnknownError, /// Unknown error happened on broker - - pulsar_result_InvalidConfiguration, /// Invalid configuration - - pulsar_result_Timeout, /// Operation timed out - pulsar_result_LookupError, /// Broker lookup failed - pulsar_result_ConnectError, /// Failed to connect to broker - pulsar_result_ReadError, /// Failed to read from socket - - pulsar_result_AuthenticationError, /// Authentication failed on broker - pulsar_result_AuthorizationError, /// Client is not authorized to create producer/consumer - pulsar_result_ErrorGettingAuthenticationData, /// Client cannot find authorization data - - pulsar_result_BrokerMetadataError, /// Broker failed in updating metadata - pulsar_result_BrokerPersistenceError, /// Broker failed to persist entry - pulsar_result_ChecksumError, /// Corrupt message checksum failure - - pulsar_result_ConsumerBusy, /// Exclusive consumer is already connected - pulsar_result_NotConnected, /// Producer/Consumer is not currently connected to broker - pulsar_result_AlreadyClosed, /// Producer/Consumer is already closed and not accepting any operation - - pulsar_result_InvalidMessage, /// Error in publishing an already used message - - pulsar_result_ConsumerNotInitialized, /// Consumer is not initialized - pulsar_result_ProducerNotInitialized, /// Producer is not initialized - pulsar_result_ProducerBusy, /// Producer with same name is already connected - pulsar_result_TooManyLookupRequestException, /// Too Many concurrent LookupRequest - - pulsar_result_InvalidTopicName, /// Invalid topic name - pulsar_result_InvalidUrl, /// Client Initialized with Invalid Broker Url (VIP Url passed to Client - /// Constructor) - pulsar_result_ServiceUnitNotReady, /// Service Unit unloaded between client did lookup and - /// producer/consumer got - /// created - pulsar_result_OperationNotSupported, - 
pulsar_result_ProducerBlockedQuotaExceededError, /// Producer is blocked - pulsar_result_ProducerBlockedQuotaExceededException, /// Producer is getting exception - pulsar_result_ProducerQueueIsFull, /// Producer queue is full - pulsar_result_MessageTooBig, /// Trying to send a messages exceeding the max size - pulsar_result_TopicNotFound, /// Topic not found - pulsar_result_SubscriptionNotFound, /// Subscription not found - pulsar_result_ConsumerNotFound, /// Consumer not found - pulsar_result_UnsupportedVersionError, /// Error when an older client/version doesn't support a required - /// feature - pulsar_result_TopicTerminated, /// Topic was already terminated - pulsar_result_CryptoError, /// Error when crypto operation fails - - pulsar_result_IncompatibleSchema, /// Specified schema is incompatible with the topic's schema - pulsar_result_ConsumerAssignError, /// Error when a new consumer connected but can't assign messages to - /// this - /// consumer - pulsar_result_CumulativeAcknowledgementNotAllowedError, /// Not allowed to call cumulativeAcknowledgement - /// in - /// Shared and Key_Shared subscription mode - pulsar_result_TransactionCoordinatorNotFoundError, /// Transaction coordinator not found - pulsar_result_InvalidTxnStatusError, /// Invalid txn status error - pulsar_result_NotAllowedError, /// Not allowed - pulsar_result_TransactionConflict, /// Transaction ack conflict - pulsar_result_TransactionNotFound, /// Transaction not found - pulsar_result_ProducerFenced, /// Producer was fenced by broker - - pulsar_result_MemoryBufferIsFull, /// Client-wide memory limit has been reached - pulsar_result_Interrupted, /// Interrupted while waiting to dequeue -} pulsar_result; - -// Return string representation of result code -PULSAR_PUBLIC const char *pulsar_result_str(pulsar_result result); - -#ifdef __cplusplus -} -#endif diff --git a/pulsar-client-cpp/include/pulsar/c/string_list.h b/pulsar-client-cpp/include/pulsar/c/string_list.h deleted file mode 100644 
index fc27ae1337a6e..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/string_list.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_string_list pulsar_string_list_t; - -PULSAR_PUBLIC pulsar_string_list_t *pulsar_string_list_create(); -PULSAR_PUBLIC void pulsar_string_list_free(pulsar_string_list_t *list); - -PULSAR_PUBLIC int pulsar_string_list_size(pulsar_string_list_t *list); - -PULSAR_PUBLIC void pulsar_string_list_append(pulsar_string_list_t *list, const char *item); - -PULSAR_PUBLIC const char *pulsar_string_list_get(pulsar_string_list_t *map, int index); - -#ifdef __cplusplus -} -#endif \ No newline at end of file diff --git a/pulsar-client-cpp/include/pulsar/c/string_map.h b/pulsar-client-cpp/include/pulsar/c/string_map.h deleted file mode 100644 index 0f4b6327a0438..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/string_map.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct _pulsar_string_map pulsar_string_map_t; - -PULSAR_PUBLIC pulsar_string_map_t *pulsar_string_map_create(); -PULSAR_PUBLIC void pulsar_string_map_free(pulsar_string_map_t *map); - -PULSAR_PUBLIC int pulsar_string_map_size(pulsar_string_map_t *map); - -PULSAR_PUBLIC void pulsar_string_map_put(pulsar_string_map_t *map, const char *key, const char *value); - -PULSAR_PUBLIC const char *pulsar_string_map_get(pulsar_string_map_t *map, const char *key); - -PULSAR_PUBLIC const char *pulsar_string_map_get_key(pulsar_string_map_t *map, int idx); -PULSAR_PUBLIC const char *pulsar_string_map_get_value(pulsar_string_map_t *map, int idx); - -#ifdef __cplusplus -} -#endif \ No newline at end of file diff --git a/pulsar-client-cpp/include/pulsar/c/version.h b/pulsar-client-cpp/include/pulsar/c/version.h deleted file mode 100644 index ab63c8a708991..0000000000000 --- a/pulsar-client-cpp/include/pulsar/c/version.h +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include diff --git a/pulsar-client-cpp/include/pulsar/defines.h b/pulsar-client-cpp/include/pulsar/defines.h deleted file mode 100644 index fc099df6a1d04..0000000000000 --- a/pulsar-client-cpp/include/pulsar/defines.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_DEFINES_H_ -#define PULSAR_DEFINES_H_ - -#ifdef PULSAR_STATIC - -#define PULSAR_PUBLIC - -#else - -#ifdef _WIN32 - -#ifdef BUILDING_PULSAR -#define PULSAR_PUBLIC __declspec(dllexport) -#else -#define PULSAR_PUBLIC __declspec(dllimport) -#endif /*BUILDING_PULSAR*/ - -#else - -#define PULSAR_PUBLIC __attribute__((visibility("default"))) - -#endif /*_WIN32*/ - -#endif /*PULSAR_STATIC*/ - -#endif /* PULSAR_DEFINES_H_ */ diff --git a/pulsar-client-cpp/lib/AckGroupingTracker.cc b/pulsar-client-cpp/lib/AckGroupingTracker.cc deleted file mode 100644 index 7d1d706d745b4..0000000000000 --- a/pulsar-client-cpp/lib/AckGroupingTracker.cc +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "AckGroupingTracker.h" - -#include - -#include - -#include "Commands.h" -#include "LogUtils.h" -#include "PulsarApi.pb.h" -#include "ClientConnection.h" -#include - -namespace pulsar { - -DECLARE_LOG_OBJECT(); - -inline void sendAck(ClientConnectionPtr cnx, uint64_t consumerId, const MessageId& msgId, - proto::CommandAck_AckType ackType) { - proto::MessageIdData msgIdData; - msgIdData.set_ledgerid(msgId.ledgerId()); - msgIdData.set_entryid(msgId.entryId()); - auto cmd = Commands::newAck(consumerId, msgIdData, ackType, -1); - cnx->sendCommand(cmd); - LOG_DEBUG("ACK request is sent for message - [" << msgIdData.ledgerid() << ", " << msgIdData.entryid() - << "]"); -} - -bool AckGroupingTracker::doImmediateAck(ClientConnectionWeakPtr connWeakPtr, uint64_t consumerId, - const MessageId& msgId, proto::CommandAck_AckType ackType) { - auto cnx = connWeakPtr.lock(); - if (cnx == nullptr) { - LOG_DEBUG("Connection is not ready, ACK failed for message - [" << msgId.ledgerId() << ", " - << msgId.entryId() << "]"); - return false; - } - sendAck(cnx, consumerId, msgId, ackType); - return true; -} - -bool AckGroupingTracker::doImmediateAck(ClientConnectionWeakPtr connWeakPtr, uint64_t consumerId, - const std::set& msgIds) { - auto cnx = connWeakPtr.lock(); - if (cnx == nullptr) { - LOG_DEBUG("Connection is not ready, ACK failed."); - return false; - } - - for (const auto& msgId : msgIds) { - sendAck(cnx, consumerId, msgId, proto::CommandAck::Individual); - } - return true; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/AckGroupingTracker.h b/pulsar-client-cpp/lib/AckGroupingTracker.h deleted file mode 100644 index f4410e45fab7c..0000000000000 --- a/pulsar-client-cpp/lib/AckGroupingTracker.h +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_ACKGROUPINGTRACKER_H_ -#define LIB_ACKGROUPINGTRACKER_H_ - -#include - -#include -#include - -#include "PulsarApi.pb.h" -#include "ClientConnection.h" -#include - -namespace pulsar { - -/** - * @class AckGroupingTracker - * Default ACK grouping tracker, it actually neither tracks ACK requests nor sends them to brokers. - * It can be directly used by consumers for non-persistent topics. - */ -class AckGroupingTracker : public std::enable_shared_from_this { - public: - AckGroupingTracker() = default; - virtual ~AckGroupingTracker() = default; - - /** - * Start tracking the ACK requests. - */ - virtual void start() {} - - /** - * Since ACK requests are grouped and delayed, we need to do some best-effort duplicate check to - * discard messages that are being resent after a disconnection and for which the user has - * already sent an acknowledgement. - * @param[in] msgId message ID to be checked. - * @return true if given message ID is grouped, otherwise false. If using cumulative ACK and the - * given message ID has been ACKed in previous cumulative ACK, it also returns true; - */ - virtual bool isDuplicate(const MessageId& msgId) { return false; } - - /** - * Adding message ID into ACK group for individual ACK. - * @param[in] msgId ID of the message to be ACKed. 
- */ - virtual void addAcknowledge(const MessageId& msgId) {} - - /** - * Adding message ID into ACK group for cumulative ACK. - * @param[in] msgId ID of the message to be ACKed. - */ - virtual void addAcknowledgeCumulative(const MessageId& msgId) {} - - /** - * Flush all the pending grouped ACKs (as flush() does), and stop period ACKs sending. - */ - virtual void close() {} - - /** - * Flush all the pending grouped ACKs and send them to the broker. - */ - virtual void flush() {} - - /** - * Flush all the pending grouped ACKs (as flush() does), and clean all records about ACKed - * messages, such as last cumulative ACKed message ID. - */ - virtual void flushAndClean() {} - - protected: - /** - * Immediately send ACK request to broker. - * @param[in] connWeakPtr weak pointer of the client connection. - * @param[in] consumerId ID of the consumer that performs this ACK. - * @param[in] msgId message ID to be ACKed. - * @param[in] ackType ACK type, e.g. cumulative. - * @return true if the ACK is sent successfully, otherwise false. - */ - bool doImmediateAck(ClientConnectionWeakPtr connWeakPtr, uint64_t consumerId, const MessageId& msgId, - proto::CommandAck_AckType ackType); - - /** - * Immediately send a set of ACK requests one by one to the broker, it only supports individual - * ACK. - * @param[in] connWeakPtr weak pointer of the client connection. - * @param[in] consumerId ID of the consumer that performs this ACK. - * @param[in] msgIds message IDs to be ACKed. - * @return true if the ACK is sent successfully, otherwise false. 
- */ - bool doImmediateAck(ClientConnectionWeakPtr connWeakPtr, uint64_t consumerId, - const std::set& msgIds); -}; // class AckGroupingTracker - -using AckGroupingTrackerPtr = std::shared_ptr; - -} // namespace pulsar -#endif /* LIB_ACKGROUPINGTRACKER_H_ */ diff --git a/pulsar-client-cpp/lib/AckGroupingTrackerDisabled.cc b/pulsar-client-cpp/lib/AckGroupingTrackerDisabled.cc deleted file mode 100644 index 4c2b11fb7716d..0000000000000 --- a/pulsar-client-cpp/lib/AckGroupingTrackerDisabled.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "AckGroupingTrackerDisabled.h" - -#include "HandlerBase.h" -#include "PulsarApi.pb.h" -#include - -namespace pulsar { - -DECLARE_LOG_OBJECT(); - -AckGroupingTrackerDisabled::AckGroupingTrackerDisabled(HandlerBase& handler, uint64_t consumerId) - : AckGroupingTracker(), handler_(handler), consumerId_(consumerId) { - LOG_INFO("ACK grouping is disabled."); -} - -void AckGroupingTrackerDisabled::addAcknowledge(const MessageId& msgId) { - this->doImmediateAck(this->handler_.getCnx(), this->consumerId_, msgId, proto::CommandAck::Individual); -} - -void AckGroupingTrackerDisabled::addAcknowledgeCumulative(const MessageId& msgId) { - this->doImmediateAck(this->handler_.getCnx(), this->consumerId_, msgId, proto::CommandAck::Cumulative); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/AckGroupingTrackerDisabled.h b/pulsar-client-cpp/lib/AckGroupingTrackerDisabled.h deleted file mode 100644 index 6e66718a9c856..0000000000000 --- a/pulsar-client-cpp/lib/AckGroupingTrackerDisabled.h +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_ACKGROUPINGTRACKERDISABLED_H_ -#define LIB_ACKGROUPINGTRACKERDISABLED_H_ - -#include - -#include "HandlerBase.h" -#include -#include "AckGroupingTracker.h" - -namespace pulsar { - -/** - * @class AckGroupingTrackerDisabled - * ACK grouping tracker that does not tracker or group ACK requests. The ACK requests are diretly - * sent to broker. - */ -class AckGroupingTrackerDisabled : public AckGroupingTracker { - public: - virtual ~AckGroupingTrackerDisabled() = default; - - /** - * Constructing ACK grouping tracker for peresistent topics that disabled ACK grouping. - * @param[in] handler the connection handler. - * @param[in] consumerId consumer ID that this tracker belongs to. - */ - AckGroupingTrackerDisabled(HandlerBase& handler, uint64_t consumerId); - - void addAcknowledge(const MessageId& msgId) override; - void addAcknowledgeCumulative(const MessageId& msgId) override; - - private: - //! The connection handler. - HandlerBase& handler_; - - //! ID of the consumer that this tracker belongs to. - uint64_t consumerId_; -}; // class AckGroupingTrackerDisabled - -} // namespace pulsar -#endif /* LIB_ACKGROUPINGTRACKERDISABLED_H_ */ diff --git a/pulsar-client-cpp/lib/AckGroupingTrackerEnabled.cc b/pulsar-client-cpp/lib/AckGroupingTrackerEnabled.cc deleted file mode 100644 index 5b6fe4e75924c..0000000000000 --- a/pulsar-client-cpp/lib/AckGroupingTrackerEnabled.cc +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include "AckGroupingTrackerEnabled.h" - -#include -#include - -#include "Commands.h" -#include "LogUtils.h" -#include "ClientImpl.h" -#include "HandlerBase.h" -#include "PulsarApi.pb.h" -#include - -namespace pulsar { - -DECLARE_LOG_OBJECT(); - -AckGroupingTrackerEnabled::AckGroupingTrackerEnabled(ClientImplPtr clientPtr, - const HandlerBasePtr& handlerPtr, uint64_t consumerId, - long ackGroupingTimeMs, long ackGroupingMaxSize) - : AckGroupingTracker(), - handlerWeakPtr_(handlerPtr), - consumerId_(consumerId), - nextCumulativeAckMsgId_(MessageId::earliest()), - requireCumulativeAck_(false), - mutexCumulativeAckMsgId_(), - pendingIndividualAcks_(), - rmutexPendingIndAcks_(), - ackGroupingTimeMs_(ackGroupingTimeMs), - ackGroupingMaxSize_(ackGroupingMaxSize), - executor_(clientPtr->getIOExecutorProvider()->get()), - timer_(), - mutexTimer_() { - LOG_DEBUG("ACK grouping is enabled, grouping time " << ackGroupingTimeMs << "ms, grouping max size " - << ackGroupingMaxSize); -} - -void AckGroupingTrackerEnabled::start() { this->scheduleTimer(); } - -bool AckGroupingTrackerEnabled::isDuplicate(const MessageId& msgId) { - { - // Check if the message ID is already ACKed by a previous (or pending) cumulative request. - std::lock_guard lock(this->mutexCumulativeAckMsgId_); - if (msgId <= this->nextCumulativeAckMsgId_) { - return true; - } - } - - // Check existence in pending individual ACKs set. 
- std::lock_guard lock(this->rmutexPendingIndAcks_); - return this->pendingIndividualAcks_.count(msgId) > 0; -} - -void AckGroupingTrackerEnabled::addAcknowledge(const MessageId& msgId) { - std::lock_guard lock(this->rmutexPendingIndAcks_); - this->pendingIndividualAcks_.insert(msgId); - if (this->ackGroupingMaxSize_ > 0 && this->pendingIndividualAcks_.size() >= this->ackGroupingMaxSize_) { - this->flush(); - } -} - -void AckGroupingTrackerEnabled::addAcknowledgeCumulative(const MessageId& msgId) { - std::lock_guard lock(this->mutexCumulativeAckMsgId_); - if (msgId > this->nextCumulativeAckMsgId_) { - this->nextCumulativeAckMsgId_ = msgId; - this->requireCumulativeAck_ = true; - } -} - -void AckGroupingTrackerEnabled::close() { - this->flush(); - std::lock_guard lock(this->mutexTimer_); - if (this->timer_) { - boost::system::error_code ec; - this->timer_->cancel(ec); - } -} - -void AckGroupingTrackerEnabled::flush() { - auto handler = handlerWeakPtr_.lock(); - if (!handler) { - LOG_DEBUG("Reference to the HandlerBase is not valid."); - return; - } - auto cnx = handler->getCnx().lock(); - if (cnx == nullptr) { - LOG_DEBUG("Connection is not ready, grouping ACK failed."); - return; - } - - // Send ACK for cumulative ACK requests. - { - std::lock_guard lock(this->mutexCumulativeAckMsgId_); - if (this->requireCumulativeAck_) { - if (!this->doImmediateAck(cnx, this->consumerId_, this->nextCumulativeAckMsgId_, - proto::CommandAck::Cumulative)) { - // Failed to send ACK. - LOG_WARN("Failed to send cumulative ACK."); - return; - } - this->requireCumulativeAck_ = false; - } - } - - // Send ACK for individual ACK requests. 
- std::lock_guard lock(this->rmutexPendingIndAcks_); - if (!this->pendingIndividualAcks_.empty()) { - if (Commands::peerSupportsMultiMessageAcknowledgement(cnx->getServerProtocolVersion())) { - auto cmd = Commands::newMultiMessageAck(this->consumerId_, this->pendingIndividualAcks_); - cnx->sendCommand(cmd); - } else { - // Broker does not support multi-message ACK, use multiple individual ACK instead. - this->doImmediateAck(cnx, this->consumerId_, this->pendingIndividualAcks_); - } - this->pendingIndividualAcks_.clear(); - } -} - -void AckGroupingTrackerEnabled::flushAndClean() { - this->flush(); - { - std::lock_guard lock(this->mutexCumulativeAckMsgId_); - this->nextCumulativeAckMsgId_ = MessageId::earliest(); - this->requireCumulativeAck_ = false; - } - std::lock_guard lock(this->rmutexPendingIndAcks_); - this->pendingIndividualAcks_.clear(); -} - -void AckGroupingTrackerEnabled::scheduleTimer() { - std::lock_guard lock(this->mutexTimer_); - this->timer_ = this->executor_->createDeadlineTimer(); - this->timer_->expires_from_now(boost::posix_time::milliseconds(std::max(1L, this->ackGroupingTimeMs_))); - auto self = shared_from_this(); - this->timer_->async_wait([this, self](const boost::system::error_code& ec) -> void { - if (!ec) { - this->flush(); - this->scheduleTimer(); - } - }); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/AckGroupingTrackerEnabled.h b/pulsar-client-cpp/lib/AckGroupingTrackerEnabled.h deleted file mode 100644 index c3926aa492bf2..0000000000000 --- a/pulsar-client-cpp/lib/AckGroupingTrackerEnabled.h +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_ACKGROUPINGTRACKERENABLED_H_ -#define LIB_ACKGROUPINGTRACKERENABLED_H_ - -#include - -#include -#include - -#include "ClientImpl.h" -#include "HandlerBase.h" -#include -#include "AckGroupingTracker.h" - -namespace pulsar { - -/** - * @class AckGroupingTrackerEnabled - * Ack grouping tracker for consumers of persistent topics that enabled ACK grouping. - */ -class AckGroupingTrackerEnabled : public AckGroupingTracker { - public: - virtual ~AckGroupingTrackerEnabled() { this->close(); } - - /** - * Constructing ACK grouping tracker for peresistent topics. - * @param[in] clientPtr pointer to client object. - * @param[in] handlerPtr the shared pointer to connection handler. - * @param[in] consumerId consumer ID that this tracker belongs to. - * @param[in] ackGroupingTimeMs ACK grouping time window in milliseconds. - * @param[in] ackGroupingMaxSize max. number of ACK requests can be grouped. - */ - AckGroupingTrackerEnabled(ClientImplPtr clientPtr, const HandlerBasePtr& handlerPtr, uint64_t consumerId, - long ackGroupingTimeMs, long ackGroupingMaxSize); - - void start() override; - bool isDuplicate(const MessageId& msgId) override; - void addAcknowledge(const MessageId& msgId) override; - void addAcknowledgeCumulative(const MessageId& msgId) override; - void close() override; - void flush() override; - void flushAndClean() override; - - protected: - //! Method for scheduling grouping timer. - void scheduleTimer(); - - //! The connection handler. - HandlerBaseWeakPtr handlerWeakPtr_; - - //! 
ID of the consumer that this tracker belongs to. - uint64_t consumerId_; - - //! Next message ID to be cumulatively cumulatively. - MessageId nextCumulativeAckMsgId_; - bool requireCumulativeAck_; - std::mutex mutexCumulativeAckMsgId_; - - //! Individual ACK requests that have not been sent to broker. - std::set pendingIndividualAcks_; - std::recursive_mutex rmutexPendingIndAcks_; - - //! Time window in milliseconds for grouping ACK requests. - const long ackGroupingTimeMs_; - - //! Max number of ACK requests can be grouped. - const long ackGroupingMaxSize_; - - //! ACK request sender's scheduled executor. - ExecutorServicePtr executor_; - - //! Pointer to a deadline timer. - DeadlineTimerPtr timer_; - std::mutex mutexTimer_; -}; // class AckGroupingTrackerEnabled - -} // namespace pulsar -#endif /* LIB_ACKGROUPINGTRACKERENABLED_H_ */ diff --git a/pulsar-client-cpp/lib/Authentication.cc b/pulsar-client-cpp/lib/Authentication.cc deleted file mode 100644 index 8fc007dba0b6d..0000000000000 --- a/pulsar-client-cpp/lib/Authentication.cc +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include -#include "auth/AuthTls.h" -#include "auth/AuthAthenz.h" -#include "auth/AuthToken.h" -#include "auth/AuthOauth2.h" -#include "auth/AuthBasic.h" -#include - -#include -#include -#include -#include -#include -#include -#include - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -AuthenticationDataProvider::AuthenticationDataProvider() {} - -AuthenticationDataProvider::~AuthenticationDataProvider() {} - -bool AuthenticationDataProvider::hasDataForTls() { return false; } - -std::string AuthenticationDataProvider::getTlsCertificates() { return "none"; } - -std::string AuthenticationDataProvider::getTlsPrivateKey() { return "none"; } - -bool AuthenticationDataProvider::hasDataForHttp() { return false; } - -std::string AuthenticationDataProvider::getHttpAuthType() { return "none"; } - -std::string AuthenticationDataProvider::getHttpHeaders() { return "none"; } - -bool AuthenticationDataProvider::hasDataFromCommand() { return false; } - -std::string AuthenticationDataProvider::getCommandData() { return "none"; } - -Authentication::Authentication() {} - -Authentication::~Authentication() {} - -ParamMap Authentication::parseDefaultFormatAuthParams(const std::string& authParamsString) { - ParamMap paramMap; - if (!authParamsString.empty()) { - std::vector params; - boost::algorithm::split(params, authParamsString, boost::is_any_of(",")); - for (int i = 0; i < params.size(); i++) { - std::vector kv; - boost::algorithm::split(kv, params[i], boost::is_any_of(":")); - if (kv.size() == 2) { - paramMap[kv[0]] = kv[1]; - } - } - } - return paramMap; -} - -class AuthDisabledData : public AuthenticationDataProvider { - public: - AuthDisabledData(ParamMap& params) {} -}; - -class AuthDisabled : public Authentication { - public: - AuthDisabled(AuthenticationDataPtr& authData) { authData_ = authData; } - - static AuthenticationPtr create(ParamMap& params) { - AuthenticationDataPtr authData = AuthenticationDataPtr(new AuthDisabledData(params)); - return 
AuthenticationPtr(new AuthDisabled(authData)); - } - - const std::string getAuthMethodName() const { return "none"; } -}; - -AuthenticationPtr AuthFactory::Disabled() { - ParamMap params; - return AuthDisabled::create(params); -} - -AuthenticationPtr AuthFactory::create(const std::string& pluginNameOrDynamicLibPath) { - ParamMap params; - return AuthFactory::create(pluginNameOrDynamicLibPath, params); -} - -std::mutex mutex; -std::vector AuthFactory::loadedLibrariesHandles_; -bool AuthFactory::isShutdownHookRegistered_ = false; - -void AuthFactory::release_handles() { - std::lock_guard lock(mutex); - for (std::vector::iterator ite = AuthFactory::loadedLibrariesHandles_.begin(); - ite != AuthFactory::loadedLibrariesHandles_.end(); ite++) { - dlclose(*ite); - } - loadedLibrariesHandles_.clear(); -} - -AuthenticationPtr tryCreateBuiltinAuth(const std::string& pluginName, ParamMap& paramMap) { - if (boost::iequals(pluginName, TLS_PLUGIN_NAME) || boost::iequals(pluginName, TLS_JAVA_PLUGIN_NAME)) { - return AuthTls::create(paramMap); - } else if (boost::iequals(pluginName, TOKEN_PLUGIN_NAME) || - boost::iequals(pluginName, TOKEN_JAVA_PLUGIN_NAME)) { - return AuthToken::create(paramMap); - } else if (boost::iequals(pluginName, ATHENZ_PLUGIN_NAME) || - boost::iequals(pluginName, ATHENZ_JAVA_PLUGIN_NAME)) { - return AuthAthenz::create(paramMap); - } else if (boost::iequals(pluginName, OAUTH2_TOKEN_PLUGIN_NAME) || - boost::iequals(pluginName, OAUTH2_TOKEN_JAVA_PLUGIN_NAME)) { - return AuthOauth2::create(paramMap); - } else if (boost::iequals(pluginName, BASIC_PLUGIN_NAME) || - boost::iequals(pluginName, BASIC_JAVA_PLUGIN_NAME)) { - return AuthBasic::create(paramMap); - } else { - return AuthenticationPtr(); - } -} - -AuthenticationPtr tryCreateBuiltinAuth(const std::string& pluginName, const std::string& authParamsString) { - if (boost::iequals(pluginName, TLS_PLUGIN_NAME) || boost::iequals(pluginName, TLS_JAVA_PLUGIN_NAME)) { - return AuthTls::create(authParamsString); - } 
else if (boost::iequals(pluginName, TOKEN_PLUGIN_NAME) || - boost::iequals(pluginName, TOKEN_JAVA_PLUGIN_NAME)) { - return AuthToken::create(authParamsString); - } else if (boost::iequals(pluginName, ATHENZ_PLUGIN_NAME) || - boost::iequals(pluginName, ATHENZ_JAVA_PLUGIN_NAME)) { - return AuthAthenz::create(authParamsString); - } else if (boost::iequals(pluginName, OAUTH2_TOKEN_PLUGIN_NAME) || - boost::iequals(pluginName, OAUTH2_TOKEN_JAVA_PLUGIN_NAME)) { - return AuthOauth2::create(authParamsString); - } else if (boost::iequals(pluginName, BASIC_PLUGIN_NAME) || - boost::iequals(pluginName, BASIC_JAVA_PLUGIN_NAME)) { - return AuthBasic::create(authParamsString); - } else { - return AuthenticationPtr(); - } -} - -AuthenticationPtr AuthFactory::create(const std::string& pluginNameOrDynamicLibPath, - const std::string& authParamsString) { - { - std::lock_guard lock(mutex); - if (!AuthFactory::isShutdownHookRegistered_) { - atexit(release_handles); - AuthFactory::isShutdownHookRegistered_ = true; - } - } - - AuthenticationPtr authPtr = tryCreateBuiltinAuth(pluginNameOrDynamicLibPath, authParamsString); - if (authPtr) { - return authPtr; - } - - Authentication* auth = NULL; - void* handle = dlopen(pluginNameOrDynamicLibPath.c_str(), RTLD_LAZY); - if (handle != NULL) { - { - std::lock_guard lock(mutex); - loadedLibrariesHandles_.push_back(handle); - } - Authentication* (*createAuthentication)(const std::string&); - *(void**)(&createAuthentication) = dlsym(handle, "create"); - if (createAuthentication != NULL) { - auth = createAuthentication(authParamsString); - } else { - ParamMap paramMap = Authentication::parseDefaultFormatAuthParams(authParamsString); - return AuthFactory::create(pluginNameOrDynamicLibPath, paramMap); - } - } - if (!auth) { - LOG_WARN("Couldn't load auth plugin " << pluginNameOrDynamicLibPath); - } - return AuthenticationPtr(auth); -} - -AuthenticationPtr AuthFactory::create(const std::string& pluginNameOrDynamicLibPath, ParamMap& params) { - { - 
std::lock_guard lock(mutex); - if (!AuthFactory::isShutdownHookRegistered_) { - atexit(release_handles); - AuthFactory::isShutdownHookRegistered_ = true; - } - } - - AuthenticationPtr authPtr = tryCreateBuiltinAuth(pluginNameOrDynamicLibPath, params); - if (authPtr) { - return authPtr; - } - - Authentication* auth = NULL; - void* handle = dlopen(pluginNameOrDynamicLibPath.c_str(), RTLD_LAZY); - if (handle != NULL) { - std::lock_guard lock(mutex); - loadedLibrariesHandles_.push_back(handle); - Authentication* (*createAuthentication)(ParamMap&); - *(void**)(&createAuthentication) = dlsym(handle, "createFromMap"); - if (createAuthentication != NULL) { - auth = createAuthentication(params); - } - } - if (!auth) { - LOG_WARN("Couldn't load auth plugin " << pluginNameOrDynamicLibPath); - } - - return AuthenticationPtr(auth); -} diff --git a/pulsar-client-cpp/lib/Backoff.cc b/pulsar-client-cpp/lib/Backoff.cc deleted file mode 100644 index 790d3f87ed664..0000000000000 --- a/pulsar-client-cpp/lib/Backoff.cc +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "Backoff.h" -#include -#include -#include /* time */ - -namespace pulsar { - -Backoff::Backoff(const TimeDuration& initial, const TimeDuration& max, const TimeDuration& mandatoryStop) - : initial_(initial), max_(max), next_(initial), mandatoryStop_(mandatoryStop), rng_(time(NULL)) {} - -TimeDuration Backoff::next() { - TimeDuration current = next_; - next_ = std::min(next_ * 2, max_); - - // Check for mandatory stop - if (!mandatoryStopMade_) { - const boost::posix_time::ptime& now = boost::posix_time::microsec_clock::universal_time(); - TimeDuration timeElapsedSinceFirstBackoff = boost::posix_time::milliseconds(0); - if (initial_ == current) { - firstBackoffTime_ = now; - } else { - timeElapsedSinceFirstBackoff = now - firstBackoffTime_; - } - if (timeElapsedSinceFirstBackoff + current > mandatoryStop_) { - current = std::max(initial_, mandatoryStop_ - timeElapsedSinceFirstBackoff); - mandatoryStopMade_ = true; - } - } - // Add Randomness - boost::random::uniform_int_distribution dist; - int randomNumber = dist(rng_); - - current = current - (current * (randomNumber % 10) / 100); - return std::max(initial_, current); -} - -void Backoff::reset() { - next_ = initial_; - mandatoryStopMade_ = false; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Backoff.h b/pulsar-client-cpp/lib/Backoff.h deleted file mode 100644 index 93b97adf401e4..0000000000000 --- a/pulsar-client-cpp/lib/Backoff.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef _PULSAR_BACKOFF_HEADER_ -#define _PULSAR_BACKOFF_HEADER_ -#include -#include -#include - -namespace pulsar { - -typedef boost::posix_time::time_duration TimeDuration; - -class PULSAR_PUBLIC Backoff { - public: - Backoff(const TimeDuration&, const TimeDuration&, const TimeDuration&); - TimeDuration next(); - void reset(); - - private: - const TimeDuration initial_; - const TimeDuration max_; - TimeDuration next_; - TimeDuration mandatoryStop_; - boost::posix_time::ptime firstBackoffTime_; - boost::random::mt19937 rng_; - bool mandatoryStopMade_ = false; - - friend class PulsarFriend; -}; -} // namespace pulsar - -#endif //_PULSAR_BACKOFF_HEADER_ diff --git a/pulsar-client-cpp/lib/BatchAcknowledgementTracker.cc b/pulsar-client-cpp/lib/BatchAcknowledgementTracker.cc deleted file mode 100644 index 3d6d9208adc46..0000000000000 --- a/pulsar-client-cpp/lib/BatchAcknowledgementTracker.cc +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "BatchAcknowledgementTracker.h" - -namespace pulsar { -DECLARE_LOG_OBJECT() - -BatchAcknowledgementTracker::BatchAcknowledgementTracker(const std::string topic, - const std::string subscription, - const long consumerId) - : greatestCumulativeAckSent_() { - std::stringstream consumerStrStream; - consumerStrStream << "BatchAcknowledgementTracker for [" << topic << ", " << subscription << ", " - << consumerId << "] "; - name_ = consumerStrStream.str(); - LOG_DEBUG(name_ << "Constructed BatchAcknowledgementTracker"); -} - -void BatchAcknowledgementTracker::clear() { - Lock lock(mutex_); - trackerMap_.clear(); - sendList_.clear(); -} - -void BatchAcknowledgementTracker::receivedMessage(const Message& message) { - // ignore message if it is not a batch message - if (!message.impl_->metadata.has_num_messages_in_batch()) { - return; - } - Lock lock(mutex_); - MessageId msgID = message.impl_->messageId; - - // ignore message if it is less than the last cumulative ack sent or messageID is already being tracked - TrackerMap::iterator pos = trackerMap_.find(msgID); - if (msgID < greatestCumulativeAckSent_ || pos != trackerMap_.end() || - std::find(sendList_.begin(), sendList_.end(), msgID) != sendList_.end()) { - return; - } - LOG_DEBUG("Initializing the trackerMap_ with Message ID = " - << msgID << " -- Map size: " << trackerMap_.size() << " -- List size: " << sendList_.size()); - - // Since dynamic_set (this version) doesn't have all() function, initializing all bits with 1 and then - // reseting them to 0 and using any() - trackerMap_.insert( 
- pos, - TrackerPair(msgID, boost::dynamic_bitset<>(message.impl_->metadata.num_messages_in_batch()).set())); -} - -void BatchAcknowledgementTracker::deleteAckedMessage(const MessageId& messageId, - proto::CommandAck_AckType ackType) { - // Not a batch message and a individual ack - if (messageId.batchIndex() == -1 && ackType == proto::CommandAck_AckType_Individual) { - return; - } - - MessageId batchMessageId = - MessageId(messageId.partition(), messageId.ledgerId(), messageId.entryId(), -1 /* Batch index */); - - Lock lock(mutex_); - if (ackType == proto::CommandAck_AckType_Cumulative) { - // delete from trackerMap and sendList all messageIDs less than or equal to this one - // equal to - since getGreatestCumulativeAckReady already gives us the exact message id to be acked - - TrackerMap::iterator it = trackerMap_.begin(); - TrackerMapRemoveCriteria criteria(messageId); - while (it != trackerMap_.end()) { - if (criteria(*it)) { - trackerMap_.erase(it++); - } else { - ++it; - } - } - - // std::remove shifts all to be deleted items to the end of the vector and returns an iterator to the - // first - // instance of item, then we erase all elements from this iterator to the end of the list - sendList_.erase( - std::remove_if(sendList_.begin(), sendList_.end(), SendRemoveCriteria(batchMessageId)), - sendList_.end()); - - if (greatestCumulativeAckSent_ < messageId) { - greatestCumulativeAckSent_ = messageId; - LOG_DEBUG(*this << " The greatestCumulativeAckSent_ is now " << greatestCumulativeAckSent_); - } - } else { - // Error - if it is a batch message and found in trackerMap_ - if (trackerMap_.find(messageId) != trackerMap_.end()) { - LOG_ERROR(*this << " - This should not happened - Message should have been removed from " - "trakerMap_ and moved to sendList_ " - << messageId); - } - - sendList_.erase(std::remove(sendList_.begin(), sendList_.end(), batchMessageId), sendList_.end()); - } -} - -bool BatchAcknowledgementTracker::isBatchReady(const MessageId& msgID, - 
const proto::CommandAck_AckType ackType) { - Lock lock(mutex_); - // Remove batch index - MessageId batchMessageId = - MessageId(msgID.partition(), msgID.ledgerId(), msgID.entryId(), -1 /* Batch index */); - - TrackerMap::iterator pos = trackerMap_.find(batchMessageId); - if (pos == trackerMap_.end() || - std::find(sendList_.begin(), sendList_.end(), batchMessageId) != sendList_.end()) { - LOG_DEBUG( - "Batch is ready since message present in sendList_ or not present in trackerMap_ [message ID = " - << batchMessageId << "]"); - return true; - } - - int batchIndex = msgID.batchIndex(); - assert(batchIndex < pos->second.size()); - pos->second.set(batchIndex, false); - - if (ackType == proto::CommandAck_AckType_Cumulative) { - for (int i = 0; i < batchIndex; i++) { - pos->second.set(i, false); - } - } - - if (pos->second.any()) { - return false; - } - sendList_.push_back(batchMessageId); - trackerMap_.erase(pos); - LOG_DEBUG("Batch is ready since message all bits are reset in trackerMap_ [message ID = " << msgID - << "]"); - return true; -} - -// returns -// - a batch message id < messageId -// - same messageId if it is the last message in the batch -const MessageId BatchAcknowledgementTracker::getGreatestCumulativeAckReady(const MessageId& messageId) { - Lock lock(mutex_); - - // Remove batch index - MessageId batchMessageId = - MessageId(messageId.partition(), messageId.ledgerId(), messageId.entryId(), -1 /* Batch index */); - TrackerMap::iterator pos = trackerMap_.find(batchMessageId); - - // element not found - if (pos == trackerMap_.end()) { - return MessageId(); - } - - if (pos->second.size() - 1 != messageId.batchIndex()) { - // Can't cumulatively ack this batch message - if (pos == trackerMap_.begin()) { - // This was the first message hence we can't decrement the iterator - return MessageId(); - } - pos--; - } - - return pos->first; -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BatchAcknowledgementTracker.h 
b/pulsar-client-cpp/lib/BatchAcknowledgementTracker.h deleted file mode 100644 index 6a709b3e2a16b..0000000000000 --- a/pulsar-client-cpp/lib/BatchAcknowledgementTracker.h +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_BATCHACKNOWLEDGEMENTTRACKER_H_ -#define LIB_BATCHACKNOWLEDGEMENTTRACKER_H_ - -#include "MessageImpl.h" -#include -#include -#include -#include -#include -#include "LogUtils.h" -#include -#include -namespace pulsar { - -class ConsumerImpl; - -class BatchAcknowledgementTracker { - private: - typedef std::unique_lock Lock; - typedef std::pair > TrackerPair; - typedef std::map > TrackerMap; - std::mutex mutex_; - - TrackerMap trackerMap_; - - // SendList is used to reduce the time required to go over the dynamic_bitset and check if the entire - // batch is acked. - // It is useful in cases where the entire batch is acked but cnx is broken. In this case when any of the - // batch index - // is acked again, we just check the sendList to verify that the batch is acked w/o iterating over the - // dynamic_bitset. 
- std::vector sendList_; - - // we don't need to track MessageId < greatestCumulativeAckReceived - MessageId greatestCumulativeAckSent_; - std::string name_; - - public: - BatchAcknowledgementTracker(const std::string topic, const std::string subscription, - const long consumerId); - - bool isBatchReady(const MessageId& msgID, const proto::CommandAck_AckType ackType); - const MessageId getGreatestCumulativeAckReady(const MessageId& messageId); - - void deleteAckedMessage(const MessageId& messageId, proto::CommandAck_AckType ackType); - void receivedMessage(const Message& message); - - void clear(); - - inline friend std::ostream& operator<<(std::ostream& os, - const BatchAcknowledgementTracker& batchAcknowledgementTracker); - - // Used for Cumulative acks only - struct SendRemoveCriteria { - private: - const MessageId& messageId_; - - public: - SendRemoveCriteria(const MessageId& messageId) : messageId_(messageId) {} - - bool operator()(const MessageId& element) const { return (element <= messageId_); } - }; - - // Used for Cumulative acks only - struct TrackerMapRemoveCriteria { - private: - const MessageId& messageId_; - - public: - TrackerMapRemoveCriteria(const MessageId& messageId) : messageId_(messageId) {} - - bool operator()(std::pair >& element) const { - return (element.first <= messageId_); - } - }; -}; - -std::ostream& operator<<(std::ostream& os, const BatchAcknowledgementTracker& batchAcknowledgementTracker) { - os << "{ " << batchAcknowledgementTracker.name_ << " [greatestCumulativeAckReceived_-" - << batchAcknowledgementTracker.greatestCumulativeAckSent_ - << "] [trackerMap size = " << batchAcknowledgementTracker.trackerMap_.size() << " ]}"; - return os; -} -} // namespace pulsar - -#endif /* LIB_BATCHACKNOWLEDGEMENTTRACKER_H_ */ diff --git a/pulsar-client-cpp/lib/BatchMessageContainer.cc b/pulsar-client-cpp/lib/BatchMessageContainer.cc deleted file mode 100644 index e25b72ee268e8..0000000000000 --- a/pulsar-client-cpp/lib/BatchMessageContainer.cc 
+++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "BatchMessageContainer.h" -#include "ClientConnection.h" -#include "Commands.h" -#include "LogUtils.h" -#include "MessageImpl.h" -#include "ProducerImpl.h" -#include "TimeUtils.h" -#include - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -BatchMessageContainer::BatchMessageContainer(const ProducerImpl& producer) - : BatchMessageContainerBase(producer) {} - -BatchMessageContainer::~BatchMessageContainer() { - LOG_DEBUG(*this << " destructed"); - LOG_DEBUG("[numberOfBatchesSent = " << numberOfBatchesSent_ - << "] [averageBatchSize_ = " << averageBatchSize_ << "]"); -} - -bool BatchMessageContainer::add(const Message& msg, const SendCallback& callback) { - LOG_DEBUG("Before add: " << *this << " [message = " << msg << "]"); - batch_.add(msg, callback); - updateStats(msg); - LOG_DEBUG("After add: " << *this); - return isFull(); -} - -void BatchMessageContainer::clear() { - averageBatchSize_ = - (batch_.size() + averageBatchSize_ * numberOfBatchesSent_) / (numberOfBatchesSent_ + 1); - numberOfBatchesSent_++; - batch_.clear(); - resetStats(); - LOG_DEBUG(*this << " clear() called"); -} - -Result 
BatchMessageContainer::createOpSendMsg(OpSendMsg& opSendMsg, - const FlushCallback& flushCallback) const { - return createOpSendMsgHelper(opSendMsg, flushCallback, batch_); -} - -std::vector BatchMessageContainer::createOpSendMsgs(std::vector& opSendMsgs, - const FlushCallback& flushCallback) const { - throw std::runtime_error("createOpSendMsgs is not supported for BatchMessageContainer"); -} - -void BatchMessageContainer::serialize(std::ostream& os) const { - os << "{ BatchMessageContainer [size = " << numMessages_ // - << "] [bytes = " << sizeInBytes_ // - << "] [maxSize = " << getMaxNumMessages() // - << "] [maxBytes = " << getMaxSizeInBytes() // - << "] [topicName = " << topicName_ // - << "] [numberOfBatchesSent_ = " << numberOfBatchesSent_ // - << "] [averageBatchSize_ = " << averageBatchSize_ // - << "] }"; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BatchMessageContainer.h b/pulsar-client-cpp/lib/BatchMessageContainer.h deleted file mode 100644 index cd8a62cbe9c2f..0000000000000 --- a/pulsar-client-cpp/lib/BatchMessageContainer.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -/* - * \class BatchMessageContainer - * - * \brief This class is a container for holding individual messages being published until they are batched and - * sent to broker. - * - * \note This class is not thread safe. - */ - -#ifndef LIB_BATCHMESSAGECONTAINER_H_ -#define LIB_BATCHMESSAGECONTAINER_H_ - -#include "BatchMessageContainerBase.h" -#include "MessageAndCallbackBatch.h" - -namespace pulsar { - -class BatchMessageContainer : public BatchMessageContainerBase { - public: - BatchMessageContainer(const ProducerImpl& producer); - - ~BatchMessageContainer(); - - size_t getNumBatches() const override { return 1; } - - bool isFirstMessageToAdd(const Message& msg) const override { return batch_.empty(); } - - bool add(const Message& msg, const SendCallback& callback) override; - - void clear() override; - - Result createOpSendMsg(OpSendMsg& opSendMsg, const FlushCallback& flushCallback) const override; - - std::vector createOpSendMsgs(std::vector& opSendMsgs, - const FlushCallback& flushCallback) const override; - - void serialize(std::ostream& os) const override; - - private: - MessageAndCallbackBatch batch_; - size_t numberOfBatchesSent_ = 0; - double averageBatchSize_ = 0; -}; - -} // namespace pulsar -#endif /* LIB_BATCHMESSAGECONTAINER_H_ */ diff --git a/pulsar-client-cpp/lib/BatchMessageContainerBase.cc b/pulsar-client-cpp/lib/BatchMessageContainerBase.cc deleted file mode 100644 index e9e6b987a107e..0000000000000 --- a/pulsar-client-cpp/lib/BatchMessageContainerBase.cc +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "BatchMessageContainerBase.h" -#include "MessageCrypto.h" -#include "MessageImpl.h" -#include "ProducerImpl.h" -#include "SharedBuffer.h" -#include "PulsarApi.pb.h" - -namespace pulsar { - -BatchMessageContainerBase::BatchMessageContainerBase(const ProducerImpl& producer) - : topicName_(producer.topic_), - producerConfig_(producer.conf_), - producerName_(producer.producerName_), - producerId_(producer.producerId_), - msgCryptoWeakPtr_(producer.msgCrypto_) {} - -Result BatchMessageContainerBase::createOpSendMsgHelper(OpSendMsg& opSendMsg, - const FlushCallback& flushCallback, - const MessageAndCallbackBatch& batch) const { - opSendMsg.sendCallback_ = batch.createSendCallback(); - opSendMsg.messagesCount_ = batch.messagesCount(); - opSendMsg.messagesSize_ = batch.messagesSize(); - - if (flushCallback) { - auto sendCallback = opSendMsg.sendCallback_; - opSendMsg.sendCallback_ = [sendCallback, flushCallback](Result result, const MessageId& id) { - sendCallback(result, id); - flushCallback(result); - }; - } - - if (batch.empty()) { - return ResultOperationNotSupported; - } - - MessageImplPtr impl = batch.msgImpl(); - impl->metadata.set_num_messages_in_batch(batch.size()); - auto compressionType = producerConfig_.getCompressionType(); - if (compressionType != CompressionNone) { - impl->metadata.set_compression(CompressionCodecProvider::convertType(compressionType)); - impl->metadata.set_uncompressed_size(impl->payload.readableBytes()); - } - impl->payload = CompressionCodecProvider::getCodec(compressionType).encode(impl->payload); - - auto 
msgCrypto = msgCryptoWeakPtr_.lock(); - if (msgCrypto && producerConfig_.isEncryptionEnabled()) { - SharedBuffer encryptedPayload; - if (!msgCrypto->encrypt(producerConfig_.getEncryptionKeys(), producerConfig_.getCryptoKeyReader(), - impl->metadata, impl->payload, encryptedPayload)) { - return ResultCryptoError; - } - impl->payload = encryptedPayload; - } - - if (impl->payload.readableBytes() > ClientConnection::getMaxMessageSize()) { - return ResultMessageTooBig; - } - - opSendMsg.metadata_ = impl->metadata; - opSendMsg.payload_ = impl->payload; - opSendMsg.sequenceId_ = impl->metadata.sequence_id(); - opSendMsg.producerId_ = producerId_; - opSendMsg.timeout_ = TimeUtils::now() + milliseconds(producerConfig_.getSendTimeout()); - - return ResultOk; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BatchMessageContainerBase.h b/pulsar-client-cpp/lib/BatchMessageContainerBase.h deleted file mode 100644 index 71eef5fab6287..0000000000000 --- a/pulsar-client-cpp/lib/BatchMessageContainerBase.h +++ /dev/null @@ -1,193 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_BATCHMESSAGECONTAINERBASE_H_ -#define LIB_BATCHMESSAGECONTAINERBASE_H_ - -#include -#include -#include -#include - -#include -#include - -#include - -#include "MessageAndCallbackBatch.h" -#include "OpSendMsg.h" - -namespace pulsar { - -class MessageCrypto; -class ProducerImpl; -class SharedBuffer; - -namespace proto { -class MessageMetadata; -} // namespace proto - -class BatchMessageContainerBase : public boost::noncopyable { - public: - BatchMessageContainerBase(const ProducerImpl& producer); - - virtual ~BatchMessageContainerBase() {} - - /** - * Get number of batches in the batch message container - * - * @return number of batches - */ - virtual size_t getNumBatches() const = 0; - - /** - * Check the message will be the 1st message to be added to the batch - * - * This method is used to check if the reversed spot should be released. Because we won't released the - * reserved spot for 1st message. The released spot is to contain the whole batched message. - * - * @param msg the message to be added to the batch - * @return true if `msg` is the 1st message to be added to the batch - */ - virtual bool isFirstMessageToAdd(const Message& msg) const = 0; - - /** - * Add a message to the batch message container - * - * @param msg message will add to the batch message container - * @param callback message send callback - * @return true if the batch is full, otherwise false - */ - virtual bool add(const Message& msg, const SendCallback& callback) = 0; - - /** - * Clear the batch message container - */ - virtual void clear() = 0; - - /** - * Create a OpSendMsg object to send - * - * @param opSendMsg the OpSendMsg object to create - * @param flushCallback the callback to trigger after the OpSendMsg was completed - * @return ResultOk if create successfully - * @note OpSendMsg's sendCallback_ must be set even if it failed - */ - virtual Result createOpSendMsg(OpSendMsg& opSendMsg, - const FlushCallback& flushCallback = nullptr) const = 0; - - /** - * Create 
a OpSendMsg list to send - * - * @param opSendMsgList the OpSendMsg list to create - * @param flushCallback the callback to trigger after the OpSendMsg was completed - * @return all create results of `opSendMsgs`, ResultOk means create successfully - * @note OpSendMsg's sendCallback_ must be set even if it failed - */ - virtual std::vector createOpSendMsgs(std::vector& opSendMsgs, - const FlushCallback& flushCallback = nullptr) const = 0; - - /** - * Serialize into a std::ostream for logging - * - * @param os the std::ostream to serialize current batch container - */ - virtual void serialize(std::ostream& os) const = 0; - - bool hasEnoughSpace(const Message& msg) const noexcept; - bool isEmpty() const noexcept; - - void processAndClear(std::function opSendMsgCallback, - FlushCallback flushCallback); - - protected: - // references to ProducerImpl's fields - const std::string& topicName_; - const ProducerConfiguration& producerConfig_; - const std::string& producerName_; - const uint64_t& producerId_; - const std::weak_ptr msgCryptoWeakPtr_; - - unsigned int getMaxNumMessages() const noexcept { return producerConfig_.getBatchingMaxMessages(); } - unsigned long getMaxSizeInBytes() const noexcept { - return producerConfig_.getBatchingMaxAllowedSizeInBytes(); - } - - unsigned int numMessages_ = 0; - unsigned long sizeInBytes_ = 0; - - bool isFull() const noexcept; - - void updateStats(const Message& msg); - void resetStats(); - - Result createOpSendMsgHelper(OpSendMsg& opSendMsg, const FlushCallback& flushCallback, - const MessageAndCallbackBatch& batch) const; -}; - -inline bool BatchMessageContainerBase::hasEnoughSpace(const Message& msg) const noexcept { - return (numMessages_ < getMaxNumMessages()) && (sizeInBytes_ + msg.getLength() <= getMaxSizeInBytes()); -} - -inline bool BatchMessageContainerBase::isFull() const noexcept { - return (numMessages_ >= getMaxNumMessages()) || (sizeInBytes_ >= getMaxSizeInBytes()); -} - -inline bool 
BatchMessageContainerBase::isEmpty() const noexcept { return numMessages_ == 0; } - -inline void BatchMessageContainerBase::updateStats(const Message& msg) { - numMessages_++; - sizeInBytes_ += msg.getLength(); -} - -inline void BatchMessageContainerBase::resetStats() { - numMessages_ = 0; - sizeInBytes_ = 0; -} - -inline void BatchMessageContainerBase::processAndClear( - std::function opSendMsgCallback, FlushCallback flushCallback) { - if (isEmpty()) { - if (flushCallback) { - flushCallback(ResultOk); - } - } else { - const auto numBatches = getNumBatches(); - if (numBatches == 1) { - OpSendMsg opSendMsg; - Result result = createOpSendMsg(opSendMsg, flushCallback); - opSendMsgCallback(result, opSendMsg); - } else if (numBatches > 1) { - std::vector opSendMsgs; - std::vector results = createOpSendMsgs(opSendMsgs, flushCallback); - for (size_t i = 0; i < results.size(); i++) { - opSendMsgCallback(results[i], opSendMsgs[i]); - } - } // else numBatches is 0, do nothing - } - clear(); -} - -inline std::ostream& operator<<(std::ostream& os, const BatchMessageContainerBase& container) { - container.serialize(os); - return os; -} - -} // namespace pulsar - -#endif // LIB_BATCHMESSAGECONTAINERBASE_H_ diff --git a/pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.cc b/pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.cc deleted file mode 100644 index 7441a3e4ad060..0000000000000 --- a/pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.cc +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "BatchMessageKeyBasedContainer.h" -#include "ClientConnection.h" -#include "Commands.h" -#include "LogUtils.h" -#include "MessageImpl.h" -#include "ProducerImpl.h" -#include "TimeUtils.h" - -#include -#include - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -inline std::string getKey(const Message& msg) { - return msg.hasOrderingKey() ? msg.getOrderingKey() : msg.getPartitionKey(); -} - -BatchMessageKeyBasedContainer::BatchMessageKeyBasedContainer(const ProducerImpl& producer) - : BatchMessageContainerBase(producer) {} - -BatchMessageKeyBasedContainer::~BatchMessageKeyBasedContainer() { - LOG_DEBUG(*this << " destructed"); - LOG_INFO("[numberOfBatchesSent = " << numberOfBatchesSent_ - << "] [averageBatchSize_ = " << averageBatchSize_ << "]"); -} - -bool BatchMessageKeyBasedContainer::isFirstMessageToAdd(const Message& msg) const { - auto it = batches_.find(getKey(msg)); - if (it == batches_.end()) { - return true; - } else { - return it->second.empty(); - } -} - -bool BatchMessageKeyBasedContainer::add(const Message& msg, const SendCallback& callback) { - LOG_DEBUG("Before add: " << *this << " [message = " << msg << "]"); - batches_[getKey(msg)].add(msg, callback); - updateStats(msg); - LOG_DEBUG("After add: " << *this); - return isFull(); -} - -void BatchMessageKeyBasedContainer::clear() { - averageBatchSize_ = - (numMessages_ + averageBatchSize_ * numberOfBatchesSent_) / (numberOfBatchesSent_ + batches_.size()); - numberOfBatchesSent_ += batches_.size(); - batches_.clear(); - resetStats(); - LOG_DEBUG(*this << " clear() called"); 
-} - -Result BatchMessageKeyBasedContainer::createOpSendMsg(OpSendMsg& opSendMsg, - const FlushCallback& flushCallback) const { - if (batches_.size() < 1) { - return ResultOperationNotSupported; - } - return createOpSendMsgHelper(opSendMsg, flushCallback, batches_.begin()->second); -} - -std::vector BatchMessageKeyBasedContainer::createOpSendMsgs( - std::vector& opSendMsgs, const FlushCallback& flushCallback) const { - // Sorted the batches by sequence id - std::vector sortedBatches; - for (const auto& kv : batches_) { - sortedBatches.emplace_back(&kv.second); - } - std::sort(sortedBatches.begin(), sortedBatches.end(), - [](const MessageAndCallbackBatch* lhs, const MessageAndCallbackBatch* rhs) { - return lhs->sequenceId() < rhs->sequenceId(); - }); - - size_t numBatches = sortedBatches.size(); - opSendMsgs.resize(numBatches); - - std::vector results(numBatches); - for (size_t i = 0; i + 1 < numBatches; i++) { - results[i] = createOpSendMsgHelper(opSendMsgs[i], nullptr, *sortedBatches[i]); - } - if (numBatches > 0) { - // Add flush callback to the last batch - results.back() = createOpSendMsgHelper(opSendMsgs.back(), flushCallback, *sortedBatches.back()); - } - return results; -} - -void BatchMessageKeyBasedContainer::serialize(std::ostream& os) const { - os << "{ BatchMessageKeyBasedContainer [size = " << numMessages_ // - << "] [bytes = " << sizeInBytes_ // - << "] [maxSize = " << getMaxNumMessages() // - << "] [maxBytes = " << getMaxSizeInBytes() // - << "] [topicName = " << topicName_ // - << "] [numberOfBatchesSent_ = " << numberOfBatchesSent_ // - << "] [averageBatchSize_ = " << averageBatchSize_ // - << "]"; - - std::map sortedBatches; - for (const auto& kv : batches_) { - sortedBatches.emplace(kv.first, &kv.second); - } - for (const auto& kv : sortedBatches) { - const auto& key = kv.first; - const auto& batch = *(kv.second); - os << "\n key: " << key << " | numMessages: " << batch.size(); - } - os << " }"; -} - -} // namespace pulsar diff --git 
a/pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.h b/pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.h deleted file mode 100644 index f580a05343b92..0000000000000 --- a/pulsar-client-cpp/lib/BatchMessageKeyBasedContainer.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_BATCHMESSAGEKEYBASEDCONTAINER_H_ -#define LIB_BATCHMESSAGEKEYBASEDCONTAINER_H_ - -#include - -#include "BatchMessageContainerBase.h" -#include "MessageAndCallbackBatch.h" - -namespace pulsar { - -class BatchMessageKeyBasedContainer : public BatchMessageContainerBase { - public: - BatchMessageKeyBasedContainer(const ProducerImpl& producer); - - ~BatchMessageKeyBasedContainer(); - - size_t getNumBatches() const override { return batches_.size(); } - - bool isFirstMessageToAdd(const Message& msg) const override; - - bool add(const Message& msg, const SendCallback& callback) override; - - void clear() override; - - Result createOpSendMsg(OpSendMsg& opSendMsg, const FlushCallback& flushCallback) const override; - - std::vector createOpSendMsgs(std::vector& opSendMsgs, - const FlushCallback& flushCallback) const override; - - void serialize(std::ostream& os) const override; - - private: - // key: message key, ordering key has higher priority than partitioned key - std::unordered_map batches_; - size_t numberOfBatchesSent_ = 0; - double averageBatchSize_ = 0; - - Result createOpSendMsg(OpSendMsg& opSendMsg, const FlushCallback& flushCallback, - MessageAndCallbackBatch& batch) const; -}; - -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/lib/BinaryProtoLookupService.cc b/pulsar-client-cpp/lib/BinaryProtoLookupService.cc deleted file mode 100644 index ff42b91b3f748..0000000000000 --- a/pulsar-client-cpp/lib/BinaryProtoLookupService.cc +++ /dev/null @@ -1,185 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "BinaryProtoLookupService.h" -#include "SharedBuffer.h" - -#include - -#include "ConnectionPool.h" - -#include - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -auto BinaryProtoLookupService::getBroker(const TopicName& topicName) -> LookupResultFuture { - return findBroker(serviceNameResolver_.resolveHost(), false, topicName.toString()); -} - -auto BinaryProtoLookupService::findBroker(const std::string& address, bool authoritative, - const std::string& topic) -> LookupResultFuture { - LOG_DEBUG("find broker from " << address << ", authoritative: " << authoritative << ", topic: " << topic); - auto promise = std::make_shared>(); - // NOTE: we can use move capture for topic since C++14 - cnxPool_.getConnectionAsync(address).addListener([this, promise, topic, address]( - Result result, - const ClientConnectionWeakPtr& weakCnx) { - if (result != ResultOk) { - promise->setFailed(result); - return; - } - auto cnx = weakCnx.lock(); - if (!cnx) { - LOG_ERROR("Connection to " << address << " is expired before lookup"); - promise->setFailed(ResultNotConnected); - return; - } - auto lookupPromise = std::make_shared(); - cnx->newTopicLookup(topic, false, listenerName_, newRequestId(), lookupPromise); - lookupPromise->getFuture().addListener([this, cnx, promise, topic, address]( - Result result, const LookupDataResultPtr& data) { - if (result != ResultOk || !data) { - LOG_ERROR("Lookup failed for " << topic << ", result " << result); - promise->setFailed(result); - return; - } - - const auto responseBrokerAddress = - (serviceNameResolver_.useTls() 
? data->getBrokerUrlTls() : data->getBrokerUrl()); - if (data->isRedirect()) { - LOG_DEBUG("Lookup request is for " << topic << " redirected to " << responseBrokerAddress); - findBroker(responseBrokerAddress, data->isAuthoritative(), topic) - .addListener([promise](Result result, const LookupResult& value) { - if (result == ResultOk) { - promise->setValue(value); - } else { - promise->setFailed(result); - } - }); - } else { - LOG_DEBUG("Lookup response for " << topic << ", lookup-broker-url " << data->getBrokerUrl()); - if (data->shouldProxyThroughServiceUrl()) { - // logicalAddress is the proxy's address, we should still connect through proxy - promise->setValue({responseBrokerAddress, address}); - } else { - promise->setValue({responseBrokerAddress, responseBrokerAddress}); - } - } - }); - }); - return promise->getFuture(); -} - -/* - * @param topicName topic to get number of partitions. - * - */ -Future BinaryProtoLookupService::getPartitionMetadataAsync( - const TopicNamePtr& topicName) { - LookupDataResultPromisePtr promise = std::make_shared(); - if (!topicName) { - promise->setFailed(ResultInvalidTopicName); - return promise->getFuture(); - } - std::string lookupName = topicName->toString(); - const auto address = serviceNameResolver_.resolveHost(); - cnxPool_.getConnectionAsync(address, address) - .addListener(std::bind(&BinaryProtoLookupService::sendPartitionMetadataLookupRequest, this, - lookupName, std::placeholders::_1, std::placeholders::_2, promise)); - return promise->getFuture(); -} - -void BinaryProtoLookupService::sendPartitionMetadataLookupRequest(const std::string& topicName, Result result, - const ClientConnectionWeakPtr& clientCnx, - LookupDataResultPromisePtr promise) { - if (result != ResultOk) { - promise->setFailed(result); - return; - } - LookupDataResultPromisePtr lookupPromise = std::make_shared(); - ClientConnectionPtr conn = clientCnx.lock(); - uint64_t requestId = newRequestId(); - conn->newPartitionedMetadataLookup(topicName, 
requestId, lookupPromise); - lookupPromise->getFuture().addListener(std::bind(&BinaryProtoLookupService::handlePartitionMetadataLookup, - this, topicName, std::placeholders::_1, - std::placeholders::_2, clientCnx, promise)); -} - -void BinaryProtoLookupService::handlePartitionMetadataLookup(const std::string& topicName, Result result, - LookupDataResultPtr data, - const ClientConnectionWeakPtr& clientCnx, - LookupDataResultPromisePtr promise) { - if (data) { - LOG_DEBUG("PartitionMetadataLookup response for " << topicName << ", lookup-broker-url " - << data->getBrokerUrl()); - promise->setValue(data); - } else { - LOG_DEBUG("PartitionMetadataLookup failed for " << topicName << ", result " << result); - promise->setFailed(result); - } -} - -uint64_t BinaryProtoLookupService::newRequestId() { - Lock lock(mutex_); - return ++requestIdGenerator_; -} - -Future BinaryProtoLookupService::getTopicsOfNamespaceAsync( - const NamespaceNamePtr& nsName) { - NamespaceTopicsPromisePtr promise = std::make_shared>(); - if (!nsName) { - promise->setFailed(ResultInvalidTopicName); - return promise->getFuture(); - } - std::string namespaceName = nsName->toString(); - cnxPool_.getConnectionAsync(serviceNameResolver_.resolveHost()) - .addListener(std::bind(&BinaryProtoLookupService::sendGetTopicsOfNamespaceRequest, this, - namespaceName, std::placeholders::_1, std::placeholders::_2, promise)); - return promise->getFuture(); -} - -void BinaryProtoLookupService::sendGetTopicsOfNamespaceRequest(const std::string& nsName, Result result, - const ClientConnectionWeakPtr& clientCnx, - NamespaceTopicsPromisePtr promise) { - if (result != ResultOk) { - promise->setFailed(result); - return; - } - - ClientConnectionPtr conn = clientCnx.lock(); - uint64_t requestId = newRequestId(); - LOG_DEBUG("sendGetTopicsOfNamespaceRequest. 
requestId: " << requestId << " nsName: " << nsName); - - conn->newGetTopicsOfNamespace(nsName, requestId) - .addListener(std::bind(&BinaryProtoLookupService::getTopicsOfNamespaceListener, this, - std::placeholders::_1, std::placeholders::_2, promise)); -} - -void BinaryProtoLookupService::getTopicsOfNamespaceListener(Result result, NamespaceTopicsPtr topicsPtr, - NamespaceTopicsPromisePtr promise) { - if (result != ResultOk) { - promise->setFailed(ResultLookupError); - return; - } - - promise->setValue(topicsPtr); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BinaryProtoLookupService.h b/pulsar-client-cpp/lib/BinaryProtoLookupService.h deleted file mode 100644 index d068c3d0e646e..0000000000000 --- a/pulsar-client-cpp/lib/BinaryProtoLookupService.h +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef _PULSAR_BINARY_LOOKUP_SERVICE_HEADER_ -#define _PULSAR_BINARY_LOOKUP_SERVICE_HEADER_ - -#include -#include -#include -#include "ConnectionPool.h" -#include "Backoff.h" -#include -#include -#include "ServiceNameResolver.h" - -namespace pulsar { -class LookupDataResult; - -class PULSAR_PUBLIC BinaryProtoLookupService : public LookupService { - public: - BinaryProtoLookupService(ServiceNameResolver& serviceNameResolver, ConnectionPool& pool, - const std::string& listenerName) - : serviceNameResolver_(serviceNameResolver), cnxPool_(pool), listenerName_(listenerName) {} - - LookupResultFuture getBroker(const TopicName& topicName) override; - - Future getPartitionMetadataAsync(const TopicNamePtr& topicName) override; - - Future getTopicsOfNamespaceAsync(const NamespaceNamePtr& nsName) override; - - private: - std::mutex mutex_; - uint64_t requestIdGenerator_ = 0; - - ServiceNameResolver& serviceNameResolver_; - ConnectionPool& cnxPool_; - std::string listenerName_; - - // TODO: limit the redirect count, see https://github.com/apache/pulsar/pull/7096 - LookupResultFuture findBroker(const std::string& address, bool authoritative, const std::string& topic); - - void sendPartitionMetadataLookupRequest(const std::string& topicName, Result result, - const ClientConnectionWeakPtr& clientCnx, - LookupDataResultPromisePtr promise); - - void handlePartitionMetadataLookup(const std::string& topicName, Result result, LookupDataResultPtr data, - const ClientConnectionWeakPtr& clientCnx, - LookupDataResultPromisePtr promise); - - void sendGetTopicsOfNamespaceRequest(const std::string& nsName, Result result, - const ClientConnectionWeakPtr& clientCnx, - NamespaceTopicsPromisePtr promise); - - void getTopicsOfNamespaceListener(Result result, NamespaceTopicsPtr topicsPtr, - NamespaceTopicsPromisePtr promise); - - uint64_t newRequestId(); -}; -typedef std::shared_ptr BinaryProtoLookupServicePtr; -} // namespace pulsar - -#endif //_PULSAR_BINARY_LOOKUP_SERVICE_HEADER_ diff 
--git a/pulsar-client-cpp/lib/BlockingQueue.h b/pulsar-client-cpp/lib/BlockingQueue.h deleted file mode 100644 index d09166fdf26d2..0000000000000 --- a/pulsar-client-cpp/lib/BlockingQueue.h +++ /dev/null @@ -1,193 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_BLOCKINGQUEUE_H_ -#define LIB_BLOCKINGQUEUE_H_ - -#include -#include -#include -#include - -/** - * Following structs are defined for holding a predicate in wait() call on condition variables. - * This is done in order to avoid spurious wake up problem. 
- * Details: https://www.justsoftwaresolutions.co.uk/threading/condition-variable-spurious-wakes.html - */ -template -struct QueueNotEmpty { - const Container& queue_; - QueueNotEmpty(const Container& queue) : queue_(queue) {} - bool operator()() const { return !queue_.isEmptyNoMutex() || queue_.isClosedNoMutex(); } -}; - -template -struct QueueNotFull { - const Container& queue_; - QueueNotFull(const Container& queue) : queue_(queue) {} - bool operator()() const { return !queue_.isFullNoMutex() || queue_.isClosedNoMutex(); } -}; - -template -class BlockingQueue { - public: - typedef typename boost::circular_buffer Container; - typedef typename Container::iterator iterator; - typedef typename Container::const_iterator const_iterator; - - BlockingQueue(size_t maxSize) : maxSize_(maxSize), mutex_(), queue_(maxSize) {} - - bool push(const T& value) { - Lock lock(mutex_); - - // If the queue is full, wait for space to be available - queueFullCondition.wait(lock, QueueNotFull >(*this)); - - if (isClosedNoMutex()) { - return false; - } - - bool wasEmpty = queue_.empty(); - queue_.push_back(value); - lock.unlock(); - if (wasEmpty) { - // Notify that an element is pushed - queueEmptyCondition.notify_all(); - } - - return true; - } - - bool pop(T& value) { - Lock lock(mutex_); - - // If the queue is empty, wait until an element is available to be popped - queueEmptyCondition.wait(lock, QueueNotEmpty >(*this)); - - if (isClosedNoMutex()) { - return false; - } - - value = queue_.front(); - bool wasFull = isFullNoMutex(); - queue_.pop_front(); - - lock.unlock(); - - if (wasFull) { - // Notify that an element is popped - queueFullCondition.notify_all(); - } - - return true; - } - - template - bool pop(T& value, const Duration& timeout) { - Lock lock(mutex_); - if (!queueEmptyCondition.wait_for(lock, timeout, QueueNotEmpty >(*this))) { - return false; - } - - if (isClosedNoMutex()) { - return false; - } - - bool wasFull = isFullNoMutex(); - value = queue_.front(); - 
queue_.pop_front(); - lock.unlock(); - - if (wasFull) { - // Notify that an element is popped - queueFullCondition.notify_all(); - } - - return true; - } - - // Check the 1st element of the queue - bool peek(T& value) { - Lock lock(mutex_); - if (queue_.empty()) { - return false; - } - - value = queue_.front(); - return true; - } - - // Remove all elements from the queue - void clear() { - Lock lock(mutex_); - queue_.clear(); - queueFullCondition.notify_all(); - } - - size_t size() const { - Lock lock(mutex_); - return queue_.size(); - } - - size_t maxSize() const { return maxSize_; } - - bool empty() const { - Lock lock(mutex_); - return isEmptyNoMutex(); - } - - bool full() const { - Lock lock(mutex_); - return isFullNoMutex(); - } - - const_iterator begin() const { return queue_.begin(); } - - const_iterator end() const { return queue_.end(); } - - iterator begin() { return queue_.begin(); } - - iterator end() { return queue_.end(); } - - void close() { - Lock lock(mutex_); - isClosed_ = true; - queueEmptyCondition.notify_all(); - queueFullCondition.notify_all(); - } - - private: - bool isEmptyNoMutex() const { return queue_.empty(); } - - bool isFullNoMutex() const { return queue_.size() == maxSize_; } - - bool isClosedNoMutex() const { return isClosed_; } - - const size_t maxSize_; - mutable std::mutex mutex_; - std::condition_variable queueFullCondition; - std::condition_variable queueEmptyCondition; - Container queue_; - bool isClosed_ = false; - - typedef std::unique_lock Lock; - friend struct QueueNotEmpty >; - friend struct QueueNotFull >; -}; - -#endif /* LIB_BLOCKINGQUEUE_H_ */ diff --git a/pulsar-client-cpp/lib/BoostHash.cc b/pulsar-client-cpp/lib/BoostHash.cc deleted file mode 100644 index 90876b77f8b0a..0000000000000 --- a/pulsar-client-cpp/lib/BoostHash.cc +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "BoostHash.h" - -namespace pulsar { - -BoostHash::BoostHash() : hash() {} - -int32_t BoostHash::makeHash(const std::string& key) { - return static_cast(hash(key) & std::numeric_limits::max()); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BoostHash.h b/pulsar-client-cpp/lib/BoostHash.h deleted file mode 100644 index 10d62e129b4df..0000000000000 --- a/pulsar-client-cpp/lib/BoostHash.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef BOOST_HASH_HPP_ -#define BOOST_HASH_HPP_ - -#include -#include "Hash.h" - -#include -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC BoostHash : public Hash { - public: - BoostHash(); - int32_t makeHash(const std::string &key); - - private: - boost::hash hash; -}; -} // namespace pulsar - -#endif /* BOOST_HASH_HPP_ */ diff --git a/pulsar-client-cpp/lib/BrokerConsumerStats.cc b/pulsar-client-cpp/lib/BrokerConsumerStats.cc deleted file mode 100644 index e3e1dea61afbe..0000000000000 --- a/pulsar-client-cpp/lib/BrokerConsumerStats.cc +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -namespace pulsar { -BrokerConsumerStats::BrokerConsumerStats(std::shared_ptr impl) : impl_(impl) {} - -std::shared_ptr BrokerConsumerStats::getImpl() const { return impl_; } - -bool BrokerConsumerStats::isValid() const { return impl_->isValid(); } - -PULSAR_PUBLIC std::ostream& operator<<(std::ostream& os, const BrokerConsumerStats& obj) { - os << "\nBrokerConsumerStats [" - << "validTill_ = " << obj.isValid() << ", msgRateOut_ = " << obj.getMsgRateOut() - << ", msgThroughputOut_ = " << obj.getMsgThroughputOut() - << ", msgRateRedeliver_ = " << obj.getMsgRateRedeliver() - << ", consumerName_ = " << obj.getConsumerName() - << ", availablePermits_ = " << obj.getAvailablePermits() - << ", unackedMessages_ = " << obj.getUnackedMessages() - << ", blockedConsumerOnUnackedMsgs_ = " << obj.isBlockedConsumerOnUnackedMsgs() - << ", address_ = " << obj.getAddress() << ", connectedSince_ = " << obj.getConnectedSince() - << ", type_ = " << obj.getType() << ", msgRateExpired_ = " << obj.getMsgRateExpired() - << ", msgBacklog_ = " << obj.getMsgBacklog() << "]"; - return os; -} - -double BrokerConsumerStats::getMsgRateOut() const { return impl_->getMsgRateOut(); } - -double BrokerConsumerStats::getMsgThroughputOut() const { return impl_->getMsgThroughputOut(); } - -double BrokerConsumerStats::getMsgRateRedeliver() const { return impl_->getMsgRateRedeliver(); } - -const std::string BrokerConsumerStats::getConsumerName() const { return impl_->getConsumerName(); } - -uint64_t BrokerConsumerStats::getAvailablePermits() const { return impl_->getAvailablePermits(); } - -uint64_t BrokerConsumerStats::getUnackedMessages() const { return impl_->getUnackedMessages(); } - -bool BrokerConsumerStats::isBlockedConsumerOnUnackedMsgs() const { - return impl_->isBlockedConsumerOnUnackedMsgs(); -} - -const std::string BrokerConsumerStats::getAddress() const { return impl_->getAddress(); } - -const std::string BrokerConsumerStats::getConnectedSince() const { 
return impl_->getConnectedSince(); } - -const ConsumerType BrokerConsumerStats::getType() const { return impl_->getType(); } - -double BrokerConsumerStats::getMsgRateExpired() const { return impl_->getMsgRateExpired(); } - -uint64_t BrokerConsumerStats::getMsgBacklog() const { return impl_->getMsgBacklog(); } -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BrokerConsumerStatsImpl.cc b/pulsar-client-cpp/lib/BrokerConsumerStatsImpl.cc deleted file mode 100644 index 220415adcd511..0000000000000 --- a/pulsar-client-cpp/lib/BrokerConsumerStatsImpl.cc +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -namespace pulsar { -BrokerConsumerStatsImpl::BrokerConsumerStatsImpl() - : validTill_(boost::posix_time::microsec_clock::universal_time()){}; - -BrokerConsumerStatsImpl::BrokerConsumerStatsImpl(double msgRateOut, double msgThroughputOut, - double msgRateRedeliver, std::string consumerName, - uint64_t availablePermits, uint64_t unackedMessages, - bool blockedConsumerOnUnackedMsgs, std::string address, - std::string connectedSince, const std::string& type, - double msgRateExpired, uint64_t msgBacklog) - : msgRateOut_(msgRateOut), - msgThroughputOut_(msgThroughputOut), - msgRateRedeliver_(msgRateRedeliver), - consumerName_(consumerName), - availablePermits_(availablePermits), - unackedMessages_(unackedMessages), - blockedConsumerOnUnackedMsgs_(blockedConsumerOnUnackedMsgs), - address_(address), - connectedSince_(connectedSince), - type_(convertStringToConsumerType(type)), - msgRateExpired_(msgRateExpired), - msgBacklog_(msgBacklog) {} - -bool BrokerConsumerStatsImpl::isValid() const { - return boost::posix_time::microsec_clock::universal_time() <= validTill_; -} - -std::ostream& operator<<(std::ostream& os, const BrokerConsumerStatsImpl& obj) { - os << "\nBrokerConsumerStatsImpl [" - << "validTill_ = " << obj.isValid() << ", msgRateOut_ = " << obj.getMsgRateOut() - << ", msgThroughputOut_ = " << obj.getMsgThroughputOut() - << ", msgRateRedeliver_ = " << obj.getMsgRateRedeliver() - << ", consumerName_ = " << obj.getConsumerName() - << ", availablePermits_ = " << obj.getAvailablePermits() - << ", unackedMessages_ = " << obj.getUnackedMessages() - << ", blockedConsumerOnUnackedMsgs_ = " << obj.isBlockedConsumerOnUnackedMsgs() - << ", address_ = " << obj.getAddress() << ", connectedSince_ = " << obj.getConnectedSince() - << ", type_ = " << obj.getType() << ", msgRateExpired_ = " << obj.getMsgRateExpired() - << ", msgBacklog_ = " << obj.getMsgBacklog() << "]"; - return os; -} - -double BrokerConsumerStatsImpl::getMsgRateOut() const { return 
msgRateOut_; } - -double BrokerConsumerStatsImpl::getMsgThroughputOut() const { return msgThroughputOut_; } - -double BrokerConsumerStatsImpl::getMsgRateRedeliver() const { return msgRateRedeliver_; } - -const std::string BrokerConsumerStatsImpl::getConsumerName() const { return consumerName_; } - -uint64_t BrokerConsumerStatsImpl::getAvailablePermits() const { return availablePermits_; } - -uint64_t BrokerConsumerStatsImpl::getUnackedMessages() const { return unackedMessages_; } - -bool BrokerConsumerStatsImpl::isBlockedConsumerOnUnackedMsgs() const { return blockedConsumerOnUnackedMsgs_; } - -const std::string BrokerConsumerStatsImpl::getAddress() const { return address_; } - -const std::string BrokerConsumerStatsImpl::getConnectedSince() const { return connectedSince_; } - -const ConsumerType BrokerConsumerStatsImpl::getType() const { return type_; } - -double BrokerConsumerStatsImpl::getMsgRateExpired() const { return msgRateExpired_; } - -uint64_t BrokerConsumerStatsImpl::getMsgBacklog() const { return msgBacklog_; } - -void BrokerConsumerStatsImpl::setCacheTime(uint64_t cacehTimeInMs) { - validTill_ = - boost::posix_time::microsec_clock::universal_time() + boost::posix_time::milliseconds(cacehTimeInMs); -} - -ConsumerType BrokerConsumerStatsImpl::convertStringToConsumerType(const std::string& str) { - if (str == "ConsumerFailover" || str == "Failover") { - return ConsumerFailover; - } else if (str == "ConsumerShared" || str == "Shared") { - return ConsumerShared; - } else if (str == "ConsumerKeyShared" || str == "KeyShared") { - return ConsumerKeyShared; - } else { - return ConsumerExclusive; - } -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/BrokerConsumerStatsImpl.h b/pulsar-client-cpp/lib/BrokerConsumerStatsImpl.h deleted file mode 100644 index eb238c615c75a..0000000000000 --- a/pulsar-client-cpp/lib/BrokerConsumerStatsImpl.h +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_BROKERCONSUMERSTATSIMPL_H -#define PULSAR_CPP_BROKERCONSUMERSTATSIMPL_H - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC BrokerConsumerStatsImpl : public BrokerConsumerStatsImplBase { - private: - /** validTill_ - Stats will be valid till this time.*/ - boost::posix_time::ptime validTill_; - - /** Total rate of messages delivered to the consumer. msg/s */ - double msgRateOut_; - - /** Total throughput delivered to the consumer. bytes/s */ - double msgThroughputOut_; - - /** Total rate of messages redelivered by this consumer. 
msg/s */ - double msgRateRedeliver_; - - /** Name of the consumer */ - std::string consumerName_; - - /** Number of available message permits for the consumer */ - uint64_t availablePermits_; - - /** Number of unacknowledged messages for the consumer */ - uint64_t unackedMessages_; - - /** Flag to verify if consumer is blocked due to reaching threshold of unacked messages */ - bool blockedConsumerOnUnackedMsgs_; - - /** Address of this consumer */ - std::string address_; - - /** Timestamp of connection */ - std::string connectedSince_; - - /** Whether this subscription is Exclusive or Shared or Failover */ - ConsumerType type_; - - /** Total rate of messages expired on this subscription. msg/s */ - double msgRateExpired_; - - /** Number of messages in the subscription backlog */ - uint64_t msgBacklog_; - - public: - BrokerConsumerStatsImpl(); - - BrokerConsumerStatsImpl(double msgRateOut, double msgThroughputOut, double msgRateRedeliver, - std::string consumerName, uint64_t availablePermits, uint64_t unackedMessages, - bool blockedConsumerOnUnackedMsgs, std::string address, - std::string connectedSince, const std::string& type, double msgRateExpired, - uint64_t msgBacklog); - - /** Returns true if the Stats are still valid **/ - virtual bool isValid() const; - - /** Returns the rate of messages delivered to the consumer. msg/s */ - virtual double getMsgRateOut() const; - - /** Returns the throughput delivered to the consumer. bytes/s */ - virtual double getMsgThroughputOut() const; - - /** Returns the rate of messages redelivered by this consumer. 
msg/s */ - virtual double getMsgRateRedeliver() const; - - /** Returns the Name of the consumer */ - virtual const std::string getConsumerName() const; - - /** Returns the Number of available message permits for the consumer */ - virtual uint64_t getAvailablePermits() const; - - /** Returns the Number of unacknowledged messages for the consumer */ - virtual uint64_t getUnackedMessages() const; - - /** Returns true if the consumer is blocked due to unacked messages. */ - virtual bool isBlockedConsumerOnUnackedMsgs() const; - - /** Returns the Address of this consumer */ - virtual const std::string getAddress() const; - - /** Returns the Timestamp of connection */ - virtual const std::string getConnectedSince() const; - - /** Returns Whether this subscription is Exclusive or Shared or Failover */ - virtual const ConsumerType getType() const; - - /** Returns the rate of messages expired on this subscription. msg/s */ - virtual double getMsgRateExpired() const; - - /** Returns the Number of messages in the subscription backlog */ - virtual uint64_t getMsgBacklog() const; - - void setCacheTime(uint64_t cacehTimeInMs); - - friend std::ostream& operator<<(std::ostream& os, const BrokerConsumerStatsImpl& obj); - - static ConsumerType convertStringToConsumerType(const std::string& str); -}; -} // namespace pulsar - -#endif // PULSAR_CPP_BROKERCONSUMERSTATSIMPL_H diff --git a/pulsar-client-cpp/lib/BrokerConsumerStatsImplBase.h b/pulsar-client-cpp/lib/BrokerConsumerStatsImplBase.h deleted file mode 100644 index 282dfc0e49f53..0000000000000 --- a/pulsar-client-cpp/lib/BrokerConsumerStatsImplBase.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_BROKERCONSUMERSTATSIMPLBASE_H -#define PULSAR_CPP_BROKERCONSUMERSTATSIMPLBASE_H - -#include -#include - -namespace pulsar { -class BrokerConsumerStatsImplBase { - public: - virtual ~BrokerConsumerStatsImplBase() = default; - /** Returns true if the Stats are still valid **/ - virtual bool isValid() const = 0; - - /** Returns the rate of messages delivered to the consumer. msg/s */ - virtual double getMsgRateOut() const = 0; - - /** Returns the throughput delivered to the consumer. bytes/s */ - virtual double getMsgThroughputOut() const = 0; - - /** Returns the rate of messages redelivered by this consumer. msg/s */ - virtual double getMsgRateRedeliver() const = 0; - - /** Returns the Name of the consumer */ - virtual const std::string getConsumerName() const = 0; - - /** Returns the Number of available message permits for the consumer */ - virtual uint64_t getAvailablePermits() const = 0; - - /** Returns the Number of unacknowledged messages for the consumer */ - virtual uint64_t getUnackedMessages() const = 0; - - /** Returns true if the consumer is blocked due to unacked messages. 
*/ - virtual bool isBlockedConsumerOnUnackedMsgs() const = 0; - - /** Returns the Address of this consumer */ - virtual const std::string getAddress() const = 0; - - /** Returns the Timestamp of connection */ - virtual const std::string getConnectedSince() const = 0; - - /** Returns Whether this subscription is Exclusive or Shared or Failover */ - virtual const ConsumerType getType() const = 0; - - /** Returns the rate of messages expired on this subscription. msg/s */ - virtual double getMsgRateExpired() const = 0; - - /** Returns the Number of messages in the subscription backlog */ - virtual uint64_t getMsgBacklog() const = 0; -}; -typedef std::shared_ptr BrokerConsumerStatsImplBasePtr; -} // namespace pulsar - -#endif // PULSAR_CPP_BROKERCONSUMERSTATSIMPLBASE_H diff --git a/pulsar-client-cpp/lib/CMakeLists.txt b/pulsar-client-cpp/lib/CMakeLists.txt deleted file mode 100644 index f3f4f79bb45d7..0000000000000 --- a/pulsar-client-cpp/lib/CMakeLists.txt +++ /dev/null @@ -1,172 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -file(GLOB PULSAR_SOURCES *.cc *.h lz4/*.cc lz4/*.h checksum/*.cc checksum/*.h stats/*.cc stats/*.h c/*.cc c/*.h auth/*.cc auth/*.h auth/athenz/*.cc auth/athenz/*.h) - -execute_process(COMMAND ${CMAKE_SOURCE_DIR}/../src/get-project-version.py OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE PV) -set (CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -D_PULSAR_VERSION_INTERNAL_=\\\"${PV}\\\"") - -if (NOT PROTOC_PATH) - set(PROTOC_PATH protoc) -endif() - -set(LIB_AUTOGEN_DIR ${AUTOGEN_DIR}/lib) -file(MAKE_DIRECTORY ${LIB_AUTOGEN_DIR}) -include_directories(${LIB_AUTOGEN_DIR}) - -# Protobuf generation is only supported natively starting from CMake 3.8 -# Using custom command for now -set(PROTO_SOURCES ${LIB_AUTOGEN_DIR}/PulsarApi.pb.cc ${LIB_AUTOGEN_DIR}/PulsarApi.pb.h) -set(PULSAR_SOURCES ${PULSAR_SOURCES} ${PROTO_SOURCES}) -ADD_CUSTOM_COMMAND( - OUTPUT ${PROTO_SOURCES} - COMMAND ${PROTOC_PATH} -I ../../pulsar-common/src/main/proto ../../pulsar-common/src/main/proto/PulsarApi.proto --cpp_out=${LIB_AUTOGEN_DIR} - DEPENDS - ../../pulsar-common/src/main/proto/PulsarApi.proto - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - -set(LIBRARY_VERSION $ENV{PULSAR_LIBRARY_VERSION}) -if (NOT LIBRARY_VERSION) - set(LIBRARY_VERSION ${PV}) -endif(NOT LIBRARY_VERSION) - -if (MSVC) - find_package(dlfcn-win32 REQUIRED) - set(CMAKE_DL_LIBS dlfcn-win32::dl psapi.lib) -endif(MSVC) - - -set(LIB_NAME_SHARED ${LIB_NAME}) - -# this is the "object library" target: compiles the sources only once -add_library(PULSAR_OBJECT_LIB OBJECT ${PULSAR_SOURCES}) -set_property(TARGET PULSAR_OBJECT_LIB PROPERTY POSITION_INDEPENDENT_CODE 1) - -if (BUILD_DYNAMIC_LIB) - add_library(pulsarShared SHARED $) - set_property(TARGET pulsarShared PROPERTY OUTPUT_NAME ${LIB_NAME_SHARED}) - set_property(TARGET pulsarShared PROPERTY VERSION ${LIBRARY_VERSION}) - target_link_libraries(pulsarShared ${COMMON_LIBS} ${CMAKE_DL_LIBS}) - if (MSVC) - target_include_directories(pulsarShared PRIVATE ${dlfcn-win32_INCLUDE_DIRS}) - 
target_link_options(pulsarShared PRIVATE $<$:/NODEFAULTLIB:MSVCRT>) - endif() -endif() - -include(CheckCXXSymbolExists) -check_cxx_symbol_exists(getauxval auvx.h HAVE_AUXV_GETAUXVAL) -if(HAVE_AUXV_GETAUXVAL) - add_definitions(-DPULSAR_AUXV_GETAUXVAL_PRESENT) -endif() - -### pulsarSharedNossl not static link ssl, it could avoid rebuild libpulsar when ssl lib need update. -### pulsarSharedNossl is build under condition LINK_STATIC=ON, we should replace static ssl libs with dynamic libs. -SET(COMMON_LIBS_NOSSL ${COMMON_LIBS}) -if (NOT ${RECORD_OPENSSL_SSL_LIBRARY} MATCHES ".+\\.a$") - LIST(REMOVE_ITEM COMMON_LIBS_NOSSL ${OPENSSL_SSL_LIBRARY}) - LIST(APPEND COMMON_LIBS_NOSSL ${RECORD_OPENSSL_SSL_LIBRARY}) -endif () -if (NOT ${RECORD_OPENSSL_CRYPTO_LIBRARY} MATCHES ".+\\.a$") - LIST(REMOVE_ITEM COMMON_LIBS_NOSSL ${OPENSSL_CRYPTO_LIBRARY}) - LIST(APPEND COMMON_LIBS_NOSSL ${RECORD_OPENSSL_CRYPTO_LIBRARY}) -endif () - -if (BUILD_DYNAMIC_LIB AND LINK_STATIC) - add_library(pulsarSharedNossl SHARED $) - set_property(TARGET pulsarSharedNossl PROPERTY OUTPUT_NAME ${LIB_NAME_SHARED}nossl) - set_property(TARGET pulsarSharedNossl PROPERTY VERSION ${LIBRARY_VERSION}) - target_link_libraries(pulsarSharedNossl ${COMMON_LIBS_NOSSL} ${CMAKE_DL_LIBS}) -endif() - -if (BUILD_STATIC_LIB) - add_library(pulsarStatic STATIC $) - if (MSVC) - set_property(TARGET pulsarStatic PROPERTY OUTPUT_NAME "${LIB_NAME}-static") - target_include_directories(pulsarStatic PRIVATE ${dlfcn-win32_INCLUDE_DIRS}) - else () - set_property(TARGET pulsarStatic PROPERTY OUTPUT_NAME ${LIB_NAME}) - endif() - set_property(TARGET pulsarStatic PROPERTY VERSION ${LIBRARY_VERSION}) - target_compile_definitions(pulsarStatic PRIVATE PULSAR_STATIC) -endif() - -# When linking statically, install a libpulsar.a that contains all the -# required dependencies except ssl -if (LINK_STATIC AND BUILD_STATIC_LIB) - if (MSVC) - - # This function is to remove either "debug" or "optimized" library names - # out of the COMMON_LIBS list and 
return the sanitized list of libraries - function(remove_libtype LIBLIST LIBTYPE OUTLIST) - list(FIND LIBLIST ${LIBTYPE} LIST_INDEX) - while(${LIST_INDEX} GREATER -1) - list(REMOVE_AT LIBLIST ${LIST_INDEX}) - list(REMOVE_AT LIBLIST ${LIST_INDEX}) - list(FIND LIBLIST ${LIBTYPE} LIST_INDEX) - endwhile() - list(REMOVE_ITEM LIBLIST "debug") - list(REMOVE_ITEM LIBLIST "optimized") - string(REPLACE ";" " " TEMP_OUT "${LIBLIST}") - set(${OUTLIST} ${TEMP_OUT} PARENT_SCOPE) - endfunction(remove_libtype) - - add_library(pulsarStaticWithDeps STATIC ${PULSAR_SOURCES}) - target_include_directories(pulsarStaticWithDeps PRIVATE ${dlfcn-win32_INCLUDE_DIRS}) - remove_libtype("${COMMON_LIBS}" "optimized" DEBUG_STATIC_LIBS) - remove_libtype("${COMMON_LIBS}" "debug" STATIC_LIBS) - set_property(TARGET pulsarStaticWithDeps PROPERTY STATIC_LIBRARY_FLAGS_DEBUG ${DEBUG_STATIC_LIBS}) - set_property(TARGET pulsarStaticWithDeps PROPERTY STATIC_LIBRARY_FLAGS_RELEASE ${STATIC_LIBS}) - set_property(TARGET pulsarStaticWithDeps PROPERTY OUTPUT_NAME ${LIB_NAME}WithDeps) - set_property(TARGET pulsarStaticWithDeps PROPERTY VERSION ${LIBRARY_VERSION}) - install(TARGETS pulsarStaticWithDeps DESTINATION lib) - else() - # Build a list of the requird .a libs (except ssl) to merge - SET(STATIC_LIBS "") - foreach (LIB IN LISTS COMMON_LIBS) - if (${LIB} MATCHES ".+\\.a$" AND NOT ${LIB} MATCHES ${OPENSSL_SSL_LIBRARY} AND NOT ${LIB} MATCHES ${OPENSSL_CRYPTO_LIBRARY}) - set(STATIC_LIBS "${STATIC_LIBS} ${LIB}") - endif() - endforeach() - - add_custom_target(pulsarStaticWithDeps - ALL - BYPRODUCTS merged-library - COMMAND ./build-support/merge_archives.sh libpulsar.a $ ${STATIC_LIBS} && mv merged-library/libpulsar.a lib/libpulsarwithdeps.a - DEPENDS pulsarStatic - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) - endif(MSVC) -elseif(BUILD_STATIC_LIB) - # Install regular libpulsar.a - target_link_libraries(pulsarStatic ${COMMON_LIBS}) - install(TARGETS pulsarStatic DESTINATION lib) -endif() - -if (BUILD_STATIC_LIB) - 
install(TARGETS pulsarStatic DESTINATION lib) -endif() - -if (BUILD_DYNAMIC_LIB) - install(TARGETS pulsarShared RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) -endif() - -if (BUILD_DYNAMIC_LIB AND LINK_STATIC) - install(TARGETS pulsarSharedNossl RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) -endif() - -install(DIRECTORY "../include/pulsar" DESTINATION include) diff --git a/pulsar-client-cpp/lib/Client.cc b/pulsar-client-cpp/lib/Client.cc deleted file mode 100644 index c72232a38c623..0000000000000 --- a/pulsar-client-cpp/lib/Client.cc +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -#include - -#include "ClientImpl.h" -#include "Utils.h" -#include "ExecutorService.h" -#include "LogUtils.h" - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -Client::Client(const std::shared_ptr impl) : impl_(impl) {} - -Client::Client(const std::string& serviceUrl) - : impl_(std::make_shared(serviceUrl, ClientConfiguration(), true)) {} - -Client::Client(const std::string& serviceUrl, const ClientConfiguration& clientConfiguration) - : impl_(std::make_shared(serviceUrl, clientConfiguration, true)) {} - -Client::Client(const std::string& serviceUrl, const ClientConfiguration& clientConfiguration, - bool poolConnections) - : impl_(std::make_shared(serviceUrl, clientConfiguration, poolConnections)) {} - -Result Client::createProducer(const std::string& topic, Producer& producer) { - return createProducer(topic, ProducerConfiguration(), producer); -} - -Result Client::createProducer(const std::string& topic, const ProducerConfiguration& conf, - Producer& producer) { - Promise promise; - createProducerAsync(topic, conf, WaitForCallbackValue(promise)); - Future future = promise.getFuture(); - - return future.get(producer); -} - -void Client::createProducerAsync(const std::string& topic, CreateProducerCallback callback) { - createProducerAsync(topic, ProducerConfiguration(), callback); -} - -void Client::createProducerAsync(const std::string& topic, ProducerConfiguration conf, - CreateProducerCallback callback) { - impl_->createProducerAsync(topic, conf, callback); -} - -Result Client::subscribe(const std::string& topic, const std::string& subscriptionName, Consumer& consumer) { - return subscribe(topic, subscriptionName, ConsumerConfiguration(), consumer); -} - -Result Client::subscribe(const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration& conf, Consumer& consumer) { - Promise promise; - subscribeAsync(topic, subscriptionName, conf, WaitForCallbackValue(promise)); - Future future = 
promise.getFuture(); - - return future.get(consumer); -} - -void Client::subscribeAsync(const std::string& topic, const std::string& subscriptionName, - SubscribeCallback callback) { - subscribeAsync(topic, subscriptionName, ConsumerConfiguration(), callback); -} - -void Client::subscribeAsync(const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback) { - LOG_INFO("Subscribing on Topic :" << topic); - impl_->subscribeAsync(topic, subscriptionName, conf, callback); -} - -Result Client::subscribe(const std::vector& topics, const std::string& subscriptionName, - Consumer& consumer) { - return subscribe(topics, subscriptionName, ConsumerConfiguration(), consumer); -} - -Result Client::subscribe(const std::vector& topics, const std::string& subscriptionName, - const ConsumerConfiguration& conf, Consumer& consumer) { - Promise promise; - subscribeAsync(topics, subscriptionName, conf, WaitForCallbackValue(promise)); - Future future = promise.getFuture(); - - return future.get(consumer); -} - -void Client::subscribeAsync(const std::vector& topics, const std::string& subscriptionName, - SubscribeCallback callback) { - subscribeAsync(topics, subscriptionName, ConsumerConfiguration(), callback); -} - -void Client::subscribeAsync(const std::vector& topics, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback) { - impl_->subscribeAsync(topics, subscriptionName, conf, callback); -} - -Result Client::subscribeWithRegex(const std::string& regexPattern, const std::string& subscriptionName, - Consumer& consumer) { - return subscribeWithRegex(regexPattern, subscriptionName, ConsumerConfiguration(), consumer); -} - -Result Client::subscribeWithRegex(const std::string& regexPattern, const std::string& subscriptionName, - const ConsumerConfiguration& conf, Consumer& consumer) { - Promise promise; - subscribeWithRegexAsync(regexPattern, subscriptionName, conf, 
WaitForCallbackValue(promise)); - Future future = promise.getFuture(); - - return future.get(consumer); -} - -void Client::subscribeWithRegexAsync(const std::string& regexPattern, const std::string& subscriptionName, - SubscribeCallback callback) { - subscribeWithRegexAsync(regexPattern, subscriptionName, ConsumerConfiguration(), callback); -} - -void Client::subscribeWithRegexAsync(const std::string& regexPattern, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback) { - impl_->subscribeWithRegexAsync(regexPattern, subscriptionName, conf, callback); -} - -Result Client::createReader(const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf, Reader& reader) { - Promise promise; - createReaderAsync(topic, startMessageId, conf, WaitForCallbackValue(promise)); - Future future = promise.getFuture(); - - return future.get(reader); -} - -void Client::createReaderAsync(const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf, ReaderCallback callback) { - impl_->createReaderAsync(topic, startMessageId, conf, callback); -} - -Result Client::getPartitionsForTopic(const std::string& topic, std::vector& partitions) { - Promise > promise; - getPartitionsForTopicAsync(topic, WaitForCallbackValue >(promise)); - Future > future = promise.getFuture(); - - return future.get(partitions); -} - -void Client::getPartitionsForTopicAsync(const std::string& topic, GetPartitionsCallback callback) { - impl_->getPartitionsForTopicAsync(topic, callback); -} - -Result Client::close() { - Promise promise; - closeAsync(WaitForCallback(promise)); - - Result result; - promise.getFuture().get(result); - return result; -} - -void Client::closeAsync(CloseCallback callback) { impl_->closeAsync(callback); } - -void Client::shutdown() { impl_->shutdown(); } - -uint64_t Client::getNumberOfProducers() { return impl_->getNumberOfProducers(); } -uint64_t Client::getNumberOfConsumers() { 
return impl_->getNumberOfConsumers(); } -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ClientConfiguration.cc b/pulsar-client-cpp/lib/ClientConfiguration.cc deleted file mode 100644 index 4072f63136108..0000000000000 --- a/pulsar-client-cpp/lib/ClientConfiguration.cc +++ /dev/null @@ -1,153 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -namespace pulsar { - -ClientConfiguration::ClientConfiguration() : impl_(std::make_shared()) {} - -ClientConfiguration::~ClientConfiguration() {} - -ClientConfiguration::ClientConfiguration(const ClientConfiguration& x) : impl_(x.impl_) {} - -ClientConfiguration& ClientConfiguration::operator=(const ClientConfiguration& x) { - impl_ = x.impl_; - return *this; -} - -ClientConfiguration& ClientConfiguration::setMemoryLimit(uint64_t memoryLimitBytes) { - impl_->memoryLimit = memoryLimitBytes; - return *this; -} - -uint64_t ClientConfiguration::getMemoryLimit() const { return impl_->memoryLimit; } - -ClientConfiguration& ClientConfiguration::setAuth(const AuthenticationPtr& authentication) { - impl_->authenticationPtr = authentication; - return *this; -} - -Authentication& ClientConfiguration::getAuth() const { return *impl_->authenticationPtr; } - -const AuthenticationPtr& ClientConfiguration::getAuthPtr() const { return impl_->authenticationPtr; } - -ClientConfiguration& ClientConfiguration::setOperationTimeoutSeconds(int timeout) { - impl_->operationTimeoutSeconds = timeout; - return *this; -} - -int ClientConfiguration::getOperationTimeoutSeconds() const { return impl_->operationTimeoutSeconds; } - -ClientConfiguration& ClientConfiguration::setIOThreads(int threads) { - impl_->ioThreads = threads; - return *this; -} - -int ClientConfiguration::getIOThreads() const { return impl_->ioThreads; } - -ClientConfiguration& ClientConfiguration::setMessageListenerThreads(int threads) { - impl_->messageListenerThreads = threads; - return *this; -} - -int ClientConfiguration::getMessageListenerThreads() const { return impl_->messageListenerThreads; } - -ClientConfiguration& ClientConfiguration::setUseTls(bool useTls) { - impl_->useTls = useTls; - return *this; -} - -bool ClientConfiguration::isUseTls() const { return impl_->useTls; } - -ClientConfiguration& ClientConfiguration::setValidateHostName(bool validateHostName) { - impl_->validateHostName = 
validateHostName; - return *this; -} - -bool ClientConfiguration::isValidateHostName() const { return impl_->validateHostName; } - -ClientConfiguration& ClientConfiguration::setTlsTrustCertsFilePath(const std::string& filePath) { - impl_->tlsTrustCertsFilePath = filePath; - return *this; -} - -const std::string& ClientConfiguration::getTlsTrustCertsFilePath() const { - return impl_->tlsTrustCertsFilePath; -} - -ClientConfiguration& ClientConfiguration::setTlsAllowInsecureConnection(bool allowInsecure) { - impl_->tlsAllowInsecureConnection = allowInsecure; - return *this; -} - -bool ClientConfiguration::isTlsAllowInsecureConnection() const { return impl_->tlsAllowInsecureConnection; } - -ClientConfiguration& ClientConfiguration::setConcurrentLookupRequest(int concurrentLookupRequest) { - impl_->concurrentLookupRequest = concurrentLookupRequest; - return *this; -} - -int ClientConfiguration::getConcurrentLookupRequest() const { return impl_->concurrentLookupRequest; } - -ClientConfiguration& ClientConfiguration::setLogConfFilePath(const std::string& logConfFilePath) { - impl_->logConfFilePath = logConfFilePath; - return *this; -} - -const std::string& ClientConfiguration::getLogConfFilePath() const { return impl_->logConfFilePath; } - -ClientConfiguration& ClientConfiguration::setLogger(LoggerFactory* loggerFactory) { - impl_->loggerFactory.reset(loggerFactory); - return *this; -} - -ClientConfiguration& ClientConfiguration::setStatsIntervalInSeconds( - const unsigned int& statsIntervalInSeconds) { - impl_->statsIntervalInSeconds = statsIntervalInSeconds; - return *this; -} - -const unsigned int& ClientConfiguration::getStatsIntervalInSeconds() const { - return impl_->statsIntervalInSeconds; -} - -ClientConfiguration& ClientConfiguration::setPartititionsUpdateInterval(unsigned int intervalInSeconds) { - impl_->partitionsUpdateInterval = intervalInSeconds; - return *this; -} - -unsigned int ClientConfiguration::getPartitionsUpdateInterval() const { - return 
impl_->partitionsUpdateInterval; -} - -ClientConfiguration& ClientConfiguration::setListenerName(const std::string& listenerName) { - impl_->listenerName = listenerName; - return *this; -} - -const std::string& ClientConfiguration::getListenerName() const { return impl_->listenerName; } - -ClientConfiguration& ClientConfiguration::setConnectionTimeout(int timeoutMs) { - impl_->connectionTimeoutMs = timeoutMs; - return *this; -} - -int ClientConfiguration::getConnectionTimeout() const { return impl_->connectionTimeoutMs; } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ClientConfigurationImpl.h b/pulsar-client-cpp/lib/ClientConfigurationImpl.h deleted file mode 100644 index 887ecf2037851..0000000000000 --- a/pulsar-client-cpp/lib/ClientConfigurationImpl.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_CLIENTCONFIGURATIONIMPL_H_ -#define LIB_CLIENTCONFIGURATIONIMPL_H_ - -#include - -namespace pulsar { - -struct ClientConfigurationImpl { - AuthenticationPtr authenticationPtr{AuthFactory::Disabled()}; - uint64_t memoryLimit{0ull}; - int ioThreads{1}; - int operationTimeoutSeconds{30}; - int messageListenerThreads{1}; - int concurrentLookupRequest{50000}; - std::string logConfFilePath; - bool useTls{false}; - std::string tlsTrustCertsFilePath; - bool tlsAllowInsecureConnection{false}; - unsigned int statsIntervalInSeconds{600}; // 10 minutes - std::unique_ptr loggerFactory; - bool validateHostName{false}; - unsigned int partitionsUpdateInterval{60}; // 1 minute - std::string listenerName; - int connectionTimeoutMs{10000}; // 10 seconds - - std::unique_ptr takeLogger() { return std::move(loggerFactory); } -}; -} // namespace pulsar - -#endif /* LIB_CLIENTCONFIGURATIONIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ClientConnection.cc b/pulsar-client-cpp/lib/ClientConnection.cc deleted file mode 100644 index 20e34586e00af..0000000000000 --- a/pulsar-client-cpp/lib/ClientConnection.cc +++ /dev/null @@ -1,1711 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "ClientConnection.h" - -#include "PulsarApi.pb.h" - -#include -#include -#include -#include -#include - -#include "ExecutorService.h" -#include "Commands.h" -#include "LogUtils.h" -#include "Url.h" - -#include -#include - -#include "ProducerImpl.h" -#include "ConsumerImpl.h" -#include "checksum/ChecksumProvider.h" -#include "MessageIdUtil.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar::proto; -using namespace boost::asio::ip; - -namespace pulsar { - -static const uint32_t DefaultBufferSize = 64 * 1024; - -static const int KeepAliveIntervalInSeconds = 30; - -// Convert error codes from protobuf to client API Result -static Result getResult(ServerError serverError, const std::string& message) { - switch (serverError) { - case UnknownError: - return ResultUnknownError; - - case MetadataError: - return ResultBrokerMetadataError; - - case ChecksumError: - return ResultChecksumError; - - case PersistenceError: - return ResultBrokerPersistenceError; - - case AuthenticationError: - return ResultAuthenticationError; - - case AuthorizationError: - return ResultAuthorizationError; - - case ConsumerBusy: - return ResultConsumerBusy; - - case ServiceNotReady: - // If the error is not caused by a PulsarServerException, treat it as retryable. - return (message.find("PulsarServerException") == std::string::npos) ? 
ResultRetryable - : ResultServiceUnitNotReady; - - case ProducerBlockedQuotaExceededError: - return ResultProducerBlockedQuotaExceededError; - - case ProducerBlockedQuotaExceededException: - return ResultProducerBlockedQuotaExceededException; - - case TopicNotFound: - return ResultTopicNotFound; - - case SubscriptionNotFound: - return ResultSubscriptionNotFound; - - case ConsumerNotFound: - return ResultConsumerNotFound; - - case UnsupportedVersionError: - return ResultUnsupportedVersionError; - - case TooManyRequests: - return ResultTooManyLookupRequestException; - - case TopicTerminatedError: - return ResultTopicTerminated; - - case ProducerBusy: - return ResultProducerBusy; - - case InvalidTopicName: - return ResultInvalidTopicName; - - case IncompatibleSchema: - return ResultIncompatibleSchema; - - case ConsumerAssignError: - return ResultConsumerAssignError; - - case TransactionCoordinatorNotFound: - return ResultTransactionCoordinatorNotFoundError; - - case InvalidTxnStatus: - return ResultInvalidTxnStatusError; - - case NotAllowedError: - return ResultNotAllowedError; - - case TransactionConflict: - return ResultTransactionConflict; - - case TransactionNotFound: - return ResultTransactionNotFound; - - case ProducerFenced: - return ResultProducerFenced; - } - // NOTE : Do not add default case in the switch above. In future if we get new cases for - // ServerError and miss them in the switch above we would like to get notified. Adding - // return here to make the compiler happy. 
- return ResultUnknownError; -} - -inline std::ostream& operator<<(std::ostream& os, ServerError error) { - os << getResult(error, ""); - return os; -} - -static bool file_exists(const std::string& path) { - if (path.empty()) { - return false; - } - std::ifstream f(path); - return f.good(); -} - -std::atomic ClientConnection::maxMessageSize_{Commands::DefaultMaxMessageSize}; - -ClientConnection::ClientConnection(const std::string& logicalAddress, const std::string& physicalAddress, - ExecutorServicePtr executor, - const ClientConfiguration& clientConfiguration, - const AuthenticationPtr& authentication) - : operationsTimeout_(seconds(clientConfiguration.getOperationTimeoutSeconds())), - authentication_(authentication), - serverProtocolVersion_(ProtocolVersion_MIN), - executor_(executor), - resolver_(executor_->createTcpResolver()), -#if BOOST_VERSION >= 107000 - strand_(boost::asio::make_strand(executor_->getIOService().get_executor())), -#elif BOOST_VERSION >= 106600 - strand_(executor_->getIOService().get_executor()), -#else - strand_(executor_->getIOService()), -#endif - logicalAddress_(logicalAddress), - physicalAddress_(physicalAddress), - cnxString_("[ -> " + physicalAddress + "] "), - incomingBuffer_(SharedBuffer::allocate(DefaultBufferSize)), - outgoingBuffer_(SharedBuffer::allocate(DefaultBufferSize)), - maxPendingLookupRequest_(clientConfiguration.getConcurrentLookupRequest()) { - - try { - socket_ = executor_->createSocket(); - connectTimeoutTask_ = std::make_shared(executor_->getIOService(), - clientConfiguration.getConnectionTimeout()); - consumerStatsRequestTimer_ = executor_->createDeadlineTimer(); - } catch (const boost::system::system_error& e) { - LOG_ERROR("Failed to initialize connection: " << e.what()); - close(ResultRetryable); - return; - } - - LOG_INFO(cnxString_ << "Create ClientConnection, timeout=" << clientConfiguration.getConnectionTimeout()); - if (clientConfiguration.isUseTls()) { -#if BOOST_VERSION >= 105400 - 
boost::asio::ssl::context ctx(boost::asio::ssl::context::tlsv12_client); -#else - boost::asio::ssl::context ctx(executor_->getIOService(), boost::asio::ssl::context::tlsv1_client); -#endif - Url serviceUrl; - Url::parse(physicalAddress, serviceUrl); - if (clientConfiguration.isTlsAllowInsecureConnection()) { - ctx.set_verify_mode(boost::asio::ssl::context::verify_none); - isTlsAllowInsecureConnection_ = true; - } else { - ctx.set_verify_mode(boost::asio::ssl::context::verify_peer); - - if (clientConfiguration.isValidateHostName()) { - LOG_DEBUG("Validating hostname for " << serviceUrl.host() << ":" << serviceUrl.port()); - ctx.set_verify_callback(boost::asio::ssl::rfc2818_verification(physicalAddress)); - } - - std::string trustCertFilePath = clientConfiguration.getTlsTrustCertsFilePath(); - if (!trustCertFilePath.empty()) { - if (file_exists(trustCertFilePath)) { - ctx.load_verify_file(trustCertFilePath); - } else { - LOG_ERROR(trustCertFilePath << ": No such trustCertFile"); - close(); - return; - } - } else { - ctx.set_default_verify_paths(); - } - } - - if (!authentication_) { - LOG_ERROR("Invalid authentication plugin"); - close(); - return; - } - - AuthenticationDataPtr authData; - if (authentication_->getAuthData(authData) == ResultOk && authData->hasDataForTls()) { - std::string tlsCertificates = authData->getTlsCertificates(); - std::string tlsPrivateKey = authData->getTlsPrivateKey(); - - if (file_exists(tlsCertificates)) { - ctx.use_certificate_file(tlsCertificates, boost::asio::ssl::context::pem); - } else { - LOG_ERROR(tlsCertificates << ": No such tlsCertificates"); - close(); - return; - } - - if (file_exists(tlsPrivateKey)) { - ctx.use_private_key_file(tlsPrivateKey, boost::asio::ssl::context::pem); - } else { - LOG_ERROR(tlsPrivateKey << ": No such tlsPrivateKey"); - close(); - return; - } - } - - tlsSocket_ = ExecutorService::createTlsSocket(socket_, ctx); - - LOG_DEBUG("TLS SNI Host: " << serviceUrl.host()); - if 
(!SSL_set_tlsext_host_name(tlsSocket_->native_handle(), serviceUrl.host().c_str())) { - boost::system::error_code ec{static_cast(::ERR_get_error()), - boost::asio::error::get_ssl_category()}; - LOG_ERROR(boost::system::system_error{ec}.what() << ": Error while setting TLS SNI"); - return; - } - } -} - -ClientConnection::~ClientConnection() { LOG_INFO(cnxString_ << "Destroyed connection"); } - -void ClientConnection::handlePulsarConnected(const CommandConnected& cmdConnected) { - if (!cmdConnected.has_server_version()) { - LOG_ERROR(cnxString_ << "Server version is not set"); - close(); - return; - } - - if (cmdConnected.has_max_message_size()) { - LOG_DEBUG("Connection has max message size setting: " << cmdConnected.max_message_size()); - maxMessageSize_.store(cmdConnected.max_message_size(), std::memory_order_release); - LOG_DEBUG("Current max message size is: " << maxMessageSize_); - } - - state_ = Ready; - connectTimeoutTask_->stop(); - serverProtocolVersion_ = cmdConnected.protocol_version(); - connectPromise_.setValue(shared_from_this()); - - if (serverProtocolVersion_ >= v1) { - // Only send keep-alive probes if the broker supports it - keepAliveTimer_ = executor_->createDeadlineTimer(); - Lock lock(mutex_); - if (keepAliveTimer_) { - keepAliveTimer_->expires_from_now(boost::posix_time::seconds(KeepAliveIntervalInSeconds)); - keepAliveTimer_->async_wait( - std::bind(&ClientConnection::handleKeepAliveTimeout, shared_from_this())); - } - lock.unlock(); - } - - if (serverProtocolVersion_ >= v8) { - startConsumerStatsTimer(std::vector()); - } -} - -void ClientConnection::startConsumerStatsTimer(std::vector consumerStatsRequests) { - std::vector> consumerStatsPromises; - Lock lock(mutex_); - - for (int i = 0; i < consumerStatsRequests.size(); i++) { - PendingConsumerStatsMap::iterator it = pendingConsumerStatsMap_.find(consumerStatsRequests[i]); - if (it != pendingConsumerStatsMap_.end()) { - LOG_DEBUG(cnxString_ << " removing request_id " << it->first - << " from 
the pendingConsumerStatsMap_"); - consumerStatsPromises.push_back(it->second); - pendingConsumerStatsMap_.erase(it); - } else { - LOG_DEBUG(cnxString_ << "request_id " << it->first << " already fulfilled - not removing it"); - } - } - - consumerStatsRequests.clear(); - for (PendingConsumerStatsMap::iterator it = pendingConsumerStatsMap_.begin(); - it != pendingConsumerStatsMap_.end(); ++it) { - consumerStatsRequests.push_back(it->first); - } - - // If the close operation has reset the consumerStatsRequestTimer_ then the use_count will be zero - // Check if we have a timer still before we set the request timer to pop again. - if (consumerStatsRequestTimer_) { - consumerStatsRequestTimer_->expires_from_now(operationsTimeout_); - consumerStatsRequestTimer_->async_wait(std::bind(&ClientConnection::handleConsumerStatsTimeout, - shared_from_this(), std::placeholders::_1, - consumerStatsRequests)); - } - lock.unlock(); - // Complex logic since promises need to be fulfilled outside the lock - for (int i = 0; i < consumerStatsPromises.size(); i++) { - consumerStatsPromises[i].setFailed(ResultTimeout); - LOG_WARN(cnxString_ << " Operation timedout, didn't get response from broker"); - } -} - -/// The number of unacknowledged probes to send before considering the connection dead and notifying the -/// application layer -typedef boost::asio::detail::socket_option::integer tcp_keep_alive_count; - -/// The interval between subsequential keepalive probes, regardless of what the connection has exchanged in -/// the meantime -typedef boost::asio::detail::socket_option::integer tcp_keep_alive_interval; - -/// The interval between the last data packet sent (simple ACKs are not considered data) and the first -/// keepalive -/// probe; after the connection is marked to need keepalive, this counter is not used any further -#ifdef __APPLE__ -typedef boost::asio::detail::socket_option::integer tcp_keep_alive_idle; -#else -typedef boost::asio::detail::socket_option::integer 
tcp_keep_alive_idle; -#endif - -/* - * TCP Connect handler - * - * if async_connect without any error, connected_ would be set to true - * at this point the connection is deemed valid to be used by clients of this class - */ -void ClientConnection::handleTcpConnected(const boost::system::error_code& err, - tcp::resolver::iterator endpointIterator) { - if (!err) { - std::stringstream cnxStringStream; - try { - cnxStringStream << "[" << socket_->local_endpoint() << " -> " << socket_->remote_endpoint() - << "] "; - cnxString_ = cnxStringStream.str(); - } catch (const boost::system::system_error& e) { - LOG_ERROR("Failed to get endpoint: " << e.what()); - close(ResultRetryable); - return; - } - if (logicalAddress_ == physicalAddress_) { - LOG_INFO(cnxString_ << "Connected to broker"); - } else { - LOG_INFO(cnxString_ << "Connected to broker through proxy. Logical broker: " << logicalAddress_); - } - state_ = TcpConnected; - - boost::system::error_code error; - socket_->set_option(tcp::no_delay(true), error); - if (error) { - LOG_WARN(cnxString_ << "Socket failed to set tcp::no_delay: " << error.message()); - } - - socket_->set_option(tcp::socket::keep_alive(true), error); - if (error) { - LOG_WARN(cnxString_ << "Socket failed to set tcp::socket::keep_alive: " << error.message()); - } - - // Start TCP keep-alive probes after connection has been idle after 1 minute. 
Ideally this - // should never happen, given that we're sending our own keep-alive probes (within the TCP - // connection) every 30 seconds - socket_->set_option(tcp_keep_alive_idle(1 * 60), error); - if (error) { - LOG_DEBUG(cnxString_ << "Socket failed to set tcp_keep_alive_idle: " << error.message()); - } - - // Send up to 10 probes before declaring the connection broken - socket_->set_option(tcp_keep_alive_count(10), error); - if (error) { - LOG_DEBUG(cnxString_ << "Socket failed to set tcp_keep_alive_count: " << error.message()); - } - - // Interval between probes: 6 seconds - socket_->set_option(tcp_keep_alive_interval(6), error); - if (error) { - LOG_DEBUG(cnxString_ << "Socket failed to set tcp_keep_alive_interval: " << error.message()); - } - - if (tlsSocket_) { - if (!isTlsAllowInsecureConnection_) { - boost::system::error_code err; - Url service_url; - if (!Url::parse(physicalAddress_, service_url)) { - LOG_ERROR(cnxString_ << "Invalid Url, unable to parse: " << err << " " << err.message()); - close(); - return; - } - } -#if BOOST_VERSION >= 106600 - tlsSocket_->async_handshake( - boost::asio::ssl::stream::client, - boost::asio::bind_executor(strand_, std::bind(&ClientConnection::handleHandshake, - shared_from_this(), std::placeholders::_1))); -#else - tlsSocket_->async_handshake(boost::asio::ssl::stream::client, - strand_.wrap(std::bind(&ClientConnection::handleHandshake, - shared_from_this(), std::placeholders::_1))); -#endif - } else { - handleHandshake(boost::system::errc::make_error_code(boost::system::errc::success)); - } - } else if (endpointIterator != tcp::resolver::iterator()) { - LOG_WARN(cnxString_ << "Failed to establish connection: " << err.message()); - // The connection failed. Try the next endpoint in the list. 
- boost::system::error_code closeError; - socket_->close(closeError); // ignore the error of close - if (closeError) { - LOG_WARN(cnxString_ << "Failed to close socket: " << err.message()); - } - connectTimeoutTask_->stop(); - ++endpointIterator; - if (endpointIterator != tcp::resolver::iterator()) { - LOG_DEBUG(cnxString_ << "Connecting to " << endpointIterator->endpoint() << "..."); - connectTimeoutTask_->start(); - tcp::endpoint endpoint = *endpointIterator; - socket_->async_connect(endpoint, - std::bind(&ClientConnection::handleTcpConnected, shared_from_this(), - std::placeholders::_1, ++endpointIterator)); - } else { - if (err == boost::asio::error::operation_aborted) { - // TCP connect timeout, which is not retryable - close(); - } else { - close(ResultRetryable); - } - } - } else { - LOG_ERROR(cnxString_ << "Failed to establish connection: " << err.message()); - close(ResultRetryable); - } -} - -void ClientConnection::handleHandshake(const boost::system::error_code& err) { - if (err) { - LOG_ERROR(cnxString_ << "Handshake failed: " << err.message()); - close(); - return; - } - - bool connectingThroughProxy = logicalAddress_ != physicalAddress_; - Result result = ResultOk; - SharedBuffer buffer = - Commands::newConnect(authentication_, logicalAddress_, connectingThroughProxy, result); - if (result != ResultOk) { - LOG_ERROR(cnxString_ << "Failed to establish connection: " << result); - close(result); - return; - } - // Send CONNECT command to broker - asyncWrite(buffer.const_asio_buffer(), std::bind(&ClientConnection::handleSentPulsarConnect, - shared_from_this(), std::placeholders::_1, buffer)); -} - -void ClientConnection::handleSentPulsarConnect(const boost::system::error_code& err, - const SharedBuffer& buffer) { - if (err) { - LOG_ERROR(cnxString_ << "Failed to establish connection: " << err.message()); - close(); - return; - } - - // Schedule the reading of CONNECTED command from broker - readNextCommand(); -} - -void 
ClientConnection::handleSentAuthResponse(const boost::system::error_code& err, - const SharedBuffer& buffer) { - if (err) { - LOG_WARN(cnxString_ << "Failed to send auth response: " << err.message()); - close(); - return; - } -} - -/* - * Async method to establish TCP connection with broker - * - * tcpConnectCompletionHandler is notified when the result of this call is available. - * - */ -void ClientConnection::tcpConnectAsync() { - if (isClosed()) { - return; - } - - boost::system::error_code err; - Url service_url; - if (!Url::parse(physicalAddress_, service_url)) { - LOG_ERROR(cnxString_ << "Invalid Url, unable to parse: " << err << " " << err.message()); - close(); - return; - } - - if (service_url.protocol() != "pulsar" && service_url.protocol() != "pulsar+ssl") { - LOG_ERROR(cnxString_ << "Invalid Url protocol '" << service_url.protocol() - << "'. Valid values are 'pulsar' and 'pulsar+ssl'"); - close(); - return; - } - - LOG_DEBUG(cnxString_ << "Resolving " << service_url.host() << ":" << service_url.port()); - tcp::resolver::query query(service_url.host(), std::to_string(service_url.port())); - resolver_->async_resolve(query, std::bind(&ClientConnection::handleResolve, shared_from_this(), - std::placeholders::_1, std::placeholders::_2)); -} - -void ClientConnection::handleResolve(const boost::system::error_code& err, - tcp::resolver::iterator endpointIterator) { - if (err) { - LOG_ERROR(cnxString_ << "Resolve error: " << err << " : " << err.message()); - close(); - return; - } - - auto self = ClientConnectionWeakPtr(shared_from_this()); - - connectTimeoutTask_->setCallback([self](const PeriodicTask::ErrorCode& ec) { - ClientConnectionPtr ptr = self.lock(); - if (!ptr) { - // Connection was already destroyed - return; - } - - if (ptr->state_ != Ready) { - LOG_ERROR(ptr->cnxString_ << "Connection was not established in " - << ptr->connectTimeoutTask_->getPeriodMs() << " ms, close the socket"); - PeriodicTask::ErrorCode err; - ptr->socket_->close(err); - if 
(err) { - LOG_WARN(ptr->cnxString_ << "Failed to close socket: " << err.message()); - } - } - ptr->connectTimeoutTask_->stop(); - }); - - LOG_DEBUG(cnxString_ << "Connecting to " << endpointIterator->endpoint() << "..."); - connectTimeoutTask_->start(); - if (endpointIterator != tcp::resolver::iterator()) { - LOG_DEBUG(cnxString_ << "Resolved hostname " << endpointIterator->host_name() // - << " to " << endpointIterator->endpoint()); - socket_->async_connect(*endpointIterator, - std::bind(&ClientConnection::handleTcpConnected, shared_from_this(), - std::placeholders::_1, endpointIterator)); - } else { - LOG_WARN(cnxString_ << "No IP address found"); - close(); - return; - } -} - -void ClientConnection::readNextCommand() { - const static uint32_t minReadSize = sizeof(uint32_t); - asyncReceive( - incomingBuffer_.asio_buffer(), - customAllocReadHandler(std::bind(&ClientConnection::handleRead, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, minReadSize))); -} - -void ClientConnection::handleRead(const boost::system::error_code& err, size_t bytesTransferred, - uint32_t minReadSize) { - // Update buffer write idx with new data - incomingBuffer_.bytesWritten(bytesTransferred); - - if (err || bytesTransferred == 0) { - if (err) { - if (err == boost::asio::error::operation_aborted) { - LOG_DEBUG(cnxString_ << "Read operation was canceled: " << err.message()); - } else { - LOG_ERROR(cnxString_ << "Read operation failed: " << err.message()); - } - } // else: bytesTransferred == 0, which means server has closed the connection - close(); - } else if (bytesTransferred < minReadSize) { - // Read the remaining part, use a slice of buffer to write on the next - // region - SharedBuffer buffer = incomingBuffer_.slice(bytesTransferred); - asyncReceive(buffer.asio_buffer(), - customAllocReadHandler(std::bind(&ClientConnection::handleRead, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, - minReadSize - bytesTransferred))); - } else { - 
processIncomingBuffer(); - } -} - -void ClientConnection::processIncomingBuffer() { - // Process all the available frames from the incoming buffer - while (incomingBuffer_.readableBytes() >= sizeof(uint32_t)) { - // Extract message frames from incoming buffer - // At this point we have at least 4 bytes in the buffer - uint32_t frameSize = incomingBuffer_.readUnsignedInt(); - - if (frameSize > incomingBuffer_.readableBytes()) { - // We don't have the entire frame yet - const uint32_t bytesToReceive = frameSize - incomingBuffer_.readableBytes(); - - // Rollback the reading of frameSize (when the frame will be complete, - // we'll read it again - incomingBuffer_.rollback(sizeof(uint32_t)); - - if (bytesToReceive <= incomingBuffer_.writableBytes()) { - // The rest of the frame still fits in the current buffer - asyncReceive(incomingBuffer_.asio_buffer(), - customAllocReadHandler(std::bind(&ClientConnection::handleRead, - shared_from_this(), std::placeholders::_1, - std::placeholders::_2, bytesToReceive))); - return; - } else { - // Need to allocate a buffer big enough for the frame - uint32_t newBufferSize = std::max(DefaultBufferSize, frameSize + sizeof(uint32_t)); - incomingBuffer_ = SharedBuffer::copyFrom(incomingBuffer_, newBufferSize); - - asyncReceive(incomingBuffer_.asio_buffer(), - customAllocReadHandler(std::bind(&ClientConnection::handleRead, - shared_from_this(), std::placeholders::_1, - std::placeholders::_2, bytesToReceive))); - return; - } - } - - // At this point, we have at least one complete frame available in the buffer - uint32_t cmdSize = incomingBuffer_.readUnsignedInt(); - if (!incomingCmd_.ParseFromArray(incomingBuffer_.data(), cmdSize)) { - LOG_ERROR(cnxString_ << "Error parsing protocol buffer command"); - close(); - return; - } - - incomingBuffer_.consume(cmdSize); - - if (incomingCmd_.type() == BaseCommand::MESSAGE) { - // Parse message metadata and extract payload - MessageMetadata msgMetadata; - - // read checksum - uint32_t remainingBytes 
= frameSize - (cmdSize + 4); - bool isChecksumValid = verifyChecksum(incomingBuffer_, remainingBytes, incomingCmd_); - - uint32_t metadataSize = incomingBuffer_.readUnsignedInt(); - if (!msgMetadata.ParseFromArray(incomingBuffer_.data(), metadataSize)) { - LOG_ERROR(cnxString_ << "[consumer id " << incomingCmd_.message().consumer_id() // - << ", message ledger id " - << incomingCmd_.message().message_id().ledgerid() // - << ", entry id " << incomingCmd_.message().message_id().entryid() - << "] Error parsing message metadata"); - close(); - return; - } - - incomingBuffer_.consume(metadataSize); - remainingBytes -= (4 + metadataSize); - - uint32_t payloadSize = remainingBytes; - SharedBuffer payload = SharedBuffer::copy(incomingBuffer_.data(), payloadSize); - incomingBuffer_.consume(payloadSize); - handleIncomingMessage(incomingCmd_.message(), isChecksumValid, msgMetadata, payload); - } else { - handleIncomingCommand(); - } - } - if (incomingBuffer_.readableBytes() > 0) { - // We still have 1 to 3 bytes from the next frame - assert(incomingBuffer_.readableBytes() < sizeof(uint32_t)); - - // Restart with a new buffer and copy the the few bytes at the beginning - incomingBuffer_ = SharedBuffer::copyFrom(incomingBuffer_, DefaultBufferSize); - - // At least we need to read 4 bytes to have the complete frame size - uint32_t minReadSize = sizeof(uint32_t) - incomingBuffer_.readableBytes(); - - asyncReceive( - incomingBuffer_.asio_buffer(), - customAllocReadHandler(std::bind(&ClientConnection::handleRead, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, minReadSize))); - return; - } - - // We have read everything we had in the buffer - // Rollback the indexes to reuse the same buffer - incomingBuffer_.reset(); - - readNextCommand(); -} - -bool ClientConnection::verifyChecksum(SharedBuffer& incomingBuffer_, uint32_t& remainingBytes, - proto::BaseCommand& incomingCmd_) { - int readerIndex = incomingBuffer_.readerIndex(); - bool isChecksumValid = true; - - 
if (incomingBuffer_.readUnsignedShort() == Commands::magicCrc32c) { - uint32_t storedChecksum = incomingBuffer_.readUnsignedInt(); - remainingBytes -= (2 + 4) /* subtract size of checksum itself */; - - // compute metadata-payload checksum - int metadataPayloadSize = remainingBytes; - uint32_t computedChecksum = computeChecksum(0, incomingBuffer_.data(), metadataPayloadSize); - // verify checksum - isChecksumValid = (storedChecksum == computedChecksum); - - if (!isChecksumValid) { - LOG_ERROR("[consumer id " - << incomingCmd_.message().consumer_id() // - << ", message ledger id " << incomingCmd_.message().message_id().ledgerid() // - << ", entry id " << incomingCmd_.message().message_id().entryid() // - << "stored-checksum" << storedChecksum << "computedChecksum" << computedChecksum // - << "] Checksum verification failed"); - } - } else { - incomingBuffer_.setReaderIndex(readerIndex); - } - return isChecksumValid; -} - -void ClientConnection::handleActiveConsumerChange(const proto::CommandActiveConsumerChange& change) { - Lock lock(mutex_); - ConsumersMap::iterator it = consumers_.find(change.consumer_id()); - if (it != consumers_.end()) { - ConsumerImplPtr consumer = it->second.lock(); - - if (consumer) { - lock.unlock(); - consumer->activeConsumerChanged(change.is_active()); - } else { - consumers_.erase(change.consumer_id()); - LOG_DEBUG(cnxString_ << "Ignoring incoming message for already destroyed consumer " - << change.consumer_id()); - } - } else { - LOG_DEBUG(cnxString_ << "Got invalid consumer Id in " << change.consumer_id() - << " -- isActive: " << change.is_active()); - } -} - -void ClientConnection::handleIncomingMessage(const proto::CommandMessage& msg, bool isChecksumValid, - proto::MessageMetadata& msgMetadata, SharedBuffer& payload) { - LOG_DEBUG(cnxString_ << "Received a message from the server for consumer: " << msg.consumer_id()); - - Lock lock(mutex_); - ConsumersMap::iterator it = consumers_.find(msg.consumer_id()); - if (it != 
consumers_.end()) { - ConsumerImplPtr consumer = it->second.lock(); - - if (consumer) { - // Unlock the mutex before notifying the consumer of the - // new received message - lock.unlock(); - consumer->messageReceived(shared_from_this(), msg, isChecksumValid, msgMetadata, payload); - } else { - consumers_.erase(msg.consumer_id()); - LOG_DEBUG(cnxString_ << "Ignoring incoming message for already destroyed consumer " - << msg.consumer_id()); - } - } else { - LOG_DEBUG(cnxString_ << "Got invalid consumer Id in " // - << msg.consumer_id() << " -- msg: " << msgMetadata.sequence_id()); - } -} - -void ClientConnection::handleIncomingCommand() { - LOG_DEBUG(cnxString_ << "Handling incoming command: " << Commands::messageType(incomingCmd_.type())); - - switch (state_) { - case Pending: { - LOG_ERROR(cnxString_ << "Connection is not ready yet"); - break; - } - - case TcpConnected: { - // Handle Pulsar Connected - if (incomingCmd_.type() != BaseCommand::CONNECTED) { - // Wrong cmd - close(); - } else { - handlePulsarConnected(incomingCmd_.connected()); - } - break; - } - - case Disconnected: { - LOG_ERROR(cnxString_ << "Connection already disconnected"); - break; - } - - case Ready: { - // Since we are receiving data from the connection, we are assuming that for now the connection is - // still working well. 
- havePendingPingRequest_ = false; - - // Handle normal commands - switch (incomingCmd_.type()) { - case BaseCommand::SEND_RECEIPT: { - const CommandSendReceipt& sendReceipt = incomingCmd_.send_receipt(); - int producerId = sendReceipt.producer_id(); - uint64_t sequenceId = sendReceipt.sequence_id(); - const proto::MessageIdData& messageIdData = sendReceipt.message_id(); - MessageId messageId = MessageId(messageIdData.partition(), messageIdData.ledgerid(), - messageIdData.entryid(), messageIdData.batch_index()); - - LOG_DEBUG(cnxString_ << "Got receipt for producer: " << producerId - << " -- msg: " << sequenceId << "-- message id: " << messageId); - - Lock lock(mutex_); - ProducersMap::iterator it = producers_.find(producerId); - if (it != producers_.end()) { - ProducerImplPtr producer = it->second.lock(); - lock.unlock(); - - if (producer) { - if (!producer->ackReceived(sequenceId, messageId)) { - // If the producer fails to process the ack, we need to close the connection - // to give it a chance to recover from there - close(); - } - } - } else { - LOG_ERROR(cnxString_ << "Got invalid producer Id in SendReceipt: " // - << producerId << " -- msg: " << sequenceId); - } - - break; - } - - case BaseCommand::SEND_ERROR: { - const CommandSendError& error = incomingCmd_.send_error(); - LOG_WARN(cnxString_ << "Received send error from server: " << error.message()); - if (ChecksumError == error.error()) { - long producerId = error.producer_id(); - long sequenceId = error.sequence_id(); - Lock lock(mutex_); - ProducersMap::iterator it = producers_.find(producerId); - if (it != producers_.end()) { - ProducerImplPtr producer = it->second.lock(); - lock.unlock(); - - if (producer) { - if (!producer->removeCorruptMessage(sequenceId)) { - // If the producer fails to remove corrupt msg, we need to close the - // connection to give it a chance to recover from there - close(); - } - } - } - } else { - close(); - } - break; - } - - case BaseCommand::SUCCESS: { - const 
CommandSuccess& success = incomingCmd_.success(); - LOG_DEBUG(cnxString_ << "Received success response from server. req_id: " - << success.request_id()); - - Lock lock(mutex_); - PendingRequestsMap::iterator it = pendingRequests_.find(success.request_id()); - if (it != pendingRequests_.end()) { - PendingRequestData requestData = it->second; - pendingRequests_.erase(it); - lock.unlock(); - - requestData.promise.setValue({}); - requestData.timer->cancel(); - } - break; - } - - case BaseCommand::PARTITIONED_METADATA_RESPONSE: { - const CommandPartitionedTopicMetadataResponse& partitionMetadataResponse = - incomingCmd_.partitionmetadataresponse(); - LOG_DEBUG(cnxString_ << "Received partition-metadata response from server. req_id: " - << partitionMetadataResponse.request_id()); - - Lock lock(mutex_); - PendingLookupRequestsMap::iterator it = - pendingLookupRequests_.find(partitionMetadataResponse.request_id()); - if (it != pendingLookupRequests_.end()) { - it->second.timer->cancel(); - LookupDataResultPromisePtr lookupDataPromise = it->second.promise; - pendingLookupRequests_.erase(it); - numOfPendingLookupRequest_--; - lock.unlock(); - - if (!partitionMetadataResponse.has_response() || - (partitionMetadataResponse.response() == - CommandPartitionedTopicMetadataResponse::Failed)) { - if (partitionMetadataResponse.has_error()) { - LOG_ERROR(cnxString_ << "Failed partition-metadata lookup req_id: " - << partitionMetadataResponse.request_id() - << " error: " << partitionMetadataResponse.error() - << " msg: " << partitionMetadataResponse.message()); - checkServerError(partitionMetadataResponse.error()); - lookupDataPromise->setFailed(getResult(partitionMetadataResponse.error(), - partitionMetadataResponse.message())); - } else { - LOG_ERROR(cnxString_ << "Failed partition-metadata lookup req_id: " - << partitionMetadataResponse.request_id() - << " with empty response: "); - lookupDataPromise->setFailed(ResultConnectError); - } - } else { - LookupDataResultPtr 
lookupResultPtr = std::make_shared(); - lookupResultPtr->setPartitions(partitionMetadataResponse.partitions()); - lookupDataPromise->setValue(lookupResultPtr); - } - - } else { - LOG_WARN("Received unknown request id from server: " - << partitionMetadataResponse.request_id()); - } - break; - } - - case BaseCommand::CONSUMER_STATS_RESPONSE: { - const CommandConsumerStatsResponse& consumerStatsResponse = - incomingCmd_.consumerstatsresponse(); - LOG_DEBUG(cnxString_ << "ConsumerStatsResponse command - Received consumer stats " - "response from server. req_id: " - << consumerStatsResponse.request_id()); - Lock lock(mutex_); - PendingConsumerStatsMap::iterator it = - pendingConsumerStatsMap_.find(consumerStatsResponse.request_id()); - if (it != pendingConsumerStatsMap_.end()) { - Promise consumerStatsPromise = it->second; - pendingConsumerStatsMap_.erase(it); - lock.unlock(); - - if (consumerStatsResponse.has_error_code()) { - if (consumerStatsResponse.has_error_message()) { - LOG_ERROR(cnxString_ << " Failed to get consumer stats - " - << consumerStatsResponse.error_message()); - } - consumerStatsPromise.setFailed(getResult(consumerStatsResponse.error_code(), - consumerStatsResponse.error_message())); - } else { - LOG_DEBUG(cnxString_ << "ConsumerStatsResponse command - Received consumer stats " - "response from server. 
req_id: " - << consumerStatsResponse.request_id() << " Stats: "); - BrokerConsumerStatsImpl brokerStats( - consumerStatsResponse.msgrateout(), consumerStatsResponse.msgthroughputout(), - consumerStatsResponse.msgrateredeliver(), - consumerStatsResponse.consumername(), - consumerStatsResponse.availablepermits(), - consumerStatsResponse.unackedmessages(), - consumerStatsResponse.blockedconsumeronunackedmsgs(), - consumerStatsResponse.address(), consumerStatsResponse.connectedsince(), - consumerStatsResponse.type(), consumerStatsResponse.msgrateexpired(), - consumerStatsResponse.msgbacklog()); - consumerStatsPromise.setValue(brokerStats); - } - } else { - LOG_WARN("ConsumerStatsResponse command - Received unknown request id from server: " - << consumerStatsResponse.request_id()); - } - break; - } - - case BaseCommand::LOOKUP_RESPONSE: { - const CommandLookupTopicResponse& lookupTopicResponse = - incomingCmd_.lookuptopicresponse(); - LOG_DEBUG(cnxString_ << "Received lookup response from server. 
req_id: " - << lookupTopicResponse.request_id()); - - Lock lock(mutex_); - PendingLookupRequestsMap::iterator it = - pendingLookupRequests_.find(lookupTopicResponse.request_id()); - if (it != pendingLookupRequests_.end()) { - it->second.timer->cancel(); - LookupDataResultPromisePtr lookupDataPromise = it->second.promise; - pendingLookupRequests_.erase(it); - numOfPendingLookupRequest_--; - lock.unlock(); - - if (!lookupTopicResponse.has_response() || - (lookupTopicResponse.response() == CommandLookupTopicResponse::Failed)) { - if (lookupTopicResponse.has_error()) { - LOG_ERROR(cnxString_ - << "Failed lookup req_id: " << lookupTopicResponse.request_id() - << " error: " << lookupTopicResponse.error() - << " msg: " << lookupTopicResponse.message()); - checkServerError(lookupTopicResponse.error()); - lookupDataPromise->setFailed( - getResult(lookupTopicResponse.error(), lookupTopicResponse.message())); - } else { - LOG_ERROR(cnxString_ - << "Failed lookup req_id: " << lookupTopicResponse.request_id() - << " with empty response: "); - lookupDataPromise->setFailed(ResultConnectError); - } - } else { - LOG_DEBUG(cnxString_ - << "Received lookup response from server. 
req_id: " - << lookupTopicResponse.request_id() // - << " -- broker-url: " << lookupTopicResponse.brokerserviceurl() - << " -- broker-tls-url: " // - << lookupTopicResponse.brokerserviceurltls() - << " authoritative: " << lookupTopicResponse.authoritative() // - << " redirect: " << lookupTopicResponse.response()); - LookupDataResultPtr lookupResultPtr = std::make_shared(); - - if (tlsSocket_) { - lookupResultPtr->setBrokerUrl(lookupTopicResponse.brokerserviceurltls()); - } else { - lookupResultPtr->setBrokerUrl(lookupTopicResponse.brokerserviceurl()); - } - - lookupResultPtr->setBrokerUrlTls(lookupTopicResponse.brokerserviceurltls()); - lookupResultPtr->setAuthoritative(lookupTopicResponse.authoritative()); - lookupResultPtr->setRedirect(lookupTopicResponse.response() == - CommandLookupTopicResponse::Redirect); - lookupResultPtr->setShouldProxyThroughServiceUrl( - lookupTopicResponse.proxy_through_service_url()); - lookupDataPromise->setValue(lookupResultPtr); - } - - } else { - LOG_WARN( - "Received unknown request id from server: " << lookupTopicResponse.request_id()); - } - break; - } - - case BaseCommand::PRODUCER_SUCCESS: { - const CommandProducerSuccess& producerSuccess = incomingCmd_.producer_success(); - LOG_DEBUG(cnxString_ << "Received success producer response from server. 
req_id: " - << producerSuccess.request_id() // - << " -- producer name: " << producerSuccess.producer_name()); - - Lock lock(mutex_); - PendingRequestsMap::iterator it = pendingRequests_.find(producerSuccess.request_id()); - if (it != pendingRequests_.end()) { - PendingRequestData requestData = it->second; - pendingRequests_.erase(it); - lock.unlock(); - - ResponseData data; - data.producerName = producerSuccess.producer_name(); - data.lastSequenceId = producerSuccess.last_sequence_id(); - if (producerSuccess.has_schema_version()) { - data.schemaVersion = producerSuccess.schema_version(); - } - if (producerSuccess.has_topic_epoch()) { - data.topicEpoch = Optional::of(producerSuccess.topic_epoch()); - } else { - data.topicEpoch = Optional::empty(); - } - requestData.promise.setValue(data); - requestData.timer->cancel(); - } - break; - } - - case BaseCommand::ERROR: { - const CommandError& error = incomingCmd_.error(); - Result result = getResult(error.error(), error.message()); - LOG_WARN(cnxString_ << "Received error response from server: " << result - << (error.has_message() ? 
(" (" + error.message() + ")") : "") - << " -- req_id: " << error.request_id()); - - Lock lock(mutex_); - - PendingRequestsMap::iterator it = pendingRequests_.find(error.request_id()); - if (it != pendingRequests_.end()) { - PendingRequestData requestData = it->second; - pendingRequests_.erase(it); - lock.unlock(); - - requestData.promise.setFailed(result); - requestData.timer->cancel(); - } else { - PendingGetLastMessageIdRequestsMap::iterator it = - pendingGetLastMessageIdRequests_.find(error.request_id()); - if (it != pendingGetLastMessageIdRequests_.end()) { - auto getLastMessageIdPromise = it->second; - pendingGetLastMessageIdRequests_.erase(it); - lock.unlock(); - - getLastMessageIdPromise.setFailed(result); - } else { - PendingGetNamespaceTopicsMap::iterator it = - pendingGetNamespaceTopicsRequests_.find(error.request_id()); - if (it != pendingGetNamespaceTopicsRequests_.end()) { - Promise getNamespaceTopicsPromise = it->second; - pendingGetNamespaceTopicsRequests_.erase(it); - lock.unlock(); - - getNamespaceTopicsPromise.setFailed(result); - } else { - lock.unlock(); - } - } - } - break; - } - - case BaseCommand::CLOSE_PRODUCER: { - const CommandCloseProducer& closeProducer = incomingCmd_.close_producer(); - int producerId = closeProducer.producer_id(); - - LOG_DEBUG("Broker notification of Closed producer: " << producerId); - - Lock lock(mutex_); - ProducersMap::iterator it = producers_.find(producerId); - if (it != producers_.end()) { - ProducerImplPtr producer = it->second.lock(); - producers_.erase(it); - lock.unlock(); - - if (producer) { - producer->disconnectProducer(); - } - } else { - LOG_ERROR(cnxString_ << "Got invalid producer Id in closeProducer command: " - << producerId); - } - - break; - } - - case BaseCommand::CLOSE_CONSUMER: { - const CommandCloseConsumer& closeconsumer = incomingCmd_.close_consumer(); - int consumerId = closeconsumer.consumer_id(); - - LOG_DEBUG("Broker notification of Closed consumer: " << consumerId); - - Lock 
lock(mutex_); - ConsumersMap::iterator it = consumers_.find(consumerId); - if (it != consumers_.end()) { - ConsumerImplPtr consumer = it->second.lock(); - consumers_.erase(it); - lock.unlock(); - - if (consumer) { - consumer->disconnectConsumer(); - } - } else { - LOG_ERROR(cnxString_ << "Got invalid consumer Id in closeConsumer command: " - << consumerId); - } - - break; - } - - case BaseCommand::PING: { - // Respond to ping request - LOG_DEBUG(cnxString_ << "Replying to ping command"); - sendCommand(Commands::newPong()); - break; - } - - case BaseCommand::PONG: { - LOG_DEBUG(cnxString_ << "Received response to ping message"); - break; - } - - case BaseCommand::AUTH_CHALLENGE: { - LOG_DEBUG(cnxString_ << "Received auth challenge from broker"); - - Result result; - SharedBuffer buffer = Commands::newAuthResponse(authentication_, result); - if (result != ResultOk) { - LOG_ERROR(cnxString_ << "Failed to send auth response: " << result); - close(result); - break; - } - asyncWrite(buffer.const_asio_buffer(), - std::bind(&ClientConnection::handleSentAuthResponse, shared_from_this(), - std::placeholders::_1, buffer)); - break; - } - - case BaseCommand::ACTIVE_CONSUMER_CHANGE: { - const CommandActiveConsumerChange& change = incomingCmd_.active_consumer_change(); - LOG_DEBUG(cnxString_ - << "Received notification about active consumer change, consumer_id: " - << change.consumer_id() << " isActive: " << change.is_active()); - handleActiveConsumerChange(change); - break; - } - - case BaseCommand::GET_LAST_MESSAGE_ID_RESPONSE: { - const CommandGetLastMessageIdResponse& getLastMessageIdResponse = - incomingCmd_.getlastmessageidresponse(); - LOG_DEBUG(cnxString_ << "Received getLastMessageIdResponse from server. 
req_id: " - << getLastMessageIdResponse.request_id()); - - Lock lock(mutex_); - PendingGetLastMessageIdRequestsMap::iterator it = - pendingGetLastMessageIdRequests_.find(getLastMessageIdResponse.request_id()); - - if (it != pendingGetLastMessageIdRequests_.end()) { - auto getLastMessageIdPromise = it->second; - pendingGetLastMessageIdRequests_.erase(it); - lock.unlock(); - - if (getLastMessageIdResponse.has_consumer_mark_delete_position()) { - getLastMessageIdPromise.setValue( - {toMessageId(getLastMessageIdResponse.last_message_id()), - toMessageId(getLastMessageIdResponse.consumer_mark_delete_position())}); - } else { - getLastMessageIdPromise.setValue( - {toMessageId(getLastMessageIdResponse.last_message_id())}); - } - } else { - lock.unlock(); - LOG_WARN( - "getLastMessageIdResponse command - Received unknown request id from server: " - << getLastMessageIdResponse.request_id()); - } - break; - } - - case BaseCommand::GET_TOPICS_OF_NAMESPACE_RESPONSE: { - const CommandGetTopicsOfNamespaceResponse& response = - incomingCmd_.gettopicsofnamespaceresponse(); - - LOG_DEBUG(cnxString_ << "Received GetTopicsOfNamespaceResponse from server. 
req_id: " - << response.request_id() << " topicsSize" << response.topics_size()); - - Lock lock(mutex_); - PendingGetNamespaceTopicsMap::iterator it = - pendingGetNamespaceTopicsRequests_.find(response.request_id()); - - if (it != pendingGetNamespaceTopicsRequests_.end()) { - Promise getTopicsPromise = it->second; - pendingGetNamespaceTopicsRequests_.erase(it); - lock.unlock(); - - int numTopics = response.topics_size(); - std::set topicSet; - // get all topics - for (int i = 0; i < numTopics; i++) { - // remove partition part - const std::string& topicName = response.topics(i); - int pos = topicName.find("-partition-"); - std::string filteredName = topicName.substr(0, pos); - - // filter duped topic name - if (topicSet.find(filteredName) == topicSet.end()) { - topicSet.insert(filteredName); - } - } - - NamespaceTopicsPtr topicsPtr = - std::make_shared>(topicSet.begin(), topicSet.end()); - - getTopicsPromise.setValue(topicsPtr); - } else { - lock.unlock(); - LOG_WARN( - "GetTopicsOfNamespaceResponse command - Received unknown request id from " - "server: " - << response.request_id()); - } - break; - } - - default: { - LOG_WARN(cnxString_ << "Received invalid message from server"); - close(); - break; - } - } - } - } -} - -Future ClientConnection::newConsumerStats(uint64_t consumerId, - uint64_t requestId) { - Lock lock(mutex_); - Promise promise; - if (isClosed()) { - lock.unlock(); - LOG_ERROR(cnxString_ << " Client is not connected to the broker"); - promise.setFailed(ResultNotConnected); - } - pendingConsumerStatsMap_.insert(std::make_pair(requestId, promise)); - lock.unlock(); - sendCommand(Commands::newConsumerStats(consumerId, requestId)); - return promise.getFuture(); -} - -void ClientConnection::newTopicLookup(const std::string& topicName, bool authoritative, - const std::string& listenerName, const uint64_t requestId, - LookupDataResultPromisePtr promise) { - newLookup(Commands::newLookup(topicName, authoritative, requestId, listenerName), requestId, 
promise); -} - -void ClientConnection::newPartitionedMetadataLookup(const std::string& topicName, const uint64_t requestId, - LookupDataResultPromisePtr promise) { - newLookup(Commands::newPartitionMetadataRequest(topicName, requestId), requestId, promise); -} - -void ClientConnection::newLookup(const SharedBuffer& cmd, const uint64_t requestId, - LookupDataResultPromisePtr promise) { - Lock lock(mutex_); - std::shared_ptr lookupDataResult; - lookupDataResult = std::make_shared(); - if (isClosed()) { - lock.unlock(); - promise->setFailed(ResultNotConnected); - return; - } else if (numOfPendingLookupRequest_ >= maxPendingLookupRequest_) { - lock.unlock(); - promise->setFailed(ResultTooManyLookupRequestException); - return; - } - LookupRequestData requestData; - requestData.promise = promise; - requestData.timer = executor_->createDeadlineTimer(); - requestData.timer->expires_from_now(operationsTimeout_); - requestData.timer->async_wait(std::bind(&ClientConnection::handleLookupTimeout, shared_from_this(), - std::placeholders::_1, requestData)); - - pendingLookupRequests_.insert(std::make_pair(requestId, requestData)); - numOfPendingLookupRequest_++; - lock.unlock(); - sendCommand(cmd); -} - -void ClientConnection::sendCommand(const SharedBuffer& cmd) { - Lock lock(mutex_); - - if (pendingWriteOperations_++ == 0) { - // Write immediately to socket - if (tlsSocket_) { -#if BOOST_VERSION >= 106600 - boost::asio::post(strand_, - std::bind(&ClientConnection::sendCommandInternal, shared_from_this(), cmd)); -#else - strand_.post(std::bind(&ClientConnection::sendCommandInternal, shared_from_this(), cmd)); -#endif - } else { - sendCommandInternal(cmd); - } - } else { - // Queue to send later - pendingWriteBuffers_.push_back(cmd); - } -} - -void ClientConnection::sendCommandInternal(const SharedBuffer& cmd) { - asyncWrite(cmd.const_asio_buffer(), - customAllocWriteHandler( - std::bind(&ClientConnection::handleSend, shared_from_this(), std::placeholders::_1, cmd))); -} - -void 
ClientConnection::sendMessage(const OpSendMsg& opSend) { - Lock lock(mutex_); - - if (pendingWriteOperations_++ == 0) { - // Write immediately to socket - if (tlsSocket_) { -#if BOOST_VERSION >= 106600 - boost::asio::post(strand_, - std::bind(&ClientConnection::sendMessageInternal, shared_from_this(), opSend)); -#else - strand_.post(std::bind(&ClientConnection::sendMessageInternal, shared_from_this(), opSend)); -#endif - } else { - sendMessageInternal(opSend); - } - } else { - // Queue to send later - pendingWriteBuffers_.push_back(opSend); - } -} - -void ClientConnection::sendMessageInternal(const OpSendMsg& opSend) { - PairSharedBuffer buffer = - Commands::newSend(outgoingBuffer_, outgoingCmd_, opSend.producerId_, opSend.sequenceId_, - getChecksumType(), opSend.metadata_, opSend.payload_); - - asyncWrite(buffer, customAllocWriteHandler(std::bind(&ClientConnection::handleSendPair, - shared_from_this(), std::placeholders::_1))); -} - -void ClientConnection::handleSend(const boost::system::error_code& err, const SharedBuffer&) { - if (err) { - LOG_WARN(cnxString_ << "Could not send message on connection: " << err << " " << err.message()); - close(); - } else { - sendPendingCommands(); - } -} - -void ClientConnection::handleSendPair(const boost::system::error_code& err) { - if (err) { - LOG_WARN(cnxString_ << "Could not send pair message on connection: " << err << " " << err.message()); - close(); - } else { - sendPendingCommands(); - } -} - -void ClientConnection::sendPendingCommands() { - Lock lock(mutex_); - - if (--pendingWriteOperations_ > 0) { - assert(!pendingWriteBuffers_.empty()); - boost::any any = pendingWriteBuffers_.front(); - pendingWriteBuffers_.pop_front(); - - if (any.type() == typeid(SharedBuffer)) { - SharedBuffer buffer = boost::any_cast(any); - asyncWrite(buffer.const_asio_buffer(), - customAllocWriteHandler(std::bind(&ClientConnection::handleSend, shared_from_this(), - std::placeholders::_1, buffer))); - } else { - assert(any.type() == 
typeid(OpSendMsg)); - - const OpSendMsg& op = boost::any_cast(any); - PairSharedBuffer buffer = - Commands::newSend(outgoingBuffer_, outgoingCmd_, op.producerId_, op.sequenceId_, - getChecksumType(), op.metadata_, op.payload_); - - asyncWrite(buffer, customAllocWriteHandler(std::bind(&ClientConnection::handleSendPair, - shared_from_this(), std::placeholders::_1))); - } - } else { - // No more pending writes - outgoingBuffer_.reset(); - } -} - -Future ClientConnection::sendRequestWithId(SharedBuffer cmd, int requestId) { - Lock lock(mutex_); - - if (isClosed()) { - lock.unlock(); - Promise promise; - promise.setFailed(ResultNotConnected); - return promise.getFuture(); - } - - PendingRequestData requestData; - requestData.timer = executor_->createDeadlineTimer(); - requestData.timer->expires_from_now(operationsTimeout_); - requestData.timer->async_wait(std::bind(&ClientConnection::handleRequestTimeout, shared_from_this(), - std::placeholders::_1, requestData)); - - pendingRequests_.insert(std::make_pair(requestId, requestData)); - lock.unlock(); - - sendCommand(cmd); - return requestData.promise.getFuture(); -} - -void ClientConnection::handleRequestTimeout(const boost::system::error_code& ec, - PendingRequestData pendingRequestData) { - if (!ec) { - pendingRequestData.promise.setFailed(ResultTimeout); - } -} - -void ClientConnection::handleLookupTimeout(const boost::system::error_code& ec, - LookupRequestData pendingRequestData) { - if (!ec) { - pendingRequestData.promise->setFailed(ResultTimeout); - } -} - -void ClientConnection::handleKeepAliveTimeout() { - if (isClosed()) { - return; - } - - if (havePendingPingRequest_) { - LOG_WARN(cnxString_ << "Forcing connection to close after keep-alive timeout"); - close(); - } else { - // Send keep alive probe to peer - LOG_DEBUG(cnxString_ << "Sending ping message"); - havePendingPingRequest_ = true; - sendCommand(Commands::newPing()); - - // If the close operation has already called the keepAliveTimer_.reset() then the 
use_count will be - // zero And we do not attempt to dereference the pointer. - Lock lock(mutex_); - if (keepAliveTimer_) { - keepAliveTimer_->expires_from_now(boost::posix_time::seconds(KeepAliveIntervalInSeconds)); - keepAliveTimer_->async_wait( - std::bind(&ClientConnection::handleKeepAliveTimeout, shared_from_this())); - } - lock.unlock(); - } -} - -void ClientConnection::handleConsumerStatsTimeout(const boost::system::error_code& ec, - std::vector consumerStatsRequests) { - if (ec) { - LOG_DEBUG(cnxString_ << " Ignoring timer cancelled event, code[" << ec << "]"); - return; - } - startConsumerStatsTimer(consumerStatsRequests); -} - -void ClientConnection::close(Result result) { - Lock lock(mutex_); - if (isClosed()) { - return; - } - state_ = Disconnected; - - closeSocket(); - if (tlsSocket_) { - boost::system::error_code err; - tlsSocket_->lowest_layer().close(err); - if (err) { - LOG_WARN(cnxString_ << "Failed to close TLS socket: " << err.message()); - } - } - - if (executor_) { - executor_.reset(); - } - - // Move the internal fields to process them after `mutex_` was unlocked - auto consumers = std::move(consumers_); - auto producers = std::move(producers_); - auto pendingRequests = std::move(pendingRequests_); - auto pendingLookupRequests = std::move(pendingLookupRequests_); - auto pendingConsumerStatsMap = std::move(pendingConsumerStatsMap_); - auto pendingGetLastMessageIdRequests = std::move(pendingGetLastMessageIdRequests_); - auto pendingGetNamespaceTopicsRequests = std::move(pendingGetNamespaceTopicsRequests_); - - numOfPendingLookupRequest_ = 0; - - if (keepAliveTimer_) { - keepAliveTimer_->cancel(); - keepAliveTimer_.reset(); - } - - if (consumerStatsRequestTimer_) { - consumerStatsRequestTimer_->cancel(); - consumerStatsRequestTimer_.reset(); - } - - if (connectTimeoutTask_) { - connectTimeoutTask_->stop(); - } - - lock.unlock(); - LOG_INFO(cnxString_ << "Connection closed with " << result); - - for (ProducersMap::iterator it = producers.begin(); 
it != producers.end(); ++it) { - HandlerBase::handleDisconnection(result, shared_from_this(), it->second); - } - - for (ConsumersMap::iterator it = consumers.begin(); it != consumers.end(); ++it) { - HandlerBase::handleDisconnection(result, shared_from_this(), it->second); - } - - connectPromise_.setFailed(result); - - // Fail all pending requests, all these type are map whose value type contains the Promise object - for (auto& kv : pendingRequests) { - kv.second.promise.setFailed(result); - } - for (auto& kv : pendingLookupRequests) { - kv.second.promise->setFailed(result); - } - for (auto& kv : pendingConsumerStatsMap) { - LOG_ERROR(cnxString_ << " Closing Client Connection, please try again later"); - kv.second.setFailed(result); - } - for (auto& kv : pendingGetLastMessageIdRequests) { - kv.second.setFailed(result); - } - for (auto& kv : pendingGetNamespaceTopicsRequests) { - kv.second.setFailed(result); - } -} - -bool ClientConnection::isClosed() const { return state_ == Disconnected; } - -Future ClientConnection::getConnectFuture() { - return connectPromise_.getFuture(); -} - -void ClientConnection::registerProducer(int producerId, ProducerImplPtr producer) { - Lock lock(mutex_); - producers_.insert(std::make_pair(producerId, producer)); -} - -void ClientConnection::registerConsumer(int consumerId, ConsumerImplPtr consumer) { - Lock lock(mutex_); - consumers_.insert(std::make_pair(consumerId, consumer)); -} - -void ClientConnection::removeProducer(int producerId) { - Lock lock(mutex_); - producers_.erase(producerId); -} - -void ClientConnection::removeConsumer(int consumerId) { - Lock lock(mutex_); - consumers_.erase(consumerId); -} - -const std::string& ClientConnection::brokerAddress() const { return physicalAddress_; } - -const std::string& ClientConnection::cnxString() const { return cnxString_; } - -int ClientConnection::getServerProtocolVersion() const { return serverProtocolVersion_; } - -int32_t ClientConnection::getMaxMessageSize() { return 
maxMessageSize_.load(std::memory_order_acquire); } - -Commands::ChecksumType ClientConnection::getChecksumType() const { - return getServerProtocolVersion() >= proto::v6 ? Commands::Crc32c : Commands::None; -} - -Future ClientConnection::newGetLastMessageId(uint64_t consumerId, - uint64_t requestId) { - Lock lock(mutex_); - Promise promise; - if (isClosed()) { - lock.unlock(); - LOG_ERROR(cnxString_ << " Client is not connected to the broker"); - promise.setFailed(ResultNotConnected); - return promise.getFuture(); - } - - pendingGetLastMessageIdRequests_.insert(std::make_pair(requestId, promise)); - lock.unlock(); - sendRequestWithId(Commands::newGetLastMessageId(consumerId, requestId), requestId) - .addListener([promise](Result result, const ResponseData& data) { - if (result != ResultOk) { - promise.setFailed(result); - } - }); - return promise.getFuture(); -} - -Future ClientConnection::newGetTopicsOfNamespace(const std::string& nsName, - uint64_t requestId) { - Lock lock(mutex_); - Promise promise; - if (isClosed()) { - lock.unlock(); - LOG_ERROR(cnxString_ << "Client is not connected to the broker"); - promise.setFailed(ResultNotConnected); - return promise.getFuture(); - } - - pendingGetNamespaceTopicsRequests_.insert(std::make_pair(requestId, promise)); - lock.unlock(); - sendCommand(Commands::newGetTopicsOfNamespace(nsName, requestId)); - return promise.getFuture(); -} - -void ClientConnection::closeSocket() { - boost::system::error_code err; - if (socket_) { - socket_->close(err); - if (err) { - LOG_WARN(cnxString_ << "Failed to close socket: " << err.message()); - } - } -} - -void ClientConnection::checkServerError(const proto::ServerError& error) { - switch (error) { - case proto::ServerError::ServiceNotReady: - closeSocket(); - break; - case proto::ServerError::TooManyRequests: - // TODO: Implement maxNumberOfRejectedRequestPerConnection like - // https://github.com/apache/pulsar/pull/274 - closeSocket(); - break; - default: - break; - } -} - -} // 
namespace pulsar diff --git a/pulsar-client-cpp/lib/ClientConnection.h b/pulsar-client-cpp/lib/ClientConnection.h deleted file mode 100644 index 418a58313975c..0000000000000 --- a/pulsar-client-cpp/lib/ClientConnection.h +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef _PULSAR_CLIENT_CONNECTION_HEADER_ -#define _PULSAR_CLIENT_CONNECTION_HEADER_ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ExecutorService.h" -#include "Future.h" -#include "PulsarApi.pb.h" -#include -#include "SharedBuffer.h" -#include "Backoff.h" -#include "Commands.h" -#include "LookupDataResult.h" -#include "UtilAllocator.h" -#include -#include -#include -#include "lib/PeriodicTask.h" -#include "lib/GetLastMessageIdResponse.h" - -using namespace pulsar; - -namespace pulsar { - -class PulsarFriend; - -class ExecutorService; - -class ClientConnection; -typedef std::shared_ptr ClientConnectionPtr; -typedef std::weak_ptr ClientConnectionWeakPtr; - -class ProducerImpl; -typedef std::shared_ptr ProducerImplPtr; -typedef std::weak_ptr ProducerImplWeakPtr; - -class ConsumerImpl; -typedef std::shared_ptr ConsumerImplPtr; -typedef std::weak_ptr ConsumerImplWeakPtr; - -class LookupDataResult; - -struct OpSendMsg; - -// Data returned on the request operation. Mostly used on create-producer command -struct ResponseData { - std::string producerName; - int64_t lastSequenceId; - std::string schemaVersion; - Optional topicEpoch; -}; - -typedef std::shared_ptr> NamespaceTopicsPtr; - -class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this { - enum State - { - Pending, - TcpConnected, - Ready, - Disconnected - }; - - public: - typedef std::shared_ptr SocketPtr; - typedef std::shared_ptr> TlsSocketPtr; - typedef std::shared_ptr ConnectionPtr; - typedef std::function ConnectionListener; - typedef std::vector::iterator ListenerIterator; - - /* - * logicalAddress - url of the service, for ex. 
pulsar://localhost:6650 - * physicalAddress - the address to connect to, it could be different from the logical address if proxy - * comes into play connected - set when tcp connection is established - * - */ - ClientConnection(const std::string& logicalAddress, const std::string& physicalAddress, - ExecutorServicePtr executor, const ClientConfiguration& clientConfiguration, - const AuthenticationPtr& authentication); - ~ClientConnection(); - - /* - * starts tcp connect_async - * @return future which is not yet set - */ - void tcpConnectAsync(); - - void close(Result result = ResultConnectError); - - bool isClosed() const; - - Future getConnectFuture(); - - Future getCloseFuture(); - - void newTopicLookup(const std::string& topicName, bool authoritative, const std::string& listenerName, - const uint64_t requestId, LookupDataResultPromisePtr promise); - - void newPartitionedMetadataLookup(const std::string& topicName, const uint64_t requestId, - LookupDataResultPromisePtr promise); - - void sendCommand(const SharedBuffer& cmd); - void sendCommandInternal(const SharedBuffer& cmd); - void sendMessage(const OpSendMsg& opSend); - void sendMessageInternal(const OpSendMsg& opSend); - - void registerProducer(int producerId, ProducerImplPtr producer); - void registerConsumer(int consumerId, ConsumerImplPtr consumer); - - void removeProducer(int producerId); - void removeConsumer(int consumerId); - - /** - * Send a request with a specific Id over the connection. 
The future will be - * triggered when the response for this request is received - */ - Future sendRequestWithId(SharedBuffer cmd, int requestId); - - const std::string& brokerAddress() const; - - const std::string& cnxString() const; - - int getServerProtocolVersion() const; - - static int32_t getMaxMessageSize(); - - Commands::ChecksumType getChecksumType() const; - - Future newConsumerStats(uint64_t consumerId, uint64_t requestId); - - Future newGetLastMessageId(uint64_t consumerId, uint64_t requestId); - - Future newGetTopicsOfNamespace(const std::string& nsName, uint64_t requestId); - - private: - struct PendingRequestData { - Promise promise; - DeadlineTimerPtr timer; - }; - - struct LookupRequestData { - LookupDataResultPromisePtr promise; - DeadlineTimerPtr timer; - }; - - /* - * handler for connectAsync - * creates a ConnectionPtr which has a valid ClientConnection object - * although not usable at this point, since this is just tcp connection - * Pulsar - Connect/Connected has yet to happen - */ - void handleTcpConnected(const boost::system::error_code& err, - boost::asio::ip::tcp::resolver::iterator endpointIterator); - - void handleHandshake(const boost::system::error_code& err); - - void handleSentPulsarConnect(const boost::system::error_code& err, const SharedBuffer& buffer); - void handleSentAuthResponse(const boost::system::error_code& err, const SharedBuffer& buffer); - - void readNextCommand(); - - void handleRead(const boost::system::error_code& err, size_t bytesTransferred, uint32_t minReadSize); - - void processIncomingBuffer(); - bool verifyChecksum(SharedBuffer& incomingBuffer_, uint32_t& remainingBytes, - proto::BaseCommand& incomingCmd_); - - void handleActiveConsumerChange(const proto::CommandActiveConsumerChange& change); - void handleIncomingCommand(); - void handleIncomingMessage(const proto::CommandMessage& msg, bool isChecksumValid, - proto::MessageMetadata& msgMetadata, SharedBuffer& payload); - - void handlePulsarConnected(const 
proto::CommandConnected& cmdConnected); - - void handleResolve(const boost::system::error_code& err, - boost::asio::ip::tcp::resolver::iterator endpointIterator); - - void handleSend(const boost::system::error_code& err, const SharedBuffer& cmd); - void handleSendPair(const boost::system::error_code& err); - void sendPendingCommands(); - void newLookup(const SharedBuffer& cmd, const uint64_t requestId, LookupDataResultPromisePtr promise); - - void handleRequestTimeout(const boost::system::error_code& ec, PendingRequestData pendingRequestData); - - void handleLookupTimeout(const boost::system::error_code&, LookupRequestData); - - void handleKeepAliveTimeout(); - - template - inline AllocHandler customAllocReadHandler(Handler h) { - return AllocHandler(readHandlerAllocator_, h); - } - - template - inline AllocHandler customAllocWriteHandler(Handler h) { - return AllocHandler(writeHandlerAllocator_, h); - } - - template - inline void asyncWrite(const ConstBufferSequence& buffers, WriteHandler handler) { - if (tlsSocket_) { -#if BOOST_VERSION >= 106600 - boost::asio::async_write(*tlsSocket_, buffers, boost::asio::bind_executor(strand_, handler)); -#else - boost::asio::async_write(*tlsSocket_, buffers, strand_.wrap(handler)); -#endif - } else { - boost::asio::async_write(*socket_, buffers, handler); - } - } - - template - inline void asyncReceive(const MutableBufferSequence& buffers, ReadHandler handler) { - if (tlsSocket_) { -#if BOOST_VERSION >= 106600 - tlsSocket_->async_read_some(buffers, boost::asio::bind_executor(strand_, handler)); -#else - tlsSocket_->async_read_some(buffers, strand_.wrap(handler)); -#endif - } else { - socket_->async_receive(buffers, handler); - } - } - - State state_ = Pending; - TimeDuration operationsTimeout_; - AuthenticationPtr authentication_; - int serverProtocolVersion_; - static std::atomic maxMessageSize_; - - ExecutorServicePtr executor_; - - TcpResolverPtr resolver_; - - /* - * tcp connection socket to the pulsar broker - */ - 
SocketPtr socket_; - TlsSocketPtr tlsSocket_; -#if BOOST_VERSION >= 106600 - boost::asio::strand strand_; -#else - boost::asio::io_service::strand strand_; -#endif - - const std::string logicalAddress_; - /* - * stores address of the service, for ex. pulsar://localhost:6650 - */ - const std::string physicalAddress_; - - // Represent both endpoint of the tcp connection. eg: [client:1234 -> server:6650] - std::string cnxString_; - - /* - * indicates if async connection establishment failed - */ - boost::system::error_code error_; - - SharedBuffer incomingBuffer_; - proto::BaseCommand incomingCmd_; - - Promise connectPromise_; - std::shared_ptr connectTimeoutTask_; - - typedef std::map PendingRequestsMap; - PendingRequestsMap pendingRequests_; - - typedef std::map PendingLookupRequestsMap; - PendingLookupRequestsMap pendingLookupRequests_; - - typedef std::map ProducersMap; - ProducersMap producers_; - - typedef std::map ConsumersMap; - ConsumersMap consumers_; - - typedef std::map> PendingConsumerStatsMap; - PendingConsumerStatsMap pendingConsumerStatsMap_; - - typedef std::map> PendingGetLastMessageIdRequestsMap; - PendingGetLastMessageIdRequestsMap pendingGetLastMessageIdRequests_; - - typedef std::map> PendingGetNamespaceTopicsMap; - PendingGetNamespaceTopicsMap pendingGetNamespaceTopicsRequests_; - - std::mutex mutex_; - typedef std::unique_lock Lock; - - // Pending buffers to write on the socket - std::deque pendingWriteBuffers_; - int pendingWriteOperations_ = 0; - - SharedBuffer outgoingBuffer_; - proto::BaseCommand outgoingCmd_; - - HandlerAllocator readHandlerAllocator_; - HandlerAllocator writeHandlerAllocator_; - - // Signals whether we're waiting for a response from broker - bool havePendingPingRequest_ = false; - DeadlineTimerPtr keepAliveTimer_; - DeadlineTimerPtr consumerStatsRequestTimer_; - - void handleConsumerStatsTimeout(const boost::system::error_code& ec, - std::vector consumerStatsRequests); - - void startConsumerStatsTimer(std::vector 
consumerStatsRequests); - uint32_t maxPendingLookupRequest_; - uint32_t numOfPendingLookupRequest_ = 0; - friend class PulsarFriend; - - bool isTlsAllowInsecureConnection_ = false; - - void closeSocket(); - void checkServerError(const proto::ServerError& error); -}; -} // namespace pulsar - -#endif //_PULSAR_CLIENT_CONNECTION_HEADER_ diff --git a/pulsar-client-cpp/lib/ClientImpl.cc b/pulsar-client-cpp/lib/ClientImpl.cc deleted file mode 100644 index 29e92f3b815a6..0000000000000 --- a/pulsar-client-cpp/lib/ClientImpl.cc +++ /dev/null @@ -1,659 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "ClientImpl.h" -#include "ClientConfigurationImpl.h" -#include "LogUtils.h" -#include "ConsumerImpl.h" -#include "ProducerImpl.h" -#include "ReaderImpl.h" -#include "PartitionedProducerImpl.h" -#include "MultiTopicsConsumerImpl.h" -#include "PatternMultiTopicsConsumerImpl.h" -#include "TimeUtils.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef USE_LOG4CXX -#include "Log4CxxLogger.h" -#endif - -#ifdef PULSAR_USE_BOOST_REGEX -#include -#define PULSAR_REGEX_NAMESPACE boost -#else -#include -#define PULSAR_REGEX_NAMESPACE std -#endif - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -static const char hexDigits[] = {'0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; -static std::uniform_int_distribution<> hexDigitsDist(0, sizeof(hexDigits) - 1); -static std::mt19937 randomEngine = - std::mt19937(std::chrono::high_resolution_clock::now().time_since_epoch().count()); - -std::string generateRandomName() { - const int randomNameLength = 10; - - std::string randomName; - for (int i = 0; i < randomNameLength; ++i) { - randomName += hexDigits[hexDigitsDist(randomEngine)]; - } - return randomName; -} - -typedef std::unique_lock Lock; - -typedef std::vector StringList; - -ClientImpl::ClientImpl(const std::string& serviceUrl, const ClientConfiguration& clientConfiguration, - bool poolConnections) - : mutex_(), - state_(Open), - serviceNameResolver_(serviceUrl), - clientConfiguration_(ClientConfiguration(clientConfiguration).setUseTls(serviceNameResolver_.useTls())), - memoryLimitController_(clientConfiguration.getMemoryLimit()), - ioExecutorProvider_(std::make_shared(clientConfiguration_.getIOThreads())), - listenerExecutorProvider_( - std::make_shared(clientConfiguration_.getMessageListenerThreads())), - partitionListenerExecutorProvider_( - std::make_shared(clientConfiguration_.getMessageListenerThreads())), - pool_(clientConfiguration_, 
ioExecutorProvider_, clientConfiguration_.getAuthPtr(), poolConnections), - producerIdGenerator_(0), - consumerIdGenerator_(0), - requestIdGenerator_(0), - closingError(ResultOk) { - std::unique_ptr loggerFactory = clientConfiguration_.impl_->takeLogger(); - if (!loggerFactory) { -#ifdef USE_LOG4CXX - if (!clientConfiguration_.getLogConfFilePath().empty()) { - // A log4cxx log file was passed through deprecated parameter. Use that to configure Log4CXX - loggerFactory = Log4CxxLoggerFactory::create(clientConfiguration_.getLogConfFilePath()); - } else { - // Use default simple console logger - loggerFactory.reset(new ConsoleLoggerFactory); - } -#else - // Use default simple console logger - loggerFactory.reset(new ConsoleLoggerFactory); -#endif - } - LogUtils::setLoggerFactory(std::move(loggerFactory)); - - LookupServicePtr underlyingLookupServicePtr; - if (serviceNameResolver_.useHttp()) { - LOG_DEBUG("Using HTTP Lookup"); - underlyingLookupServicePtr = std::make_shared( - std::ref(serviceNameResolver_), std::cref(clientConfiguration_), - std::cref(clientConfiguration_.getAuthPtr())); - } else { - LOG_DEBUG("Using Binary Lookup"); - underlyingLookupServicePtr = - std::make_shared(std::ref(serviceNameResolver_), std::ref(pool_), - std::cref(clientConfiguration_.getListenerName())); - } - - lookupServicePtr_ = RetryableLookupService::create( - underlyingLookupServicePtr, clientConfiguration_.getOperationTimeoutSeconds(), ioExecutorProvider_); -} - -ClientImpl::~ClientImpl() { shutdown(); } - -const ClientConfiguration& ClientImpl::conf() const { return clientConfiguration_; } - -MemoryLimitController& ClientImpl::getMemoryLimitController() { return memoryLimitController_; } - -ExecutorServiceProviderPtr ClientImpl::getIOExecutorProvider() { return ioExecutorProvider_; } - -ExecutorServiceProviderPtr ClientImpl::getListenerExecutorProvider() { return listenerExecutorProvider_; } - -ExecutorServiceProviderPtr ClientImpl::getPartitionListenerExecutorProvider() { - return 
partitionListenerExecutorProvider_; -} - -LookupServicePtr ClientImpl::getLookup() { return lookupServicePtr_; } - -void ClientImpl::createProducerAsync(const std::string& topic, ProducerConfiguration conf, - CreateProducerCallback callback) { - if (conf.isChunkingEnabled() && conf.getBatchingEnabled()) { - throw std::invalid_argument("Batching and chunking of messages can't be enabled together"); - } - TopicNamePtr topicName; - { - Lock lock(mutex_); - if (state_ != Open) { - lock.unlock(); - callback(ResultAlreadyClosed, Producer()); - return; - } else if (!(topicName = TopicName::get(topic))) { - lock.unlock(); - callback(ResultInvalidTopicName, Producer()); - return; - } - } - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( - std::bind(&ClientImpl::handleCreateProducer, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, topicName, conf, callback)); -} - -void ClientImpl::handleCreateProducer(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, ProducerConfiguration conf, - CreateProducerCallback callback) { - if (!result) { - ProducerImplBasePtr producer; - if (partitionMetadata->getPartitions() > 0) { - producer = std::make_shared(shared_from_this(), topicName, - partitionMetadata->getPartitions(), conf); - } else { - producer = std::make_shared(shared_from_this(), *topicName, conf); - } - producer->getProducerCreatedFuture().addListener( - std::bind(&ClientImpl::handleProducerCreated, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, callback, producer)); - producer->start(); - } else { - LOG_ERROR("Error Checking/Getting Partition Metadata while creating producer on " - << topicName->toString() << " -- " << result); - callback(result, Producer()); - } -} - -void ClientImpl::handleProducerCreated(Result result, ProducerImplBaseWeakPtr producerBaseWeakPtr, - CreateProducerCallback callback, ProducerImplBasePtr producer) { - if (result == ResultOk) { - Lock 
lock(mutex_); - producers_.push_back(producer); - lock.unlock(); - callback(result, Producer(producer)); - } else { - callback(result, {}); - } -} - -void ClientImpl::createReaderAsync(const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf, ReaderCallback callback) { - TopicNamePtr topicName; - { - Lock lock(mutex_); - if (state_ != Open) { - lock.unlock(); - callback(ResultAlreadyClosed, Reader()); - return; - } else if (!(topicName = TopicName::get(topic))) { - lock.unlock(); - callback(ResultInvalidTopicName, Reader()); - return; - } - } - - MessageId msgId(startMessageId); - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( - std::bind(&ClientImpl::handleReaderMetadataLookup, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, topicName, msgId, conf, callback)); -} - -void ClientImpl::handleReaderMetadataLookup(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, MessageId startMessageId, - ReaderConfiguration conf, ReaderCallback callback) { - if (result != ResultOk) { - LOG_ERROR("Error Checking/Getting Partition Metadata while creating readeron " - << topicName->toString() << " -- " << result); - callback(result, Reader()); - return; - } - - if (partitionMetadata->getPartitions() > 0) { - LOG_ERROR("Topic reader cannot be created on a partitioned topic: " << topicName->toString()); - callback(ResultOperationNotSupported, Reader()); - return; - } - - ReaderImplPtr reader = std::make_shared(shared_from_this(), topicName->toString(), conf, - getListenerExecutorProvider()->get(), callback); - ConsumerImplBasePtr consumer = reader->getConsumer().lock(); - auto self = shared_from_this(); - reader->start(startMessageId, [this, self](const ConsumerImplBaseWeakPtr& weakConsumerPtr) { - Lock lock(mutex_); - consumers_.push_back(weakConsumerPtr); - lock.unlock(); - }); -} - -void ClientImpl::subscribeWithRegexAsync(const std::string& regexPattern, const 
std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback) { - TopicNamePtr topicNamePtr = TopicName::get(regexPattern); - - Lock lock(mutex_); - if (state_ != Open) { - lock.unlock(); - callback(ResultAlreadyClosed, Consumer()); - return; - } else { - lock.unlock(); - if (!topicNamePtr) { - LOG_ERROR("Topic pattern not valid: " << regexPattern); - callback(ResultInvalidTopicName, Consumer()); - return; - } - } - - NamespaceNamePtr nsName = topicNamePtr->getNamespaceName(); - - lookupServicePtr_->getTopicsOfNamespaceAsync(nsName).addListener( - std::bind(&ClientImpl::createPatternMultiTopicsConsumer, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, regexPattern, subscriptionName, conf, callback)); -} - -void ClientImpl::createPatternMultiTopicsConsumer(const Result result, const NamespaceTopicsPtr topics, - const std::string& regexPattern, - const std::string& subscriptionName, - const ConsumerConfiguration& conf, - SubscribeCallback callback) { - if (result == ResultOk) { - ConsumerImplBasePtr consumer; - - PULSAR_REGEX_NAMESPACE::regex pattern(regexPattern); - - NamespaceTopicsPtr matchTopics = - PatternMultiTopicsConsumerImpl::topicsPatternFilter(*topics, pattern); - - consumer = std::make_shared( - shared_from_this(), regexPattern, *matchTopics, subscriptionName, conf, lookupServicePtr_); - - consumer->getConsumerCreatedFuture().addListener( - std::bind(&ClientImpl::handleConsumerCreated, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, callback, consumer)); - consumer->start(); - } else { - LOG_ERROR("Error Getting topicsOfNameSpace while createPatternMultiTopicsConsumer: " << result); - callback(result, Consumer()); - } -} - -void ClientImpl::subscribeAsync(const std::vector& topics, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback) { - TopicNamePtr topicNamePtr; - - Lock lock(mutex_); - if (state_ != Open) { - lock.unlock(); - 
callback(ResultAlreadyClosed, Consumer()); - return; - } else { - if (!topics.empty() && !(topicNamePtr = MultiTopicsConsumerImpl::topicNamesValid(topics))) { - lock.unlock(); - callback(ResultInvalidTopicName, Consumer()); - return; - } - } - lock.unlock(); - - if (topicNamePtr) { - std::string randomName = generateRandomName(); - std::stringstream consumerTopicNameStream; - consumerTopicNameStream << topicNamePtr->toString() << "-TopicsConsumerFakeName-" << randomName; - topicNamePtr = TopicName::get(consumerTopicNameStream.str()); - } - - ConsumerImplBasePtr consumer = std::make_shared( - shared_from_this(), topics, subscriptionName, topicNamePtr, conf, lookupServicePtr_); - - consumer->getConsumerCreatedFuture().addListener(std::bind(&ClientImpl::handleConsumerCreated, - shared_from_this(), std::placeholders::_1, - std::placeholders::_2, callback, consumer)); - consumer->start(); -} - -void ClientImpl::subscribeAsync(const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback) { - TopicNamePtr topicName; - { - Lock lock(mutex_); - if (state_ != Open) { - lock.unlock(); - callback(ResultAlreadyClosed, Consumer()); - return; - } else if (!(topicName = TopicName::get(topic))) { - lock.unlock(); - callback(ResultInvalidTopicName, Consumer()); - return; - } else if (conf.isReadCompacted() && (topicName->getDomain().compare("persistent") != 0 || - (conf.getConsumerType() != ConsumerExclusive && - conf.getConsumerType() != ConsumerFailover))) { - lock.unlock(); - callback(ResultInvalidConfiguration, Consumer()); - return; - } - } - - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( - std::bind(&ClientImpl::handleSubscribe, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, topicName, subscriptionName, conf, callback)); -} - -void ClientImpl::handleSubscribe(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, const std::string& 
subscriptionName, - ConsumerConfiguration conf, SubscribeCallback callback) { - if (result == ResultOk) { - // generate random name if not supplied by the customer. - if (conf.getConsumerName().empty()) { - conf.setConsumerName(generateRandomName()); - } - ConsumerImplBasePtr consumer; - if (partitionMetadata->getPartitions() > 0) { - if (conf.getReceiverQueueSize() == 0) { - LOG_ERROR("Can't use partitioned topic if the queue size is 0."); - callback(ResultInvalidConfiguration, Consumer()); - return; - } - consumer = std::make_shared(shared_from_this(), topicName, - partitionMetadata->getPartitions(), - subscriptionName, conf, lookupServicePtr_); - } else { - auto consumerImpl = std::make_shared( - shared_from_this(), topicName->toString(), subscriptionName, conf, topicName->isPersistent()); - consumerImpl->setPartitionIndex(topicName->getPartitionIndex()); - consumer = consumerImpl; - } - consumer->getConsumerCreatedFuture().addListener( - std::bind(&ClientImpl::handleConsumerCreated, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, callback, consumer)); - consumer->start(); - } else { - LOG_ERROR("Error Checking/Getting Partition Metadata while Subscribing on " << topicName->toString() - << " -- " << result); - callback(result, Consumer()); - } -} - -void ClientImpl::handleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, - SubscribeCallback callback, ConsumerImplBasePtr consumer) { - if (result == ResultOk) { - Lock lock(mutex_); - consumers_.push_back(consumer); - lock.unlock(); - callback(result, Consumer(consumer)); - } else { - callback(result, {}); - } -} - -Future ClientImpl::getConnection(const std::string& topic) { - Promise promise; - - const auto topicNamePtr = TopicName::get(topic); - if (!topicNamePtr) { - LOG_ERROR("Unable to parse topic - " << topic); - promise.setFailed(ResultInvalidTopicName); - return promise.getFuture(); - } - - auto self = shared_from_this(); - 
lookupServicePtr_->getBroker(*topicNamePtr) - .addListener([this, self, promise](Result result, const LookupService::LookupResult& data) { - if (result != ResultOk) { - promise.setFailed(result); - return; - } - pool_.getConnectionAsync(data.logicalAddress, data.physicalAddress) - .addListener([promise](Result result, const ClientConnectionWeakPtr& weakCnx) { - if (result == ResultOk) { - promise.setValue(weakCnx); - } else { - promise.setFailed(result); - } - }); - }); - - return promise.getFuture(); -} - -void ClientImpl::handleGetPartitions(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, GetPartitionsCallback callback) { - if (result != ResultOk) { - LOG_ERROR("Error getting topic partitions metadata: " << result); - callback(result, StringList()); - return; - } - - StringList partitions; - - if (partitionMetadata->getPartitions() > 0) { - for (unsigned int i = 0; i < partitionMetadata->getPartitions(); i++) { - partitions.push_back(topicName->getTopicPartitionName(i)); - } - } else { - partitions.push_back(topicName->toString()); - } - - callback(ResultOk, partitions); -} - -void ClientImpl::getPartitionsForTopicAsync(const std::string& topic, GetPartitionsCallback callback) { - TopicNamePtr topicName; - { - Lock lock(mutex_); - if (state_ != Open) { - lock.unlock(); - callback(ResultAlreadyClosed, StringList()); - return; - } else if (!(topicName = TopicName::get(topic))) { - lock.unlock(); - callback(ResultInvalidTopicName, StringList()); - return; - } - } - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( - std::bind(&ClientImpl::handleGetPartitions, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, topicName, callback)); -} - -void ClientImpl::closeAsync(CloseCallback callback) { - Lock lock(mutex_); - ProducersList producers(producers_); - ConsumersList consumers(consumers_); - - if (state_ != Open && callback) { - lock.unlock(); - callback(ResultAlreadyClosed); - return; - } 
- // Set the state to Closing so that no producers could get added - state_ = Closing; - lock.unlock(); - - memoryLimitController_.close(); - - SharedInt numberOfOpenHandlers = std::make_shared(producers.size() + consumers.size()); - LOG_INFO("Closing Pulsar client with " << producers.size() << " producers and " << consumers.size() - << " consumers"); - - for (ProducersList::iterator it = producers.begin(); it != producers.end(); ++it) { - ProducerImplBasePtr producer = it->lock(); - if (producer && !producer->isClosed()) { - producer->closeAsync(std::bind(&ClientImpl::handleClose, shared_from_this(), - std::placeholders::_1, numberOfOpenHandlers, callback)); - } else { - // Since the connection is already closed - (*numberOfOpenHandlers)--; - } - } - - for (ConsumersList::iterator it = consumers.begin(); it != consumers.end(); ++it) { - ConsumerImplBasePtr consumer = it->lock(); - if (consumer && !consumer->isClosed()) { - consumer->closeAsync(std::bind(&ClientImpl::handleClose, shared_from_this(), - std::placeholders::_1, numberOfOpenHandlers, callback)); - } else { - // Since the connection is already closed - (*numberOfOpenHandlers)--; - } - } - - if (*numberOfOpenHandlers == 0 && callback) { - handleClose(ResultOk, numberOfOpenHandlers, callback); - } -} - -void ClientImpl::handleClose(Result result, SharedInt numberOfOpenHandlers, ResultCallback callback) { - Result expected = ResultOk; - if (!closingError.compare_exchange_strong(expected, result)) { - LOG_DEBUG("Tried to updated closingError, but already set to " - << expected << ". 
This means multiple errors have occurred while closing the client"); - } - - if (*numberOfOpenHandlers > 0) { - --(*numberOfOpenHandlers); - } - if (*numberOfOpenHandlers == 0) { - Lock lock(mutex_); - if (state_ == Closed) { - LOG_DEBUG("Client is already shutting down, possible race condition in handleClose"); - return; - } else { - state_ = Closed; - lock.unlock(); - } - - LOG_DEBUG("Shutting down producers and consumers for client"); - // handleClose() is called in ExecutorService's event loop, while shutdown() tried to wait the event - // loop exits. So here we use another thread to call shutdown(). - auto self = shared_from_this(); - std::thread shutdownTask{[this, self, callback] { - shutdown(); - if (callback) { - if (closingError != ResultOk) { - LOG_DEBUG( - "Problem in closing client, could not close one or more consumers or producers"); - } - callback(closingError); - } - }}; - shutdownTask.detach(); - } -} - -void ClientImpl::shutdown() { - Lock lock(mutex_); - ProducersList producers; - ConsumersList consumers; - - producers.swap(producers_); - consumers.swap(consumers_); - lock.unlock(); - - for (ProducersList::iterator it = producers.begin(); it != producers.end(); ++it) { - ProducerImplBasePtr producer = it->lock(); - if (producer) { - producer->shutdown(); - } - } - - for (ConsumersList::iterator it = consumers.begin(); it != consumers.end(); ++it) { - ConsumerImplBasePtr consumer = it->lock(); - if (consumer) { - consumer->shutdown(); - } - } - - if (producers.size() + consumers.size() > 0) { - LOG_DEBUG(producers.size() << " producers and " << consumers.size() - << " consumers have been shutdown."); - } - if (!pool_.close()) { - // pool_ has already been closed. It means shutdown() has been called before. 
- return; - } - LOG_DEBUG("ConnectionPool is closed"); - - // 500ms as the timeout is long enough because ExecutorService::close calls io_service::stop() internally - // and waits until io_service::run() in another thread returns, which should be as soon as possible after - // stop() is called. - TimeoutProcessor timeoutProcessor{500}; - - timeoutProcessor.tik(); - ioExecutorProvider_->close(timeoutProcessor.getLeftTimeout()); - timeoutProcessor.tok(); - LOG_DEBUG("ioExecutorProvider_ is closed"); - - timeoutProcessor.tik(); - listenerExecutorProvider_->close(timeoutProcessor.getLeftTimeout()); - timeoutProcessor.tok(); - LOG_DEBUG("listenerExecutorProvider_ is closed"); - - timeoutProcessor.tik(); - partitionListenerExecutorProvider_->close(timeoutProcessor.getLeftTimeout()); - timeoutProcessor.tok(); - LOG_DEBUG("partitionListenerExecutorProvider_ is closed"); -} - -uint64_t ClientImpl::newProducerId() { - Lock lock(mutex_); - return producerIdGenerator_++; -} - -uint64_t ClientImpl::newConsumerId() { - Lock lock(mutex_); - return consumerIdGenerator_++; -} - -uint64_t ClientImpl::newRequestId() { - Lock lock(mutex_); - return requestIdGenerator_++; -} - -uint64_t ClientImpl::getNumberOfProducers() { - Lock lock(mutex_); - uint64_t numberOfAliveProducers = 0; - for (const auto& producer : producers_) { - const auto& producerImpl = producer.lock(); - if (producerImpl) { - numberOfAliveProducers += producerImpl->getNumberOfConnectedProducer(); - } - } - return numberOfAliveProducers; -} - -uint64_t ClientImpl::getNumberOfConsumers() { - Lock lock(mutex_); - uint64_t numberOfAliveConsumers = 0; - for (const auto& consumer : consumers_) { - const auto consumerImpl = consumer.lock(); - if (consumerImpl) { - numberOfAliveConsumers += consumerImpl->getNumberOfConnectedConsumer(); - } - } - return numberOfAliveConsumers; -} - -const ClientConfiguration& ClientImpl::getClientConfig() const { return clientConfiguration_; } - -} /* namespace pulsar */ diff --git 
a/pulsar-client-cpp/lib/ClientImpl.h b/pulsar-client-cpp/lib/ClientImpl.h deleted file mode 100644 index 466461ae71ea0..0000000000000 --- a/pulsar-client-cpp/lib/ClientImpl.h +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_CLIENTIMPL_H_ -#define LIB_CLIENTIMPL_H_ - -#include -#include "ExecutorService.h" -#include "LookupService.h" -#include "MemoryLimitController.h" -#include "ConnectionPool.h" -#include "LookupDataResult.h" -#include -#include -#include "ProducerImplBase.h" -#include "ConsumerImplBase.h" -#include -#include -#include "ServiceNameResolver.h" - -namespace pulsar { - -class ClientImpl; -class PulsarFriend; -typedef std::shared_ptr ClientImplPtr; -typedef std::weak_ptr ClientImplWeakPtr; - -class ReaderImpl; -typedef std::shared_ptr ReaderImplPtr; -typedef std::weak_ptr ReaderImplWeakPtr; - -std::string generateRandomName(); - -class ClientImpl : public std::enable_shared_from_this { - public: - ClientImpl(const std::string& serviceUrl, const ClientConfiguration& clientConfiguration, - bool poolConnections); - ~ClientImpl(); - - void createProducerAsync(const std::string& topic, ProducerConfiguration conf, - CreateProducerCallback callback); - - void subscribeAsync(const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback); - - void subscribeAsync(const std::vector& topics, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback); - - void subscribeWithRegexAsync(const std::string& regexPattern, const std::string& subscriptionName, - const ConsumerConfiguration& conf, SubscribeCallback callback); - - void createReaderAsync(const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf, ReaderCallback callback); - - void getPartitionsForTopicAsync(const std::string& topic, GetPartitionsCallback callback); - - Future getConnection(const std::string& topic); - - void closeAsync(CloseCallback callback); - void shutdown(); - - MemoryLimitController& getMemoryLimitController(); - - uint64_t newProducerId(); - uint64_t newConsumerId(); - uint64_t newRequestId(); - - uint64_t getNumberOfProducers(); - uint64_t 
getNumberOfConsumers(); - - const ClientConfiguration& getClientConfig() const; - - const ClientConfiguration& conf() const; - ExecutorServiceProviderPtr getIOExecutorProvider(); - ExecutorServiceProviderPtr getListenerExecutorProvider(); - ExecutorServiceProviderPtr getPartitionListenerExecutorProvider(); - LookupServicePtr getLookup(); - friend class PulsarFriend; - - private: - void handleCreateProducer(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, ProducerConfiguration conf, - CreateProducerCallback callback); - - void handleSubscribe(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, const std::string& consumerName, ConsumerConfiguration conf, - SubscribeCallback callback); - - void handleReaderMetadataLookup(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, MessageId startMessageId, - ReaderConfiguration conf, ReaderCallback callback); - - void handleGetPartitions(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, GetPartitionsCallback callback); - - void handleProducerCreated(Result result, ProducerImplBaseWeakPtr producerWeakPtr, - CreateProducerCallback callback, ProducerImplBasePtr producer); - void handleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerWeakPtr, - SubscribeCallback callback, ConsumerImplBasePtr consumer); - - typedef std::shared_ptr SharedInt; - - void handleClose(Result result, SharedInt remaining, ResultCallback callback); - - void createPatternMultiTopicsConsumer(const Result result, const NamespaceTopicsPtr topics, - const std::string& regexPattern, const std::string& consumerName, - const ConsumerConfiguration& conf, SubscribeCallback callback); - - enum State - { - Open, - Closing, - Closed - }; - - std::mutex mutex_; - - State state_; - ServiceNameResolver serviceNameResolver_; - ClientConfiguration clientConfiguration_; - MemoryLimitController 
memoryLimitController_; - - ExecutorServiceProviderPtr ioExecutorProvider_; - ExecutorServiceProviderPtr listenerExecutorProvider_; - ExecutorServiceProviderPtr partitionListenerExecutorProvider_; - - LookupServicePtr lookupServicePtr_; - ConnectionPool pool_; - - uint64_t producerIdGenerator_; - uint64_t consumerIdGenerator_; - uint64_t requestIdGenerator_; - - typedef std::vector ProducersList; - ProducersList producers_; - - typedef std::vector ConsumersList; - ConsumersList consumers_; - - std::atomic closingError; - - friend class Client; -}; -} /* namespace pulsar */ - -#endif /* LIB_CLIENTIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/Commands.cc b/pulsar-client-cpp/lib/Commands.cc deleted file mode 100644 index 417e6e31a17c4..0000000000000 --- a/pulsar-client-cpp/lib/Commands.cc +++ /dev/null @@ -1,816 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "Commands.h" -#include "MessageImpl.h" -#include "VersionInternal.h" -#include "pulsar/MessageBuilder.h" -#include "LogUtils.h" -#include "PulsarApi.pb.h" -#include "Utils.h" -#include "Url.h" -#include -#include "checksum/ChecksumProvider.h" -#include -#include - -using namespace pulsar; -namespace pulsar { - -using namespace pulsar::proto; - -DECLARE_LOG_OBJECT(); - -static inline bool isBuiltInSchema(SchemaType schemaType) { - switch (schemaType) { - case STRING: - case JSON: - case AVRO: - case PROTOBUF: - case PROTOBUF_NATIVE: - return true; - - default: - return false; - } -} - -static inline proto::Schema_Type getSchemaType(SchemaType type) { - switch (type) { - case SchemaType::NONE: - return Schema_Type_None; - case STRING: - return Schema_Type_String; - case JSON: - return Schema_Type_Json; - case PROTOBUF: - return Schema_Type_Protobuf; - case AVRO: - return Schema_Type_Avro; - case PROTOBUF_NATIVE: - return Schema_Type_ProtobufNative; - default: - return Schema_Type_None; - } -} - -static proto::Schema* getSchema(const SchemaInfo& schemaInfo) { - proto::Schema* schema = proto::Schema().New(); - schema->set_name(schemaInfo.getName()); - schema->set_schema_data(schemaInfo.getSchema()); - schema->set_type(getSchemaType(schemaInfo.getSchemaType())); - for (const auto& kv : schemaInfo.getProperties()) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(kv.first); - keyValue->set_value(kv.second); - schema->mutable_properties()->AddAllocated(keyValue); - } - - return schema; -} - -SharedBuffer Commands::writeMessageWithSize(const BaseCommand& cmd) { - size_t cmdSize = cmd.ByteSize(); - size_t frameSize = 4 + cmdSize; - size_t bufferSize = 4 + frameSize; - - SharedBuffer buffer = SharedBuffer::allocate(bufferSize); - - buffer.writeUnsignedInt(frameSize); - buffer.writeUnsignedInt(cmdSize); - cmd.SerializeToArray(buffer.mutableData(), cmdSize); - buffer.bytesWritten(cmdSize); - return buffer; -} - -SharedBuffer 
Commands::newPartitionMetadataRequest(const std::string& topic, uint64_t requestId) { - static BaseCommand cmd; - static std::mutex mutex; - std::lock_guard lock(mutex); - cmd.set_type(BaseCommand::PARTITIONED_METADATA); - CommandPartitionedTopicMetadata* partitionMetadata = cmd.mutable_partitionmetadata(); - partitionMetadata->set_topic(topic); - partitionMetadata->set_request_id(requestId); - const SharedBuffer buffer = writeMessageWithSize(cmd); - cmd.clear_partitionmetadata(); - return buffer; -} - -SharedBuffer Commands::newLookup(const std::string& topic, const bool authoritative, uint64_t requestId, - const std::string& listenerName) { - static BaseCommand cmd; - static std::mutex mutex; - std::lock_guard lock(mutex); - cmd.set_type(BaseCommand::LOOKUP); - CommandLookupTopic* lookup = cmd.mutable_lookuptopic(); - lookup->set_topic(topic); - lookup->set_authoritative(authoritative); - lookup->set_request_id(requestId); - lookup->set_advertised_listener_name(listenerName); - const SharedBuffer buffer = writeMessageWithSize(cmd); - cmd.clear_lookuptopic(); - return buffer; -} - -SharedBuffer Commands::newConsumerStats(uint64_t consumerId, uint64_t requestId) { - static BaseCommand cmd; - static std::mutex mutex; - std::lock_guard lock(mutex); - cmd.set_type(BaseCommand::CONSUMER_STATS); - CommandConsumerStats* consumerStats = cmd.mutable_consumerstats(); - consumerStats->set_consumer_id(consumerId); - consumerStats->set_request_id(requestId); - const SharedBuffer buffer = writeMessageWithSize(cmd); - cmd.clear_consumerstats(); - return buffer; -} - -PairSharedBuffer Commands::newSend(SharedBuffer& headers, BaseCommand& cmd, uint64_t producerId, - uint64_t sequenceId, ChecksumType checksumType, - const proto::MessageMetadata& metadata, const SharedBuffer& payload) { - cmd.set_type(BaseCommand::SEND); - CommandSend* send = cmd.mutable_send(); - send->set_producer_id(producerId); - send->set_sequence_id(sequenceId); - if (metadata.has_num_messages_in_batch()) { - 
send->set_num_messages(metadata.num_messages_in_batch()); - } - if (metadata.has_chunk_id()) { - send->set_is_chunk(true); - } - - // / Wire format - // [TOTAL_SIZE] [CMD_SIZE][CMD] [MAGIC_NUMBER][CHECKSUM] [METADATA_SIZE][METADATA] [PAYLOAD] - - int cmdSize = cmd.ByteSize(); - int msgMetadataSize = metadata.ByteSize(); - int payloadSize = payload.readableBytes(); - - int magicAndChecksumLength = (Crc32c == (checksumType)) ? (2 + 4 /* magic + checksumLength*/) : 0; - bool includeChecksum = magicAndChecksumLength > 0; - int headerContentSize = - 4 + cmdSize + magicAndChecksumLength + 4 + msgMetadataSize; // cmdLength + cmdSize + magicLength + - // checksumSize + msgMetadataLength + msgMetadataSize - int totalSize = headerContentSize + payloadSize; - int checksumReaderIndex = -1; - - headers.reset(); - assert(headers.writableBytes() >= (4 + headerContentSize)); // totalSize + headerLength - headers.writeUnsignedInt(totalSize); // External frame - - // Write cmd - headers.writeUnsignedInt(cmdSize); - cmd.SerializeToArray(headers.mutableData(), cmdSize); - headers.bytesWritten(cmdSize); - - // Create checksum placeholder - if (includeChecksum) { - headers.writeUnsignedShort(magicCrc32c); - checksumReaderIndex = headers.writerIndex(); - headers.skipBytes(checksumSize); // skip 4 bytes of checksum - } - - // Write metadata - headers.writeUnsignedInt(msgMetadataSize); - metadata.SerializeToArray(headers.mutableData(), msgMetadataSize); - headers.bytesWritten(msgMetadataSize); - - PairSharedBuffer composite; - composite.set(0, headers); - composite.set(1, payload); - - // Write checksum at created checksum-placeholder - if (includeChecksum) { - int writeIndex = headers.writerIndex(); - int metadataStartIndex = checksumReaderIndex + checksumSize; - uint32_t metadataChecksum = - computeChecksum(0, headers.data() + metadataStartIndex, (writeIndex - metadataStartIndex)); - uint32_t computedChecksum = - computeChecksum(metadataChecksum, payload.data(), payload.readableBytes()); 
- // set computed checksum - headers.setWriterIndex(checksumReaderIndex); - headers.writeUnsignedInt(computedChecksum); - headers.setWriterIndex(writeIndex); - } - - cmd.clear_send(); - return composite; -} - -SharedBuffer Commands::newConnect(const AuthenticationPtr& authentication, const std::string& logicalAddress, - bool connectingThroughProxy, Result& result) { - BaseCommand cmd; - cmd.set_type(BaseCommand::CONNECT); - CommandConnect* connect = cmd.mutable_connect(); - connect->set_client_version(_PULSAR_VERSION_INTERNAL_); - connect->set_auth_method_name(authentication->getAuthMethodName()); - connect->set_protocol_version(ProtocolVersion_MAX); - - FeatureFlags* flags = connect->mutable_feature_flags(); - flags->set_supports_auth_refresh(true); - if (connectingThroughProxy) { - Url logicalAddressUrl; - Url::parse(logicalAddress, logicalAddressUrl); - connect->set_proxy_to_broker_url(logicalAddressUrl.hostPort()); - } - - AuthenticationDataPtr authDataContent; - result = authentication->getAuthData(authDataContent); - if (result != ResultOk) { - return SharedBuffer{}; - } - - if (authDataContent->hasDataFromCommand()) { - connect->set_auth_data(authDataContent->getCommandData()); - } - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newAuthResponse(const AuthenticationPtr& authentication, Result& result) { - BaseCommand cmd; - cmd.set_type(BaseCommand::AUTH_RESPONSE); - CommandAuthResponse* authResponse = cmd.mutable_authresponse(); - authResponse->set_client_version(_PULSAR_VERSION_INTERNAL_); - - AuthData* authData = authResponse->mutable_response(); - authData->set_auth_method_name(authentication->getAuthMethodName()); - - AuthenticationDataPtr authDataContent; - result = authentication->getAuthData(authDataContent); - if (result != ResultOk) { - return SharedBuffer{}; - } - - if (authDataContent->hasDataFromCommand()) { - authData->set_auth_data(authDataContent->getCommandData()); - } - - return writeMessageWithSize(cmd); -} - -SharedBuffer 
Commands::newSubscribe(const std::string& topic, const std::string& subscription, - uint64_t consumerId, uint64_t requestId, CommandSubscribe_SubType subType, - const std::string& consumerName, SubscriptionMode subscriptionMode, - Optional startMessageId, bool readCompacted, - const std::map& metadata, - const std::map& subscriptionProperties, - const SchemaInfo& schemaInfo, - CommandSubscribe_InitialPosition subscriptionInitialPosition, - bool replicateSubscriptionState, KeySharedPolicy keySharedPolicy, - int priorityLevel) { - BaseCommand cmd; - cmd.set_type(BaseCommand::SUBSCRIBE); - CommandSubscribe* subscribe = cmd.mutable_subscribe(); - subscribe->set_topic(topic); - subscribe->set_subscription(subscription); - subscribe->set_subtype(subType); - subscribe->set_consumer_id(consumerId); - subscribe->set_request_id(requestId); - subscribe->set_consumer_name(consumerName); - subscribe->set_durable(subscriptionMode == SubscriptionModeDurable); - subscribe->set_read_compacted(readCompacted); - subscribe->set_initialposition(subscriptionInitialPosition); - subscribe->set_replicate_subscription_state(replicateSubscriptionState); - subscribe->set_priority_level(priorityLevel); - - if (isBuiltInSchema(schemaInfo.getSchemaType())) { - subscribe->set_allocated_schema(getSchema(schemaInfo)); - } - - if (startMessageId.is_present()) { - MessageIdData& messageIdData = *subscribe->mutable_start_message_id(); - messageIdData.set_ledgerid(startMessageId.value().ledgerId()); - messageIdData.set_entryid(startMessageId.value().entryId()); - - if (startMessageId.value().batchIndex() != -1) { - messageIdData.set_batch_index(startMessageId.value().batchIndex()); - } - } - for (std::map::const_iterator it = metadata.begin(); it != metadata.end(); - it++) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(it->first); - keyValue->set_value(it->second); - subscribe->mutable_metadata()->AddAllocated(keyValue); - } - - for (const auto& subscriptionProperty : 
subscriptionProperties) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(subscriptionProperty.first); - keyValue->set_value(subscriptionProperty.second); - subscribe->mutable_subscription_properties()->AddAllocated(keyValue); - } - - if (subType == CommandSubscribe_SubType_Key_Shared) { - KeySharedMeta& ksm = *subscribe->mutable_keysharedmeta(); - switch (keySharedPolicy.getKeySharedMode()) { - case pulsar::AUTO_SPLIT: - ksm.set_keysharedmode(proto::KeySharedMode::AUTO_SPLIT); - break; - case pulsar::STICKY: - ksm.set_keysharedmode(proto::KeySharedMode::STICKY); - for (StickyRange range : keySharedPolicy.getStickyRanges()) { - IntRange* intRange = IntRange().New(); - intRange->set_start(range.first); - intRange->set_end(range.second); - ksm.mutable_hashranges()->AddAllocated(intRange); - } - } - ksm.set_allowoutoforderdelivery(keySharedPolicy.isAllowOutOfOrderDelivery()); - } - - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newUnsubscribe(uint64_t consumerId, uint64_t requestId) { - BaseCommand cmd; - cmd.set_type(BaseCommand::UNSUBSCRIBE); - CommandUnsubscribe* unsubscribe = cmd.mutable_unsubscribe(); - unsubscribe->set_consumer_id(consumerId); - unsubscribe->set_request_id(requestId); - - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newProducer(const std::string& topic, uint64_t producerId, - const std::string& producerName, uint64_t requestId, - const std::map& metadata, - const SchemaInfo& schemaInfo, uint64_t epoch, - bool userProvidedProducerName, bool encrypted, - ProducerAccessMode accessMode, Optional topicEpoch) { - BaseCommand cmd; - cmd.set_type(BaseCommand::PRODUCER); - CommandProducer* producer = cmd.mutable_producer(); - producer->set_topic(topic); - producer->set_producer_id(producerId); - producer->set_request_id(requestId); - producer->set_epoch(epoch); - producer->set_user_provided_producer_name(userProvidedProducerName); - producer->set_encrypted(encrypted); - 
producer->set_producer_access_mode(accessMode); - if (topicEpoch.is_present()) { - producer->set_topic_epoch(topicEpoch.value()); - } - - for (std::map::const_iterator it = metadata.begin(); it != metadata.end(); - it++) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(it->first); - keyValue->set_value(it->second); - producer->mutable_metadata()->AddAllocated(keyValue); - } - - if (isBuiltInSchema(schemaInfo.getSchemaType())) { - producer->set_allocated_schema(getSchema(schemaInfo)); - } - - if (!producerName.empty()) { - producer->set_producer_name(producerName); - } - - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newAck(uint64_t consumerId, const MessageIdData& messageId, CommandAck_AckType ackType, - int validationError) { - BaseCommand cmd; - cmd.set_type(BaseCommand::ACK); - CommandAck* ack = cmd.mutable_ack(); - ack->set_consumer_id(consumerId); - ack->set_ack_type(ackType); - if (CommandAck_AckType_IsValid(validationError)) { - ack->set_validation_error((CommandAck_ValidationError)validationError); - } - *(ack->add_message_id()) = messageId; - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newMultiMessageAck(uint64_t consumerId, const std::set& msgIds) { - BaseCommand cmd; - cmd.set_type(BaseCommand::ACK); - CommandAck* ack = cmd.mutable_ack(); - ack->set_consumer_id(consumerId); - ack->set_ack_type(CommandAck_AckType_Individual); - for (const auto& msgId : msgIds) { - auto newMsgId = ack->add_message_id(); - newMsgId->set_ledgerid(msgId.ledgerId()); - newMsgId->set_entryid(msgId.entryId()); - } - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newFlow(uint64_t consumerId, uint32_t messagePermits) { - BaseCommand cmd; - cmd.set_type(BaseCommand::FLOW); - CommandFlow* flow = cmd.mutable_flow(); - flow->set_consumer_id(consumerId); - flow->set_messagepermits(messagePermits); - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newCloseProducer(uint64_t producerId, 
uint64_t requestId) { - BaseCommand cmd; - cmd.set_type(BaseCommand::CLOSE_PRODUCER); - CommandCloseProducer* close = cmd.mutable_close_producer(); - close->set_producer_id(producerId); - close->set_request_id(requestId); - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newCloseConsumer(uint64_t consumerId, uint64_t requestId) { - BaseCommand cmd; - cmd.set_type(BaseCommand::CLOSE_CONSUMER); - CommandCloseConsumer* close = cmd.mutable_close_consumer(); - close->set_consumer_id(consumerId); - close->set_request_id(requestId); - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newPing() { - BaseCommand cmd; - cmd.set_type(BaseCommand::PING); - cmd.mutable_ping(); - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newPong() { - BaseCommand cmd; - cmd.set_type(BaseCommand::PONG); - cmd.mutable_pong(); - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newRedeliverUnacknowledgedMessages(uint64_t consumerId, - const std::set& messageIds) { - BaseCommand cmd; - cmd.set_type(BaseCommand::REDELIVER_UNACKNOWLEDGED_MESSAGES); - CommandRedeliverUnacknowledgedMessages* command = cmd.mutable_redeliverunacknowledgedmessages(); - command->set_consumer_id(consumerId); - for (const auto& msgId : messageIds) { - MessageIdData* msgIdData = command->add_message_ids(); - msgIdData->set_ledgerid(msgId.ledgerId()); - msgIdData->set_entryid(msgId.entryId()); - } - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newSeek(uint64_t consumerId, uint64_t requestId, const MessageId& messageId) { - BaseCommand cmd; - cmd.set_type(BaseCommand::SEEK); - CommandSeek* commandSeek = cmd.mutable_seek(); - commandSeek->set_consumer_id(consumerId); - commandSeek->set_request_id(requestId); - - MessageIdData& messageIdData = *commandSeek->mutable_message_id(); - messageIdData.set_ledgerid(messageId.ledgerId()); - messageIdData.set_entryid(messageId.entryId()); - return writeMessageWithSize(cmd); -} - -SharedBuffer 
Commands::newSeek(uint64_t consumerId, uint64_t requestId, uint64_t timestamp) { - BaseCommand cmd; - cmd.set_type(BaseCommand::SEEK); - CommandSeek* commandSeek = cmd.mutable_seek(); - commandSeek->set_consumer_id(consumerId); - commandSeek->set_request_id(requestId); - commandSeek->set_message_publish_time(timestamp); - return writeMessageWithSize(cmd); -} - -SharedBuffer Commands::newGetLastMessageId(uint64_t consumerId, uint64_t requestId) { - BaseCommand cmd; - cmd.set_type(BaseCommand::GET_LAST_MESSAGE_ID); - - CommandGetLastMessageId* getLastMessageId = cmd.mutable_getlastmessageid(); - getLastMessageId->set_consumer_id(consumerId); - getLastMessageId->set_request_id(requestId); - const SharedBuffer buffer = writeMessageWithSize(cmd); - cmd.clear_getlastmessageid(); - return buffer; -} - -SharedBuffer Commands::newGetTopicsOfNamespace(const std::string& nsName, uint64_t requestId) { - BaseCommand cmd; - cmd.set_type(BaseCommand::GET_TOPICS_OF_NAMESPACE); - CommandGetTopicsOfNamespace* getTopics = cmd.mutable_gettopicsofnamespace(); - getTopics->set_request_id(requestId); - getTopics->set_namespace_(nsName); - - const SharedBuffer buffer = writeMessageWithSize(cmd); - cmd.clear_gettopicsofnamespace(); - return buffer; -} - -std::string Commands::messageType(BaseCommand_Type type) { - switch (type) { - case BaseCommand::CONNECT: - return "CONNECT"; - break; - case BaseCommand::CONNECTED: - return "CONNECTED"; - break; - case BaseCommand::SUBSCRIBE: - return "SUBSCRIBE"; - break; - case BaseCommand::PRODUCER: - return "PRODUCER"; - break; - case BaseCommand::SEND: - return "SEND"; - break; - case BaseCommand::SEND_RECEIPT: - return "SEND_RECEIPT"; - break; - case BaseCommand::SEND_ERROR: - return "SEND_ERROR"; - break; - case BaseCommand::MESSAGE: - return "MESSAGE"; - break; - case BaseCommand::ACK: - return "ACK"; - break; - case BaseCommand::FLOW: - return "FLOW"; - break; - case BaseCommand::UNSUBSCRIBE: - return "UNSUBSCRIBE"; - break; - case 
BaseCommand::SUCCESS: - return "SUCCESS"; - break; - case BaseCommand::ERROR: - return "ERROR"; - break; - case BaseCommand::CLOSE_PRODUCER: - return "CLOSE_PRODUCER"; - break; - case BaseCommand::CLOSE_CONSUMER: - return "CLOSE_CONSUMER"; - break; - case BaseCommand::PRODUCER_SUCCESS: - return "PRODUCER_SUCCESS"; - break; - case BaseCommand::PING: - return "PING"; - break; - case BaseCommand::PONG: - return "PONG"; - break; - case BaseCommand::PARTITIONED_METADATA: - return "PARTITIONED_METADATA"; - break; - case BaseCommand::PARTITIONED_METADATA_RESPONSE: - return "PARTITIONED_METADATA_RESPONSE"; - break; - case BaseCommand::REDELIVER_UNACKNOWLEDGED_MESSAGES: - return "REDELIVER_UNACKNOWLEDGED_MESSAGES"; - break; - case BaseCommand::LOOKUP: - return "LOOKUP"; - break; - case BaseCommand::LOOKUP_RESPONSE: - return "LOOKUP_RESPONSE"; - break; - case BaseCommand::CONSUMER_STATS: - return "CONSUMER_STATS"; - break; - case BaseCommand::CONSUMER_STATS_RESPONSE: - return "CONSUMER_STATS_RESPONSE"; - break; - case BaseCommand::REACHED_END_OF_TOPIC: - return "REACHED_END_OF_TOPIC"; - break; - case BaseCommand::SEEK: - return "SEEK"; - break; - case BaseCommand::ACTIVE_CONSUMER_CHANGE: - return "ACTIVE_CONSUMER_CHANGE"; - break; - case BaseCommand::GET_LAST_MESSAGE_ID: - return "GET_LAST_MESSAGE_ID"; - break; - case BaseCommand::GET_LAST_MESSAGE_ID_RESPONSE: - return "GET_LAST_MESSAGE_ID_RESPONSE"; - break; - case BaseCommand::GET_TOPICS_OF_NAMESPACE: - return "GET_TOPICS_OF_NAMESPACE"; - break; - case BaseCommand::GET_TOPICS_OF_NAMESPACE_RESPONSE: - return "GET_TOPICS_OF_NAMESPACE_RESPONSE"; - break; - case BaseCommand::GET_SCHEMA: - return "GET_SCHEMA"; - break; - case BaseCommand::GET_SCHEMA_RESPONSE: - return "GET_SCHEMA_RESPONSE"; - break; - case BaseCommand::AUTH_CHALLENGE: - return "AUTH_CHALLENGE"; - break; - case BaseCommand::AUTH_RESPONSE: - return "AUTH_RESPONSE"; - break; - case BaseCommand::ACK_RESPONSE: - return "ACK_RESPONSE"; - break; - case 
BaseCommand::GET_OR_CREATE_SCHEMA: - return "GET_OR_CREATE_SCHEMA"; - case BaseCommand::GET_OR_CREATE_SCHEMA_RESPONSE: - return "GET_OR_CREATE_SCHEMA_RESPONSE"; - case BaseCommand::NEW_TXN: - return "NEW_TXN"; - break; - case BaseCommand::NEW_TXN_RESPONSE: - return "NEW_TXN_RESPONSE"; - break; - case BaseCommand::ADD_PARTITION_TO_TXN: - return "ADD_PARTITION_TO_TXN"; - break; - case BaseCommand::ADD_PARTITION_TO_TXN_RESPONSE: - return "ADD_PARTITION_TO_TXN_RESPONSE"; - break; - case BaseCommand::ADD_SUBSCRIPTION_TO_TXN: - return "ADD_SUBSCRIPTION_TO_TXN"; - break; - case BaseCommand::ADD_SUBSCRIPTION_TO_TXN_RESPONSE: - return "ADD_SUBSCRIPTION_TO_TXN_RESPONSE"; - break; - case BaseCommand::END_TXN: - return "END_TXN"; - break; - case BaseCommand::END_TXN_RESPONSE: - return "END_TXN_RESPONSE"; - break; - case BaseCommand::END_TXN_ON_PARTITION: - return "END_TXN_ON_PARTITION"; - break; - case BaseCommand::END_TXN_ON_PARTITION_RESPONSE: - return "END_TXN_ON_PARTITION_RESPONSE"; - break; - case BaseCommand::END_TXN_ON_SUBSCRIPTION: - return "END_TXN_ON_SUBSCRIPTION"; - break; - case BaseCommand::END_TXN_ON_SUBSCRIPTION_RESPONSE: - return "END_TXN_ON_SUBSCRIPTION_RESPONSE"; - break; - case BaseCommand::TC_CLIENT_CONNECT_REQUEST: - return "TC_CLIENT_CONNECT_REQUEST"; - case BaseCommand::TC_CLIENT_CONNECT_RESPONSE: - return "TC_CLIENT_CONNECT_RESPONSE"; - break; - case BaseCommand::WATCH_TOPIC_LIST: - return "WATCH_TOPIC_LIST"; - break; - case BaseCommand::WATCH_TOPIC_LIST_SUCCESS: - return "WATCH_TOPIC_LIST_SUCCESS"; - break; - case BaseCommand::WATCH_TOPIC_UPDATE: - return "WATCH_TOPIC_UPDATE"; - break; - case BaseCommand::WATCH_TOPIC_LIST_CLOSE: - return "WATCH_TOPIC_LIST_CLOSE"; - break; - }; - BOOST_THROW_EXCEPTION(std::logic_error("Invalid BaseCommand enumeration value")); -} - -void Commands::initBatchMessageMetadata(const Message& msg, pulsar::proto::MessageMetadata& batchMetadata) { - // metadata has already been set in ProducerImpl::setMessageMetadata - const 
proto::MessageMetadata& metadata = msg.impl_->metadata; - - // required fields - batchMetadata.set_producer_name(metadata.producer_name()); - batchMetadata.set_sequence_id(metadata.sequence_id()); - batchMetadata.set_publish_time(metadata.publish_time()); - - // optional fields - if (metadata.has_partition_key()) { - batchMetadata.set_partition_key(metadata.partition_key()); - } - if (metadata.has_ordering_key()) { - batchMetadata.set_ordering_key(metadata.ordering_key()); - } - if (metadata.has_replicated_from()) { - batchMetadata.set_replicated_from(metadata.replicated_from()); - } - if (metadata.replicate_to_size() > 0) { - for (int i = 0; i < metadata.replicate_to_size(); i++) { - batchMetadata.add_replicate_to(metadata.replicate_to(i)); - } - } - if (metadata.has_schema_version()) { - batchMetadata.set_schema_version(metadata.schema_version()); - } -} - -uint64_t Commands::serializeSingleMessageInBatchWithPayload(const Message& msg, SharedBuffer& batchPayLoad, - unsigned long maxMessageSizeInBytes) { - const auto& msgMetadata = msg.impl_->metadata; - SingleMessageMetadata metadata; - if (msgMetadata.has_partition_key()) { - metadata.set_partition_key(msgMetadata.partition_key()); - } - if (msgMetadata.has_ordering_key()) { - metadata.set_ordering_key(msgMetadata.ordering_key()); - } - - metadata.mutable_properties()->Reserve(msgMetadata.properties_size()); - for (int i = 0; i < msgMetadata.properties_size(); i++) { - auto keyValue = proto::KeyValue().New(); - *keyValue = msgMetadata.properties(i); - metadata.mutable_properties()->AddAllocated(keyValue); - } - - if (msgMetadata.has_event_time()) { - metadata.set_event_time(msgMetadata.event_time()); - } - - if (msgMetadata.has_sequence_id()) { - metadata.set_sequence_id(msgMetadata.sequence_id()); - } - - // Format of batch message - // Each Message = [METADATA_SIZE][METADATA] [PAYLOAD] - - int payloadSize = msg.impl_->payload.readableBytes(); - metadata.set_payload_size(payloadSize); - - int msgMetadataSize = 
metadata.ByteSize(); - - unsigned long requiredSpace = sizeof(uint32_t) + msgMetadataSize + payloadSize; - if (batchPayLoad.writableBytes() <= sizeof(uint32_t) + msgMetadataSize + payloadSize) { - LOG_DEBUG("remaining size of batchPayLoad buffer [" - << batchPayLoad.writableBytes() << "] can't accomodate new payload [" << requiredSpace - << "] - expanding the batchPayload buffer"); - uint32_t new_size = - std::min(batchPayLoad.readableBytes() * 2, static_cast(maxMessageSizeInBytes)); - new_size = std::max(new_size, batchPayLoad.readableBytes() + static_cast(requiredSpace)); - SharedBuffer buffer = SharedBuffer::allocate(new_size); - // Adding batch created so far - buffer.write(batchPayLoad.data(), batchPayLoad.readableBytes()); - batchPayLoad = buffer; - } - // Adding the new message - batchPayLoad.writeUnsignedInt(msgMetadataSize); - metadata.SerializeToArray(batchPayLoad.mutableData(), msgMetadataSize); - batchPayLoad.bytesWritten(msgMetadataSize); - batchPayLoad.write(msg.impl_->payload.data(), payloadSize); - - return msgMetadata.sequence_id(); -} - -Message Commands::deSerializeSingleMessageInBatch(Message& batchedMessage, int32_t batchIndex) { - SharedBuffer& uncompressedPayload = batchedMessage.impl_->payload; - - // Format of batch message - // Each Message = [METADATA_SIZE][METADATA] [PAYLOAD] - - const int& singleMetaSize = uncompressedPayload.readUnsignedInt(); - SingleMessageMetadata metadata; - metadata.ParseFromArray(uncompressedPayload.data(), singleMetaSize); - uncompressedPayload.consume(singleMetaSize); - - const int& payloadSize = metadata.payload_size(); - - // Get a slice of size payloadSize from offset readIndex_ - SharedBuffer payload = uncompressedPayload.slice(0, payloadSize); - uncompressedPayload.consume(payloadSize); - - const MessageId& m = batchedMessage.impl_->messageId; - MessageId singleMessageId(m.partition(), m.ledgerId(), m.entryId(), batchIndex); - Message singleMessage(singleMessageId, batchedMessage.impl_->metadata, payload, 
metadata, - batchedMessage.impl_->getTopicName()); - singleMessage.impl_->cnx_ = batchedMessage.impl_->cnx_; - - return singleMessage; -} - -bool Commands::peerSupportsGetLastMessageId(int32_t peerVersion) { return peerVersion >= proto::v12; } - -bool Commands::peerSupportsActiveConsumerListener(int32_t peerVersion) { return peerVersion >= proto::v12; } - -bool Commands::peerSupportsMultiMessageAcknowledgement(int32_t peerVersion) { - return peerVersion >= proto::v12; -} - -bool Commands::peerSupportsJsonSchemaAvroFormat(int32_t peerVersion) { return peerVersion >= proto::v13; } - -bool Commands::peerSupportsGetOrCreateSchema(int32_t peerVersion) { return peerVersion >= proto::v15; } -} // namespace pulsar -/* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/Commands.h b/pulsar-client-cpp/lib/Commands.h deleted file mode 100644 index 4ff8674497aeb..0000000000000 --- a/pulsar-client-cpp/lib/Commands.h +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_COMMANDS_H_ -#define LIB_COMMANDS_H_ - -#include -#include -#include -#include -#include - -#include "PulsarApi.pb.h" -#include "SharedBuffer.h" -#include "Utils.h" - -#include - -using namespace pulsar; - -namespace pulsar { - -typedef std::shared_ptr MessageMetadataPtr; - -/** - * Construct buffers ready to send for Pulsar client commands. - * - * Buffer are already including the 4 byte size at the beginning - */ -class Commands { - public: - enum ChecksumType - { - Crc32c, - None - }; - enum WireFormatConstant - { - DefaultMaxMessageSize = (5 * 1024 * 1024 - (10 * 1024)), - MaxFrameSize = (5 * 1024 * 1024) - }; - - enum SubscriptionMode - { - // Make the subscription to be backed by a durable cursor that will retain messages and persist the - // current - // position - SubscriptionModeDurable, - - // Lightweight subscription mode that doesn't have a durable cursor associated - SubscriptionModeNonDurable - }; - - const static uint16_t magicCrc32c = 0x0e01; - const static int checksumSize = 4; - - static SharedBuffer newConnect(const AuthenticationPtr& authentication, const std::string& logicalAddress, - bool connectingThroughProxy, Result& result); - - static SharedBuffer newAuthResponse(const AuthenticationPtr& authentication, Result& result); - - static SharedBuffer newPartitionMetadataRequest(const std::string& topic, uint64_t requestId); - - static SharedBuffer newLookup(const std::string& topic, const bool authoritative, uint64_t requestId, - const std::string& listenerName); - - static PairSharedBuffer newSend(SharedBuffer& headers, proto::BaseCommand& cmd, uint64_t producerId, - uint64_t sequenceId, ChecksumType checksumType, - const proto::MessageMetadata& metadata, const SharedBuffer& payload); - - static SharedBuffer newSubscribe(const std::string& topic, const std::string& subscription, - uint64_t consumerId, uint64_t requestId, - proto::CommandSubscribe_SubType subType, const std::string& consumerName, - SubscriptionMode 
subscriptionMode, Optional startMessageId, - bool readCompacted, const std::map& metadata, - const std::map& subscriptionProperties, - const SchemaInfo& schemaInfo, - proto::CommandSubscribe_InitialPosition subscriptionInitialPosition, - bool replicateSubscriptionState, KeySharedPolicy keySharedPolicy, - int priorityLevel = 0); - - static SharedBuffer newUnsubscribe(uint64_t consumerId, uint64_t requestId); - - static SharedBuffer newProducer(const std::string& topic, uint64_t producerId, - const std::string& producerName, uint64_t requestId, - const std::map& metadata, - const SchemaInfo& schemaInfo, uint64_t epoch, - bool userProvidedProducerName, bool encrypted, - proto::ProducerAccessMode accessMode, Optional topicEpoch); - - static SharedBuffer newAck(uint64_t consumerId, const proto::MessageIdData& messageId, - proto::CommandAck_AckType ackType, int validationError); - static SharedBuffer newMultiMessageAck(uint64_t consumerId, const std::set& msgIds); - - static SharedBuffer newFlow(uint64_t consumerId, uint32_t messagePermits); - - static SharedBuffer newCloseProducer(uint64_t producerId, uint64_t requestId); - - static SharedBuffer newCloseConsumer(uint64_t consumerId, uint64_t requestId); - - static SharedBuffer newPing(); - static SharedBuffer newPong(); - - static SharedBuffer newRedeliverUnacknowledgedMessages(uint64_t consumerId, - const std::set& messageIds); - - static std::string messageType(proto::BaseCommand::Type type); - - static void initBatchMessageMetadata(const Message& msg, pulsar::proto::MessageMetadata& batchMetadata); - - static PULSAR_PUBLIC uint64_t serializeSingleMessageInBatchWithPayload( - const Message& msg, SharedBuffer& batchPayLoad, unsigned long maxMessageSizeInBytes); - - static Message deSerializeSingleMessageInBatch(Message& batchedMessage, int32_t batchIndex); - - static SharedBuffer newConsumerStats(uint64_t consumerId, uint64_t requestId); - - static SharedBuffer newSeek(uint64_t consumerId, uint64_t requestId, const 
MessageId& messageId); - static SharedBuffer newSeek(uint64_t consumerId, uint64_t requestId, uint64_t timestamp); - static SharedBuffer newGetLastMessageId(uint64_t consumerId, uint64_t requestId); - static SharedBuffer newGetTopicsOfNamespace(const std::string& nsName, uint64_t requestId); - - static bool peerSupportsGetLastMessageId(int32_t peerVersion); - static bool peerSupportsActiveConsumerListener(int32_t peerVersion); - static bool peerSupportsMultiMessageAcknowledgement(int32_t peerVersion); - static bool peerSupportsJsonSchemaAvroFormat(int32_t peerVersion); - static bool peerSupportsGetOrCreateSchema(int32_t peerVersion); - - private: - Commands(); - - static SharedBuffer writeMessageWithSize(const proto::BaseCommand& cmd); -}; - -} /* namespace pulsar */ - -#endif /* LIB_COMMANDS_H_ */ diff --git a/pulsar-client-cpp/lib/CompressionCodec.cc b/pulsar-client-cpp/lib/CompressionCodec.cc deleted file mode 100644 index c17b534451478..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodec.cc +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "CompressionCodec.h" -#include "CompressionCodecLZ4.h" -#include "CompressionCodecZLib.h" -#include "CompressionCodecZstd.h" -#include "CompressionCodecSnappy.h" - -#include - -using namespace pulsar; -namespace pulsar { - -CompressionCodecNone CompressionCodecProvider::compressionCodecNone_; -CompressionCodecLZ4 CompressionCodecProvider::compressionCodecLZ4_; -CompressionCodecZLib CompressionCodecProvider::compressionCodecZLib_; -CompressionCodecZstd CompressionCodecProvider::compressionCodecZstd_; -CompressionCodecSnappy CompressionCodecProvider::compressionCodecSnappy_; - -CompressionCodec& CompressionCodecProvider::getCodec(CompressionType compressionType) { - switch (compressionType) { - case CompressionLZ4: - return compressionCodecLZ4_; - case CompressionZLib: - return compressionCodecZLib_; - case CompressionZSTD: - return compressionCodecZstd_; - case CompressionSNAPPY: - return compressionCodecSnappy_; - default: - return compressionCodecNone_; - } - BOOST_THROW_EXCEPTION(std::logic_error("Invalid CompressionType enumeration value")); -} - -CompressionType CompressionCodecProvider::convertType(proto::CompressionType type) { - switch (type) { - case proto::NONE: - return CompressionNone; - case proto::LZ4: - return CompressionLZ4; - case proto::ZLIB: - return CompressionZLib; - case proto::ZSTD: - return CompressionZSTD; - case proto::SNAPPY: - return CompressionSNAPPY; - } - BOOST_THROW_EXCEPTION(std::logic_error("Invalid proto::CompressionType enumeration value")); -} - -proto::CompressionType CompressionCodecProvider::convertType(CompressionType type) { - switch (type) { - case CompressionNone: - return proto::NONE; - case CompressionLZ4: - return proto::LZ4; - case CompressionZLib: - return proto::ZLIB; - case CompressionZSTD: - return proto::ZSTD; - case CompressionSNAPPY: - return proto::SNAPPY; - } - BOOST_THROW_EXCEPTION(std::logic_error("Invalid CompressionType enumeration value")); -} - -SharedBuffer 
CompressionCodecNone::encode(const SharedBuffer& raw) { return raw; } - -bool CompressionCodecNone::decode(const SharedBuffer& encoded, uint32_t uncompressedSize, - SharedBuffer& decoded) { - decoded = encoded; - return true; -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/CompressionCodec.h b/pulsar-client-cpp/lib/CompressionCodec.h deleted file mode 100644 index fd65f9cdf6ecc..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodec.h +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_COMPRESSIONCODEC_H_ -#define LIB_COMPRESSIONCODEC_H_ - -#include -#include - -#include "SharedBuffer.h" -#include "PulsarApi.pb.h" - -#include - -using namespace pulsar; -namespace pulsar { - -class CompressionCodec; -class CompressionCodecNone; -class CompressionCodecLZ4; -class CompressionCodecZLib; -class CompressionCodecZstd; -class CompressionCodecSnappy; - -class PULSAR_PUBLIC CompressionCodecProvider { - public: - static CompressionType convertType(proto::CompressionType type); - static proto::CompressionType convertType(CompressionType type); - - static CompressionCodec& getCodec(CompressionType compressionType); - - private: - static CompressionCodecNone compressionCodecNone_; - static CompressionCodecLZ4 compressionCodecLZ4_; - static CompressionCodecZLib compressionCodecZLib_; - static CompressionCodecZstd compressionCodecZstd_; - static CompressionCodecSnappy compressionCodecSnappy_; -}; - -class PULSAR_PUBLIC CompressionCodec { - public: - virtual ~CompressionCodec() {} - - /** - * Compress a buffer - * - * @param raw - * a buffer with the uncompressed content. The reader/writer indexes will not be modified - * @return a buffer with the compressed content. - */ - virtual SharedBuffer encode(const SharedBuffer& raw) = 0; - - /** - * Decompress a buffer. - * - * The buffer needs to have been compressed with the matching Encoder. 
- * - * @param encoded - * the compressed content - * @param uncompressedSize - * the size of the original content - * @param decoded - * were the result will be passed - * @return true if the buffer was decompressed, false otherwise - */ - virtual bool decode(const SharedBuffer& encoded, uint32_t uncompressedSize, SharedBuffer& decoded) = 0; -}; - -class PULSAR_PUBLIC CompressionCodecNone : public CompressionCodec { - public: - SharedBuffer encode(const SharedBuffer& raw); - - bool decode(const SharedBuffer& encoded, uint32_t uncompressedSize, SharedBuffer& decoded); -}; -} // namespace pulsar - -#endif /* LIB_COMPRESSIONCODEC_H_ */ diff --git a/pulsar-client-cpp/lib/CompressionCodecLZ4.cc b/pulsar-client-cpp/lib/CompressionCodecLZ4.cc deleted file mode 100644 index 508e4f4ab2271..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecLZ4.cc +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "CompressionCodecLZ4.h" - -#include "lz4/lz4.h" -#include - -namespace pulsar { - -SharedBuffer CompressionCodecLZ4::encode(const SharedBuffer& raw) { - // Get the max size of the compressed data and allocate a buffer to hold it - int maxCompressedSize = LZ4_compressBound(raw.readableBytes()); - SharedBuffer compressed = SharedBuffer::allocate(maxCompressedSize); - - int compressedSize = - LZ4_compress_default(raw.data(), compressed.mutableData(), raw.readableBytes(), maxCompressedSize); - assert(compressedSize > 0); - compressed.bytesWritten(compressedSize); - - return compressed; -} - -bool CompressionCodecLZ4::decode(const SharedBuffer& encoded, uint32_t uncompressedSize, - SharedBuffer& decoded) { - SharedBuffer decompressed = SharedBuffer::allocate(uncompressedSize); - - int result = LZ4_decompress_fast(encoded.data(), decompressed.mutableData(), uncompressedSize); - if (result > 0) { - decompressed.bytesWritten(uncompressedSize); - decoded = decompressed; - return true; - } else { - // Decompression failed - return false; - } -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/CompressionCodecLZ4.h b/pulsar-client-cpp/lib/CompressionCodecLZ4.h deleted file mode 100644 index 147e01380c447..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecLZ4.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_COMPRESSIONCODECLZ4_H_ -#define LIB_COMPRESSIONCODECLZ4_H_ - -#include "CompressionCodec.h" - -namespace pulsar { - -class CompressionCodecLZ4 : public CompressionCodec { - public: - SharedBuffer encode(const SharedBuffer& raw); - - bool decode(const SharedBuffer& encoded, uint32_t uncompressedSize, SharedBuffer& decoded); -}; -} // namespace pulsar -#endif /* LIB_COMPRESSIONCODECLZ4_H_ */ diff --git a/pulsar-client-cpp/lib/CompressionCodecSnappy.cc b/pulsar-client-cpp/lib/CompressionCodecSnappy.cc deleted file mode 100644 index 04b0d973eb9f9..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecSnappy.cc +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "CompressionCodecSnappy.h" - -#if HAS_SNAPPY -#include -#include - -namespace pulsar { - -SharedBuffer CompressionCodecSnappy::encode(const SharedBuffer& raw) { - // Get the max size of the compressed data and allocate a buffer to hold it - size_t maxCompressedLength = snappy::MaxCompressedLength(raw.readableBytes()); - SharedBuffer compressed = SharedBuffer::allocate(static_cast(maxCompressedLength)); - snappy::ByteArraySource source(raw.data(), raw.readableBytes()); - snappy::UncheckedByteArraySink sink(compressed.mutableData()); - size_t compressedSize = snappy::Compress(&source, &sink); - compressed.setWriterIndex(static_cast(compressedSize)); - return compressed; -} - -bool CompressionCodecSnappy::decode(const SharedBuffer& encoded, uint32_t uncompressedSize, - SharedBuffer& decoded) { - SharedBuffer uncompressed = SharedBuffer::allocate(uncompressedSize); - snappy::ByteArraySource source(encoded.data(), encoded.readableBytes()); - snappy::UncheckedByteArraySink sink(uncompressed.mutableData()); - if (snappy::Uncompress(&source, &sink)) { - decoded = uncompressed; - decoded.setWriterIndex(uncompressedSize); - return true; - } else { - // Decompression failed - return false; - } -} -} // namespace pulsar - -#else // No SNAPPY - -#include - -namespace pulsar { - -SharedBuffer CompressionCodecSnappy::encode(const SharedBuffer& raw) { - throw std::runtime_error("Snappy compression not supported"); -} - -bool CompressionCodecSnappy::decode(const SharedBuffer& encoded, uint32_t uncompressedSize, - SharedBuffer& decoded) { - throw std::runtime_error("Snappy compression not supported"); -} -} // namespace pulsar - -#endif // HAS_SNAPPY diff --git a/pulsar-client-cpp/lib/CompressionCodecSnappy.h b/pulsar-client-cpp/lib/CompressionCodecSnappy.h deleted file mode 100644 index 933b9efb4133b..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecSnappy.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include "CompressionCodec.h" - -namespace pulsar { - -class PULSAR_PUBLIC CompressionCodecSnappy : public CompressionCodec { - public: - SharedBuffer encode(const SharedBuffer& raw); - - bool decode(const SharedBuffer& encoded, uint32_t uncompressedSize, SharedBuffer& decoded); -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/CompressionCodecZLib.cc b/pulsar-client-cpp/lib/CompressionCodecZLib.cc deleted file mode 100644 index 657c5488e9d22..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecZLib.cc +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "CompressionCodecZLib.h" - -#include -#include -#include -#include -#include "LogUtils.h" - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -SharedBuffer CompressionCodecZLib::encode(const SharedBuffer &raw) { - // Get the max size of the compressed data and allocate a buffer to hold it - int maxCompressedSize = compressBound(raw.readableBytes()); - SharedBuffer compressed = SharedBuffer::allocate(maxCompressedSize); - - unsigned long bytesWritten = maxCompressedSize; - int res = compress((Bytef *)compressed.mutableData(), &bytesWritten, (const Bytef *)raw.data(), - raw.readableBytes()); - if (res != Z_OK) { - LOG_ERROR("Failed to compress buffer. 
res=" << res); - abort(); - } - - compressed.bytesWritten(bytesWritten); - return compressed; -} - -static bool buffer_uncompress(const char *compressedBuffer, unsigned long compressedSize, char *resultBuffer, - uint32_t uncompressedSize) { - z_stream stream; - stream.next_in = (Bytef *)compressedBuffer; - stream.avail_in = compressedSize; - stream.zalloc = NULL; - stream.zfree = NULL; - stream.opaque = NULL; - - int res = inflateInit2(&stream, MAX_WBITS); - if (res != Z_OK) { - LOG_ERROR("Failed to initialize inflate stream: " << res); - return false; - } - - stream.next_out = (Bytef *)resultBuffer; - stream.avail_out = uncompressedSize; - - res = inflate(&stream, Z_PARTIAL_FLUSH); - inflateEnd(&stream); - - if (res == Z_OK || res == Z_STREAM_END) { - return true; - } else { - LOG_ERROR("Failed to decompress zlib buffer: " << res << " -- compressed size: " << compressedSize - << " -- uncompressed size: " << uncompressedSize); - return false; - } -} - -bool CompressionCodecZLib::decode(const SharedBuffer &encoded, uint32_t uncompressedSize, - SharedBuffer &decoded) { - SharedBuffer decompressed = SharedBuffer::allocate(uncompressedSize); - - if (buffer_uncompress(encoded.data(), encoded.readableBytes(), decompressed.mutableData(), - uncompressedSize)) { - decoded = decompressed; - decoded.setWriterIndex(uncompressedSize); - return true; - } else { - return false; - } -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/CompressionCodecZLib.h b/pulsar-client-cpp/lib/CompressionCodecZLib.h deleted file mode 100644 index cd4380b588a41..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecZLib.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_COMPRESSIONCODECZLIB_H_ -#define LIB_COMPRESSIONCODECZLIB_H_ - -#include -#include "CompressionCodec.h" -#include - -// Make symbol visible to unit tests - -namespace pulsar { - -class PULSAR_PUBLIC CompressionCodecZLib : public CompressionCodec { - public: - SharedBuffer encode(const SharedBuffer& raw); - - bool decode(const SharedBuffer& encoded, uint32_t uncompressedSize, SharedBuffer& decoded); -}; - -} // namespace pulsar - -#endif /* LIB_COMPRESSIONCODECZLIB_H_ */ diff --git a/pulsar-client-cpp/lib/CompressionCodecZstd.cc b/pulsar-client-cpp/lib/CompressionCodecZstd.cc deleted file mode 100644 index 14c09925501da..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecZstd.cc +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "CompressionCodecZstd.h" - -#if HAS_ZSTD -#include - -namespace pulsar { - -static const int COMPRESSION_LEVEL = 3; - -SharedBuffer CompressionCodecZstd::encode(const SharedBuffer& raw) { - // Get the max size of the compressed data and allocate a buffer to hold it - size_t maxCompressedSize = ZSTD_compressBound(raw.readableBytes()); - SharedBuffer compressed = SharedBuffer::allocate(maxCompressedSize); - - int compressedSize = ZSTD_compress(compressed.mutableData(), maxCompressedSize, raw.data(), - raw.readableBytes(), COMPRESSION_LEVEL); - compressed.bytesWritten(compressedSize); - - return compressed; -} - -bool CompressionCodecZstd::decode(const SharedBuffer& encoded, uint32_t uncompressedSize, - SharedBuffer& decoded) { - SharedBuffer decompressed = SharedBuffer::allocate(uncompressedSize); - - size_t result = ZSTD_decompress(decompressed.mutableData(), uncompressedSize, encoded.data(), - encoded.readableBytes()); - if (result == uncompressedSize) { - decompressed.bytesWritten(uncompressedSize); - decoded = decompressed; - return true; - } else { - // Decompression failed - return false; - } -} -} // namespace pulsar - -#else // No ZSTD - -#include - -namespace pulsar { - -SharedBuffer CompressionCodecZstd::encode(const SharedBuffer& raw) { - throw std::runtime_error("ZStd compression not supported"); -} - -bool CompressionCodecZstd::decode(const SharedBuffer& encoded, uint32_t uncompressedSize, - SharedBuffer& decoded) { - throw std::runtime_error("ZStd compression not supported"); -} -} // namespace pulsar - -#endif // 
HAS_ZSTD diff --git a/pulsar-client-cpp/lib/CompressionCodecZstd.h b/pulsar-client-cpp/lib/CompressionCodecZstd.h deleted file mode 100644 index dba296706d21a..0000000000000 --- a/pulsar-client-cpp/lib/CompressionCodecZstd.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include "CompressionCodec.h" - -namespace pulsar { - -class CompressionCodecZstd : public CompressionCodec { - public: - SharedBuffer encode(const SharedBuffer& raw); - - bool decode(const SharedBuffer& encoded, uint32_t uncompressedSize, SharedBuffer& decoded); -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ConnectionPool.cc b/pulsar-client-cpp/lib/ConnectionPool.cc deleted file mode 100644 index e03697f9c7bd3..0000000000000 --- a/pulsar-client-cpp/lib/ConnectionPool.cc +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "ConnectionPool.h" - -#include "LogUtils.h" -#include "Url.h" - -#include -#include - -using boost::asio::ip::tcp; -namespace ssl = boost::asio::ssl; -typedef ssl::stream ssl_socket; - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -ConnectionPool::ConnectionPool(const ClientConfiguration& conf, ExecutorServiceProviderPtr executorProvider, - const AuthenticationPtr& authentication, bool poolConnections) - : clientConfiguration_(conf), - executorProvider_(executorProvider), - authentication_(authentication), - pool_(), - poolConnections_(poolConnections), - mutex_() {} - -bool ConnectionPool::close() { - bool expectedState = false; - if (!closed_.compare_exchange_strong(expectedState, true)) { - return false; - } - - std::unique_lock lock(mutex_); - if (poolConnections_) { - for (auto cnxIt = pool_.begin(); cnxIt != pool_.end(); cnxIt++) { - ClientConnectionPtr cnx = cnxIt->second.lock(); - if (cnx) { - cnx->close(); - } - } - pool_.clear(); - } - return true; -} - -Future ConnectionPool::getConnectionAsync( - const std::string& logicalAddress, const std::string& physicalAddress) { - if (closed_) { - Promise promise; - promise.setFailed(ResultAlreadyClosed); - return promise.getFuture(); - } - - std::unique_lock lock(mutex_); - - if (poolConnections_) { - PoolMap::iterator cnxIt = pool_.find(logicalAddress); - if (cnxIt != pool_.end()) { - ClientConnectionPtr cnx = 
cnxIt->second.lock(); - - if (cnx && !cnx->isClosed()) { - // Found a valid or pending connection in the pool - LOG_DEBUG("Got connection from pool for " << logicalAddress << " use_count: " // - << (cnx.use_count() - 1) << " @ " << cnx.get()); - return cnx->getConnectFuture(); - } else { - // Deleting stale connection - LOG_INFO("Deleting stale connection from pool for " - << logicalAddress << " use_count: " << (cnx.use_count() - 1) << " @ " << cnx.get()); - pool_.erase(logicalAddress); - } - } - } - - // No valid or pending connection found in the pool, creating a new one - ClientConnectionPtr cnx(new ClientConnection(logicalAddress, physicalAddress, executorProvider_->get(), - clientConfiguration_, authentication_)); - - LOG_INFO("Created connection for " << logicalAddress); - - Future future = cnx->getConnectFuture(); - pool_.insert(std::make_pair(logicalAddress, cnx)); - - lock.unlock(); - - cnx->tcpConnectAsync(); - return future; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ConnectionPool.h b/pulsar-client-cpp/lib/ConnectionPool.h deleted file mode 100644 index 21d439e732683..0000000000000 --- a/pulsar-client-cpp/lib/ConnectionPool.h +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef _PULSAR_CONNECTION_POOL_HEADER_ -#define _PULSAR_CONNECTION_POOL_HEADER_ - -#include -#include - -#include "ClientConnection.h" - -#include -#include -#include -#include -namespace pulsar { - -class ExecutorService; - -class PULSAR_PUBLIC ConnectionPool { - public: - ConnectionPool(const ClientConfiguration& conf, ExecutorServiceProviderPtr executorProvider, - const AuthenticationPtr& authentication, bool poolConnections = true); - - /** - * Close the connection pool. - * - * @return false if it has already been closed. - */ - bool close(); - - /** - * Get a connection from the pool. - *

- * The connection can either be created or be coming from the pool itself. - *

- * When specifying multiple addresses, the logicalAddress is used as a tag for the broker, - * while the physicalAddress is where the connection is actually happening. - *

- * These two addresses can be different when the client is forced to connect through - * a proxy layer. Essentially, the pool is using the logical address as a way to - * decide whether to reuse a particular connection. - * - * @param logicalAddress the address to use as the broker tag - * @param physicalAddress the real address where the TCP connection should be made - * @return a future that will produce the ClientCnx object - */ - Future getConnectionAsync(const std::string& logicalAddress, - const std::string& physicalAddress); - - Future getConnectionAsync(const std::string& address) { - return getConnectionAsync(address, address); - } - - private: - ClientConfiguration clientConfiguration_; - ExecutorServiceProviderPtr executorProvider_; - AuthenticationPtr authentication_; - typedef std::map PoolMap; - PoolMap pool_; - bool poolConnections_; - std::mutex mutex_; - std::atomic_bool closed_{false}; - - friend class ConnectionPoolTest; -}; -} // namespace pulsar -#endif //_PULSAR_CONNECTION_POOL_HEADER_ diff --git a/pulsar-client-cpp/lib/ConsoleLoggerFactory.cc b/pulsar-client-cpp/lib/ConsoleLoggerFactory.cc deleted file mode 100644 index 397c7feed070f..0000000000000 --- a/pulsar-client-cpp/lib/ConsoleLoggerFactory.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include "lib/ConsoleLoggerFactoryImpl.h" - -namespace pulsar { - -ConsoleLoggerFactory::ConsoleLoggerFactory(Logger::Level level) - : impl_(new ConsoleLoggerFactoryImpl(level)) {} - -ConsoleLoggerFactory::~ConsoleLoggerFactory() {} - -Logger* ConsoleLoggerFactory::getLogger(const std::string& fileName) { return impl_->getLogger(fileName); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ConsoleLoggerFactoryImpl.h b/pulsar-client-cpp/lib/ConsoleLoggerFactoryImpl.h deleted file mode 100644 index 61c1d90d745e5..0000000000000 --- a/pulsar-client-cpp/lib/ConsoleLoggerFactoryImpl.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include -#include "lib/SimpleLogger.h" - -namespace pulsar { - -class ConsoleLoggerFactoryImpl { - public: - ConsoleLoggerFactoryImpl(Logger::Level level) : level_(level) {} - - Logger* getLogger(const std::string& fileName) { return new SimpleLogger(std::cout, fileName, level_); } - - private: - Logger::Level level_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Consumer.cc b/pulsar-client-cpp/lib/Consumer.cc deleted file mode 100644 index 5d1636291286b..0000000000000 --- a/pulsar-client-cpp/lib/Consumer.cc +++ /dev/null @@ -1,270 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include "ConsumerImpl.h" -#include "Utils.h" -#include -#include - -namespace pulsar { - -static const std::string EMPTY_STRING; - -Consumer::Consumer() : impl_() {} - -Consumer::Consumer(ConsumerImplBasePtr impl) : impl_(impl) {} - -const std::string& Consumer::getTopic() const { return impl_ != NULL ? impl_->getTopic() : EMPTY_STRING; } - -const std::string& Consumer::getSubscriptionName() const { - return impl_ != NULL ? 
impl_->getSubscriptionName() : EMPTY_STRING; -} - -Result Consumer::unsubscribe() { - if (!impl_) { - return ResultConsumerNotInitialized; - } - Promise promise; - impl_->unsubscribeAsync(WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -void Consumer::unsubscribeAsync(ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - - impl_->unsubscribeAsync(callback); -} - -Result Consumer::receive(Message& msg) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - return impl_->receive(msg); -} - -Result Consumer::receive(Message& msg, int timeoutMs) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - return impl_->receive(msg, timeoutMs); -} - -void Consumer::receiveAsync(ReceiveCallback callback) { - if (!impl_) { - Message msg; - callback(ResultConsumerNotInitialized, msg); - return; - } - impl_->receiveAsync(callback); -} - -Result Consumer::acknowledge(const Message& message) { return acknowledge(message.getMessageId()); } - -Result Consumer::acknowledge(const MessageId& messageId) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - Promise promise; - impl_->acknowledgeAsync(messageId, WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -void Consumer::acknowledgeAsync(const Message& message, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - - impl_->acknowledgeAsync(message.getMessageId(), callback); -} - -void Consumer::acknowledgeAsync(const MessageId& messageId, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - - impl_->acknowledgeAsync(messageId, callback); -} - -Result Consumer::acknowledgeCumulative(const Message& message) { - return acknowledgeCumulative(message.getMessageId()); -} - -Result Consumer::acknowledgeCumulative(const MessageId& messageId) { - if (!impl_) { - return 
ResultConsumerNotInitialized; - } - - Promise promise; - impl_->acknowledgeCumulativeAsync(messageId, WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -void Consumer::acknowledgeCumulativeAsync(const Message& message, ResultCallback callback) { - acknowledgeCumulativeAsync(message.getMessageId(), callback); -} - -void Consumer::acknowledgeCumulativeAsync(const MessageId& messageId, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - - impl_->acknowledgeCumulativeAsync(messageId, callback); -} - -void Consumer::negativeAcknowledge(const Message& message) { negativeAcknowledge(message.getMessageId()); } - -void Consumer::negativeAcknowledge(const MessageId& messageId) { - if (impl_) { - impl_->negativeAcknowledge(messageId); - ; - } -} - -Result Consumer::close() { - Promise promise; - closeAsync(WaitForCallback(promise)); - - Result result; - promise.getFuture().get(result); - return result; -} - -void Consumer::closeAsync(ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - - impl_->closeAsync(callback); -} - -Result Consumer::pauseMessageListener() { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - return impl_->pauseMessageListener(); -} - -Result Consumer::resumeMessageListener() { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - return impl_->resumeMessageListener(); -} - -void Consumer::redeliverUnacknowledgedMessages() { - if (impl_) { - impl_->redeliverUnacknowledgedMessages(); - } -} - -Result Consumer::getBrokerConsumerStats(BrokerConsumerStats& brokerConsumerStats) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - Promise promise; - getBrokerConsumerStatsAsync(WaitForCallbackValue(promise)); - return promise.getFuture().get(brokerConsumerStats); -} - -void Consumer::getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) { - if (!impl_) { - 
callback(ResultConsumerNotInitialized, BrokerConsumerStats()); - return; - } - impl_->getBrokerConsumerStatsAsync(callback); -} - -void Consumer::seekAsync(const MessageId& msgId, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - impl_->seekAsync(msgId, callback); -} - -void Consumer::seekAsync(uint64_t timestamp, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - impl_->seekAsync(timestamp, callback); -} - -Result Consumer::seek(const MessageId& msgId) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - Promise promise; - impl_->seekAsync(msgId, WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -Result Consumer::seek(uint64_t timestamp) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - Promise promise; - impl_->seekAsync(timestamp, WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -bool Consumer::isConnected() const { return impl_ && impl_->isConnected(); } - -void Consumer::getLastMessageIdAsync(GetLastMessageIdCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized, MessageId()); - return; - } - getLastMessageIdAsync([callback](Result result, const GetLastMessageIdResponse& response) { - callback(result, response.getLastMessageId()); - }); -} - -Result Consumer::getLastMessageId(MessageId& messageId) { - Promise promise; - - getLastMessageIdAsync(WaitForCallbackValue(promise)); - return promise.getFuture().get(messageId); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ConsumerConfiguration.cc b/pulsar-client-cpp/lib/ConsumerConfiguration.cc deleted file mode 100644 index f9fe499b9541e..0000000000000 --- a/pulsar-client-cpp/lib/ConsumerConfiguration.cc +++ /dev/null @@ -1,270 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include - -#include - -namespace pulsar { - -const static std::string emptyString; - -ConsumerConfiguration::ConsumerConfiguration() : impl_(std::make_shared()) {} - -ConsumerConfiguration::~ConsumerConfiguration() {} - -ConsumerConfiguration::ConsumerConfiguration(const ConsumerConfiguration& x) : impl_(x.impl_) {} - -ConsumerConfiguration& ConsumerConfiguration::operator=(const ConsumerConfiguration& x) { - impl_ = x.impl_; - return *this; -} - -ConsumerConfiguration ConsumerConfiguration::clone() const { - ConsumerConfiguration newConf; - newConf.impl_.reset(new ConsumerConfigurationImpl(*this->impl_)); - return newConf; -} - -ConsumerConfiguration& ConsumerConfiguration::setSchema(const SchemaInfo& schemaInfo) { - impl_->schemaInfo = schemaInfo; - return *this; -} - -const SchemaInfo& ConsumerConfiguration::getSchema() const { return impl_->schemaInfo; } - -long ConsumerConfiguration::getBrokerConsumerStatsCacheTimeInMs() const { - return impl_->brokerConsumerStatsCacheTimeInMs; -} - -void ConsumerConfiguration::setBrokerConsumerStatsCacheTimeInMs(const long cacheTimeInMs) { - impl_->brokerConsumerStatsCacheTimeInMs = cacheTimeInMs; -} - -ConsumerConfiguration& ConsumerConfiguration::setConsumerType(ConsumerType consumerType) { - impl_->consumerType = 
consumerType; - return *this; -} - -ConsumerType ConsumerConfiguration::getConsumerType() const { return impl_->consumerType; } - -ConsumerConfiguration& ConsumerConfiguration::setMessageListener(MessageListener messageListener) { - impl_->messageListener = messageListener; - impl_->hasMessageListener = true; - return *this; -} - -MessageListener ConsumerConfiguration::getMessageListener() const { return impl_->messageListener; } - -bool ConsumerConfiguration::hasMessageListener() const { return impl_->hasMessageListener; } - -ConsumerConfiguration& ConsumerConfiguration::setConsumerEventListener( - ConsumerEventListenerPtr eventListener) { - impl_->eventListener = eventListener; - impl_->hasConsumerEventListener = true; - return *this; -} - -ConsumerEventListenerPtr ConsumerConfiguration::getConsumerEventListener() const { - return impl_->eventListener; -} - -bool ConsumerConfiguration::hasConsumerEventListener() const { return impl_->hasConsumerEventListener; } - -void ConsumerConfiguration::setReceiverQueueSize(int size) { impl_->receiverQueueSize = size; } - -int ConsumerConfiguration::getReceiverQueueSize() const { return impl_->receiverQueueSize; } - -void ConsumerConfiguration::setMaxTotalReceiverQueueSizeAcrossPartitions(int size) { - impl_->maxTotalReceiverQueueSizeAcrossPartitions = size; -} - -int ConsumerConfiguration::getMaxTotalReceiverQueueSizeAcrossPartitions() const { - return impl_->maxTotalReceiverQueueSizeAcrossPartitions; -} - -const std::string& ConsumerConfiguration::getConsumerName() const { return impl_->consumerName; } - -void ConsumerConfiguration::setConsumerName(const std::string& consumerName) { - impl_->consumerName = consumerName; -} - -long ConsumerConfiguration::getUnAckedMessagesTimeoutMs() const { return impl_->unAckedMessagesTimeoutMs; } - -void ConsumerConfiguration::setUnAckedMessagesTimeoutMs(const uint64_t milliSeconds) { - if (milliSeconds < 10000 && milliSeconds != 0) { - throw std::invalid_argument( - "Consumer Config 
Exception: Unacknowledged message timeout should be greater than 10 seconds."); - } - impl_->unAckedMessagesTimeoutMs = milliSeconds; -} - -long ConsumerConfiguration::getTickDurationInMs() const { return impl_->tickDurationInMs; } - -void ConsumerConfiguration::setTickDurationInMs(const uint64_t milliSeconds) { - impl_->tickDurationInMs = milliSeconds; -} - -void ConsumerConfiguration::setNegativeAckRedeliveryDelayMs(long redeliveryDelayMillis) { - impl_->negativeAckRedeliveryDelayMs = redeliveryDelayMillis; -} - -long ConsumerConfiguration::getNegativeAckRedeliveryDelayMs() const { - return impl_->negativeAckRedeliveryDelayMs; -} - -void ConsumerConfiguration::setAckGroupingTimeMs(long ackGroupingMillis) { - impl_->ackGroupingTimeMs = ackGroupingMillis; -} - -long ConsumerConfiguration::getAckGroupingTimeMs() const { return impl_->ackGroupingTimeMs; } - -void ConsumerConfiguration::setAckGroupingMaxSize(long maxGroupingSize) { - impl_->ackGroupingMaxSize = maxGroupingSize; -} - -long ConsumerConfiguration::getAckGroupingMaxSize() const { return impl_->ackGroupingMaxSize; } - -bool ConsumerConfiguration::isEncryptionEnabled() const { return (impl_->cryptoKeyReader != NULL); } - -const CryptoKeyReaderPtr ConsumerConfiguration::getCryptoKeyReader() const { return impl_->cryptoKeyReader; } - -ConsumerConfiguration& ConsumerConfiguration::setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader) { - impl_->cryptoKeyReader = cryptoKeyReader; - return *this; -} - -ConsumerCryptoFailureAction ConsumerConfiguration::getCryptoFailureAction() const { - return impl_->cryptoFailureAction; -} - -ConsumerConfiguration& ConsumerConfiguration::setCryptoFailureAction(ConsumerCryptoFailureAction action) { - impl_->cryptoFailureAction = action; - return *this; -} - -bool ConsumerConfiguration::isReadCompacted() const { return impl_->readCompacted; } - -void ConsumerConfiguration::setReadCompacted(bool compacted) { impl_->readCompacted = compacted; } - -void 
ConsumerConfiguration::setSubscriptionInitialPosition(InitialPosition subscriptionInitialPosition) { - impl_->subscriptionInitialPosition = subscriptionInitialPosition; -} - -InitialPosition ConsumerConfiguration::getSubscriptionInitialPosition() const { - return impl_->subscriptionInitialPosition; -} - -void ConsumerConfiguration::setPatternAutoDiscoveryPeriod(int periodInSeconds) { - impl_->patternAutoDiscoveryPeriod = periodInSeconds; -} - -int ConsumerConfiguration::getPatternAutoDiscoveryPeriod() const { return impl_->patternAutoDiscoveryPeriod; } - -void ConsumerConfiguration::setReplicateSubscriptionStateEnabled(bool enabled) { - impl_->replicateSubscriptionStateEnabled = enabled; -} - -bool ConsumerConfiguration::isReplicateSubscriptionStateEnabled() const { - return impl_->replicateSubscriptionStateEnabled; -} - -bool ConsumerConfiguration::hasProperty(const std::string& name) const { - const std::map& m = impl_->properties; - return m.find(name) != m.end(); -} - -const std::string& ConsumerConfiguration::getProperty(const std::string& name) const { - if (hasProperty(name)) { - const std::map& m = impl_->properties; - return m.at(name); - } else { - return emptyString; - } -} - -std::map& ConsumerConfiguration::getProperties() const { return impl_->properties; } - -ConsumerConfiguration& ConsumerConfiguration::setProperty(const std::string& name, const std::string& value) { - impl_->properties.insert(std::make_pair(name, value)); - return *this; -} - -ConsumerConfiguration& ConsumerConfiguration::setProperties( - const std::map& properties) { - for (std::map::const_iterator it = properties.begin(); it != properties.end(); - it++) { - setProperty(it->first, it->second); - } - return *this; -} - -std::map& ConsumerConfiguration::getSubscriptionProperties() const { - return impl_->subscriptionProperties; -} - -ConsumerConfiguration& ConsumerConfiguration::setSubscriptionProperties( - const std::map& subscriptionProperties) { - for (const auto& 
subscriptionProperty : subscriptionProperties) { - impl_->subscriptionProperties.emplace(subscriptionProperty.first, subscriptionProperty.second); - } - return *this; -} - -ConsumerConfiguration& ConsumerConfiguration::setPriorityLevel(int priorityLevel) { - if (priorityLevel < 0) { - throw std::invalid_argument("Consumer Config Exception: PriorityLevel should be nonnegative number."); - } - impl_->priorityLevel = priorityLevel; - return *this; -} - -int ConsumerConfiguration::getPriorityLevel() const { return impl_->priorityLevel; } - -ConsumerConfiguration& ConsumerConfiguration::setKeySharedPolicy(KeySharedPolicy keySharedPolicy) { - impl_->keySharedPolicy = keySharedPolicy.clone(); - return *this; -} - -KeySharedPolicy ConsumerConfiguration::getKeySharedPolicy() const { return impl_->keySharedPolicy; } - -ConsumerConfiguration& ConsumerConfiguration::setMaxPendingChunkedMessage(size_t maxPendingChunkedMessage) { - impl_->maxPendingChunkedMessage = maxPendingChunkedMessage; - return *this; -} - -size_t ConsumerConfiguration::getMaxPendingChunkedMessage() const { return impl_->maxPendingChunkedMessage; } - -ConsumerConfiguration& ConsumerConfiguration::setAutoAckOldestChunkedMessageOnQueueFull( - bool autoAckOldestChunkedMessageOnQueueFull) { - impl_->autoAckOldestChunkedMessageOnQueueFull = autoAckOldestChunkedMessageOnQueueFull; - return *this; -} - -bool ConsumerConfiguration::isAutoAckOldestChunkedMessageOnQueueFull() const { - return impl_->autoAckOldestChunkedMessageOnQueueFull; -} - -ConsumerConfiguration& ConsumerConfiguration::setStartMessageIdInclusive(bool startMessageIdInclusive) { - impl_->startMessageIdInclusive = startMessageIdInclusive; - return *this; -} - -bool ConsumerConfiguration::isStartMessageIdInclusive() const { return impl_->startMessageIdInclusive; } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h b/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h deleted file mode 100644 index 
cca83a3882930..0000000000000 --- a/pulsar-client-cpp/lib/ConsumerConfigurationImpl.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_CONSUMERCONFIGURATIONIMPL_H_ -#define LIB_CONSUMERCONFIGURATIONIMPL_H_ - -#include - -#include - -namespace pulsar { -struct ConsumerConfigurationImpl { - SchemaInfo schemaInfo; - long unAckedMessagesTimeoutMs{0}; - long tickDurationInMs{1000}; - - long negativeAckRedeliveryDelayMs{60000}; - long ackGroupingTimeMs{100}; - long ackGroupingMaxSize{1000}; - ConsumerType consumerType{ConsumerExclusive}; - MessageListener messageListener; - bool hasMessageListener{false}; - ConsumerEventListenerPtr eventListener; - bool hasConsumerEventListener{false}; - int receiverQueueSize{1000}; - int maxTotalReceiverQueueSizeAcrossPartitions{50000}; - std::string consumerName; - long brokerConsumerStatsCacheTimeInMs{30 * 1000L}; // 30 seconds - CryptoKeyReaderPtr cryptoKeyReader; - ConsumerCryptoFailureAction cryptoFailureAction{ConsumerCryptoFailureAction::FAIL}; - bool readCompacted{false}; - InitialPosition subscriptionInitialPosition{InitialPosition::InitialPositionLatest}; - int patternAutoDiscoveryPeriod{60}; - bool 
replicateSubscriptionStateEnabled{false}; - std::map properties; - std::map subscriptionProperties; - int priorityLevel{0}; - KeySharedPolicy keySharedPolicy; - size_t maxPendingChunkedMessage{10}; - bool autoAckOldestChunkedMessageOnQueueFull{false}; - bool startMessageIdInclusive{false}; -}; -} // namespace pulsar -#endif /* LIB_CONSUMERCONFIGURATIONIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ConsumerImpl.cc b/pulsar-client-cpp/lib/ConsumerImpl.cc deleted file mode 100644 index 37fcd95248af4..0000000000000 --- a/pulsar-client-cpp/lib/ConsumerImpl.cc +++ /dev/null @@ -1,1422 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "ConsumerImpl.h" -#include "MessageImpl.h" -#include "Commands.h" -#include "LogUtils.h" -#include "TimeUtils.h" -#include -#include "pulsar/Result.h" -#include "pulsar/MessageId.h" -#include "Utils.h" -#include "MessageIdUtil.h" -#include "AckGroupingTracker.h" -#include "AckGroupingTrackerEnabled.h" -#include "AckGroupingTrackerDisabled.h" -#include -#include - -namespace pulsar { - -DECLARE_LOG_OBJECT() - -ConsumerImpl::ConsumerImpl(const ClientImplPtr client, const std::string& topic, - const std::string& subscriptionName, const ConsumerConfiguration& conf, - bool isPersistent, - const ExecutorServicePtr listenerExecutor /* = NULL by default */, - bool hasParent /* = false by default */, - const ConsumerTopicType consumerTopicType /* = NonPartitioned by default */, - Commands::SubscriptionMode subscriptionMode, Optional startMessageId) - : HandlerBase(client, topic, Backoff(milliseconds(100), seconds(60), milliseconds(0))), - waitingForZeroQueueSizeMessage(false), - config_(conf), - subscription_(subscriptionName), - originalSubscriptionName_(subscriptionName), - isPersistent_(isPersistent), - messageListener_(config_.getMessageListener()), - eventListener_(config_.getConsumerEventListener()), - hasParent_(hasParent), - consumerTopicType_(consumerTopicType), - subscriptionMode_(subscriptionMode), - // This is the initial capacity of the queue - incomingMessages_(std::max(config_.getReceiverQueueSize(), 1)), - availablePermits_(0), - receiverQueueRefillThreshold_(config_.getReceiverQueueSize() / 2), - consumerId_(client->newConsumerId()), - consumerName_(config_.getConsumerName()), - messageListenerRunning_(true), - batchAcknowledgementTracker_(topic_, subscriptionName, (long)consumerId_), - negativeAcksTracker_(client, *this, conf), - ackGroupingTrackerPtr_(std::make_shared()), - readCompacted_(conf.isReadCompacted()), - startMessageId_(startMessageId), - maxPendingChunkedMessage_(conf.getMaxPendingChunkedMessage()), - 
autoAckOldestChunkedMessageOnQueueFull_(conf.isAutoAckOldestChunkedMessageOnQueueFull()) { - std::stringstream consumerStrStream; - consumerStrStream << "[" << topic_ << ", " << subscription_ << ", " << consumerId_ << "] "; - consumerStr_ = consumerStrStream.str(); - - // Initialize un-ACKed messages OT tracker. - if (conf.getUnAckedMessagesTimeoutMs() != 0) { - if (conf.getTickDurationInMs() > 0) { - unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerEnabled( - conf.getUnAckedMessagesTimeoutMs(), conf.getTickDurationInMs(), client, *this)); - } else { - unAckedMessageTrackerPtr_.reset( - new UnAckedMessageTrackerEnabled(conf.getUnAckedMessagesTimeoutMs(), client, *this)); - } - } else { - unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerDisabled()); - } - - // Initialize listener executor. - if (listenerExecutor) { - listenerExecutor_ = listenerExecutor; - } else { - listenerExecutor_ = client->getListenerExecutorProvider()->get(); - } - - // Setup stats reporter. - unsigned int statsIntervalInSeconds = client->getClientConfig().getStatsIntervalInSeconds(); - if (statsIntervalInSeconds) { - consumerStatsBasePtr_ = std::make_shared( - consumerStr_, client->getIOExecutorProvider()->get(), statsIntervalInSeconds); - } else { - consumerStatsBasePtr_ = std::make_shared(); - } - - // Create msgCrypto - if (conf.isEncryptionEnabled()) { - msgCrypto_ = std::make_shared(consumerStr_, false); - } -} - -ConsumerImpl::~ConsumerImpl() { - LOG_DEBUG(getName() << "~ConsumerImpl"); - incomingMessages_.clear(); - if (state_ == Ready) { - // this could happen at least in this condition: - // consumer seek, caused reconnection, if consumer close happened before connection ready, - // then consumer will not send closeConsumer to Broker side, and caused a leak of consumer in - // broker. 
- LOG_WARN(getName() << "Destroyed consumer which was not properly closed"); - - ClientConnectionPtr cnx = getCnx().lock(); - ClientImplPtr client = client_.lock(); - int requestId = client->newRequestId(); - if (cnx) { - cnx->sendRequestWithId(Commands::newCloseConsumer(consumerId_, requestId), requestId); - cnx->removeConsumer(consumerId_); - LOG_INFO(getName() << "Closed consumer for race condition: " << consumerId_); - } - } -} - -void ConsumerImpl::setPartitionIndex(int partitionIndex) { partitionIndex_ = partitionIndex; } - -int ConsumerImpl::getPartitionIndex() { return partitionIndex_; } - -uint64_t ConsumerImpl::getConsumerId() { return consumerId_; } - -Future ConsumerImpl::getConsumerCreatedFuture() { - return consumerCreatedPromise_.getFuture(); -} - -const std::string& ConsumerImpl::getSubscriptionName() const { return originalSubscriptionName_; } - -const std::string& ConsumerImpl::getTopic() const { return topic_; } - -void ConsumerImpl::start() { - HandlerBase::start(); - - // Initialize ackGroupingTrackerPtr_ here because the shared_from_this() was not initialized until the - // constructor completed. - if (TopicName::get(topic_)->isPersistent()) { - if (config_.getAckGroupingTimeMs() > 0) { - ackGroupingTrackerPtr_.reset(new AckGroupingTrackerEnabled( - client_.lock(), shared_from_this(), consumerId_, config_.getAckGroupingTimeMs(), - config_.getAckGroupingMaxSize())); - } else { - ackGroupingTrackerPtr_.reset(new AckGroupingTrackerDisabled(*this, consumerId_)); - } - } else { - LOG_INFO(getName() << "ACK will NOT be sent to broker for this non-persistent topic."); - } - ackGroupingTrackerPtr_->start(); -} - -void ConsumerImpl::connectionOpened(const ClientConnectionPtr& cnx) { - if (state_ == Closed) { - LOG_DEBUG(getName() << "connectionOpened : Consumer is already closed"); - return; - } - - // Register consumer so that we can handle other incomming commands (e.g. ACTIVE_CONSUMER_CHANGE) after - // sending the subscribe request. 
- cnx->registerConsumer(consumerId_, shared_from_this()); - - if (duringSeek_) { - ackGroupingTrackerPtr_->flushAndClean(); - } - - Lock lockForMessageId(mutexForMessageId_); - // Update startMessageId so that we can discard messages after delivery restarts - const auto startMessageId = clearReceiveQueue(); - const auto subscribeMessageId = (subscriptionMode_ == Commands::SubscriptionModeNonDurable) - ? startMessageId - : Optional::empty(); - startMessageId_ = startMessageId; - lockForMessageId.unlock(); - - unAckedMessageTrackerPtr_->clear(); - batchAcknowledgementTracker_.clear(); - - ClientImplPtr client = client_.lock(); - uint64_t requestId = client->newRequestId(); - SharedBuffer cmd = Commands::newSubscribe( - topic_, subscription_, consumerId_, requestId, getSubType(), consumerName_, subscriptionMode_, - subscribeMessageId, readCompacted_, config_.getProperties(), config_.getSubscriptionProperties(), - config_.getSchema(), getInitialPosition(), config_.isReplicateSubscriptionStateEnabled(), - config_.getKeySharedPolicy(), config_.getPriorityLevel()); - cnx->sendRequestWithId(cmd, requestId) - .addListener( - std::bind(&ConsumerImpl::handleCreateConsumer, shared_from_this(), cnx, std::placeholders::_1)); -} - -void ConsumerImpl::connectionFailed(Result result) { - // Keep a reference to ensure object is kept alive - ConsumerImplPtr ptr = shared_from_this(); - - if (consumerCreatedPromise_.setFailed(result)) { - state_ = Failed; - } -} - -void ConsumerImpl::sendFlowPermitsToBroker(const ClientConnectionPtr& cnx, int numMessages) { - if (cnx && numMessages > 0) { - LOG_DEBUG(getName() << "Send more permits: " << numMessages); - SharedBuffer cmd = Commands::newFlow(consumerId_, static_cast(numMessages)); - cnx->sendCommand(cmd); - } -} - -void ConsumerImpl::handleCreateConsumer(const ClientConnectionPtr& cnx, Result result) { - static bool firstTime = true; - if (result == ResultOk) { - if (firstTime) { - firstTime = false; - } - LOG_INFO(getName() << "Created 
consumer on broker " << cnx->cnxString()); - { - Lock lock(mutex_); - connection_ = cnx; - incomingMessages_.clear(); - state_ = Ready; - backoff_.reset(); - // Complicated logic since we don't have a isLocked() function for mutex - if (waitingForZeroQueueSizeMessage) { - sendFlowPermitsToBroker(cnx, 1); - } - availablePermits_ = 0; - } - - LOG_DEBUG(getName() << "Send initial flow permits: " << config_.getReceiverQueueSize()); - if (consumerTopicType_ == NonPartitioned || !firstTime) { - if (config_.getReceiverQueueSize() != 0) { - sendFlowPermitsToBroker(cnx, config_.getReceiverQueueSize()); - } else if (messageListener_) { - sendFlowPermitsToBroker(cnx, 1); - } - } - consumerCreatedPromise_.setValue(shared_from_this()); - } else { - if (result == ResultTimeout) { - // Creating the consumer has timed out. We need to ensure the broker closes the consumer - // in case it was indeed created, otherwise it might prevent new subscribe operation, - // since we are not closing the connection - int requestId = client_.lock()->newRequestId(); - cnx->sendRequestWithId(Commands::newCloseConsumer(consumerId_, requestId), requestId); - } - - if (consumerCreatedPromise_.isComplete()) { - // Consumer had already been initially created, we need to retry connecting in any case - LOG_WARN(getName() << "Failed to reconnect consumer: " << strResult(result)); - scheduleReconnection(shared_from_this()); - } else { - // Consumer was not yet created, retry to connect to broker if it's possible - if (isRetriableError(result) && (creationTimestamp_ + operationTimeut_ < TimeUtils::now())) { - LOG_WARN(getName() << "Temporary error in creating consumer : " << strResult(result)); - scheduleReconnection(shared_from_this()); - } else { - LOG_ERROR(getName() << "Failed to create consumer: " << strResult(result)); - consumerCreatedPromise_.setFailed(result); - state_ = Failed; - } - } - } -} - -void ConsumerImpl::unsubscribeAsync(ResultCallback callback) { - LOG_INFO(getName() << 
"Unsubscribing"); - - if (state_ != Ready) { - callback(ResultAlreadyClosed); - LOG_ERROR(getName() << "Can not unsubscribe a closed subscription, please call subscribe again and " - "then call unsubscribe"); - return; - } - - Lock lock(mutex_); - - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - LOG_DEBUG(getName() << "Unsubscribe request sent for consumer - " << consumerId_); - ClientImplPtr client = client_.lock(); - lock.unlock(); - int requestId = client->newRequestId(); - SharedBuffer cmd = Commands::newUnsubscribe(consumerId_, requestId); - cnx->sendRequestWithId(cmd, requestId) - .addListener(std::bind(&ConsumerImpl::handleUnsubscribe, shared_from_this(), - std::placeholders::_1, callback)); - } else { - Result result = ResultNotConnected; - lock.unlock(); - LOG_WARN(getName() << "Failed to unsubscribe: " << strResult(result)); - callback(result); - } -} - -void ConsumerImpl::handleUnsubscribe(Result result, ResultCallback callback) { - if (result == ResultOk) { - state_ = Closed; - LOG_INFO(getName() << "Unsubscribed successfully"); - } else { - LOG_WARN(getName() << "Failed to unsubscribe: " << strResult(result)); - } - callback(result); -} - -Optional ConsumerImpl::processMessageChunk(const SharedBuffer& payload, - const proto::MessageMetadata& metadata, - const MessageId& messageId, - const proto::MessageIdData& messageIdData, - const ClientConnectionPtr& cnx) { - const auto chunkId = metadata.chunk_id(); - const auto uuid = metadata.uuid(); - LOG_DEBUG("Process message chunk (chunkId: " << chunkId << ", uuid: " << uuid - << ", messageId: " << messageId << ") of " - << payload.readableBytes() << " bytes"); - - Lock lock(chunkProcessMutex_); - auto it = chunkedMessageCache_.find(uuid); - - if (chunkId == 0) { - if (it == chunkedMessageCache_.end()) { - it = chunkedMessageCache_.putIfAbsent( - uuid, ChunkedMessageCtx{metadata.num_chunks_from_msg(), metadata.total_chunk_msg_size()}); - } - if (maxPendingChunkedMessage_ > 0 && 
chunkedMessageCache_.size() >= maxPendingChunkedMessage_) { - chunkedMessageCache_.removeOldestValues( - chunkedMessageCache_.size() - maxPendingChunkedMessage_ + 1, - [this, messageId](const std::string& uuid, const ChunkedMessageCtx& ctx) { - if (autoAckOldestChunkedMessageOnQueueFull_) { - doAcknowledgeIndividual(messageId, [uuid, messageId](Result result) { - if (result != ResultOk) { - LOG_WARN("Failed to acknowledge discarded chunk, uuid: " - << uuid << ", messageId: " << messageId); - } - }); - } else { - trackMessage(messageId); - } - }); - it = chunkedMessageCache_.putIfAbsent( - uuid, ChunkedMessageCtx{metadata.num_chunks_from_msg(), metadata.total_chunk_msg_size()}); - } - } - - auto& chunkedMsgCtx = it->second; - if (it == chunkedMessageCache_.end() || !chunkedMsgCtx.validateChunkId(chunkId)) { - if (it == chunkedMessageCache_.end()) { - LOG_ERROR("Received an uncached chunk (uuid: " << uuid << " chunkId: " << chunkId - << ", messageId: " << messageId << ")"); - } else { - LOG_ERROR("Received a chunk whose chunk id is invalid (uuid: " - << uuid << " chunkId: " << chunkId << ", messageId: " << messageId << ")"); - chunkedMessageCache_.remove(uuid); - } - lock.unlock(); - increaseAvailablePermits(cnx); - trackMessage(messageId); - return Optional::empty(); - } - - chunkedMsgCtx.appendChunk(messageId, payload); - if (!chunkedMsgCtx.isCompleted()) { - lock.unlock(); - increaseAvailablePermits(cnx); - return Optional::empty(); - } - - LOG_DEBUG("Chunked message completed chunkId: " << chunkId << ", ChunkedMessageCtx: " << chunkedMsgCtx - << ", sequenceId: " << metadata.sequence_id()); - - auto wholePayload = chunkedMsgCtx.getBuffer(); - chunkedMessageCache_.remove(uuid); - if (uncompressMessageIfNeeded(cnx, messageIdData, metadata, wholePayload, false)) { - return Optional::of(wholePayload); - } else { - return Optional::empty(); - } -} - -void ConsumerImpl::messageReceived(const ClientConnectionPtr& cnx, const proto::CommandMessage& msg, - bool& 
isChecksumValid, proto::MessageMetadata& metadata, - SharedBuffer& payload) { - LOG_DEBUG(getName() << "Received Message -- Size: " << payload.readableBytes()); - - if (!decryptMessageIfNeeded(cnx, msg, metadata, payload)) { - // Message was discarded or not consumed due to decryption failure - return; - } - - if (!isChecksumValid) { - // Message discarded for checksum error - discardCorruptedMessage(cnx, msg.message_id(), proto::CommandAck::ChecksumMismatch); - return; - } - - const bool isMessageUndecryptable = - metadata.encryption_keys_size() > 0 && !config_.getCryptoKeyReader().get() && - config_.getCryptoFailureAction() == ConsumerCryptoFailureAction::CONSUME; - - const bool isChunkedMessage = metadata.num_chunks_from_msg() > 1; - if (!isMessageUndecryptable && !isChunkedMessage) { - if (!uncompressMessageIfNeeded(cnx, msg.message_id(), metadata, payload, true)) { - // Message was discarded on decompression error - return; - } - } - - // Only a non-batched messages can be a chunk - if (!metadata.has_num_messages_in_batch() && isChunkedMessage) { - const auto& messageIdData = msg.message_id(); - MessageId messageId(messageIdData.partition(), messageIdData.ledgerid(), messageIdData.entryid(), - messageIdData.batch_index()); - auto optionalPayload = processMessageChunk(payload, metadata, messageId, messageIdData, cnx); - if (optionalPayload.is_present()) { - payload = optionalPayload.value(); - } else { - return; - } - } - - Message m(msg, metadata, payload, partitionIndex_); - m.impl_->cnx_ = cnx.get(); - m.impl_->setTopicName(topic_); - m.impl_->setRedeliveryCount(msg.redelivery_count()); - - if (metadata.has_schema_version()) { - m.impl_->setSchemaVersion(metadata.schema_version()); - } - - LOG_DEBUG(getName() << " metadata.num_messages_in_batch() = " << metadata.num_messages_in_batch()); - LOG_DEBUG(getName() << " metadata.has_num_messages_in_batch() = " - << metadata.has_num_messages_in_batch()); - - uint32_t numOfMessageReceived = 
m.impl_->metadata.num_messages_in_batch(); - if (this->ackGroupingTrackerPtr_->isDuplicate(m.getMessageId())) { - LOG_DEBUG(getName() << " Ignoring message as it was ACKed earlier by same consumer."); - increaseAvailablePermits(cnx, numOfMessageReceived); - return; - } - - if (metadata.has_num_messages_in_batch()) { - Lock lock(mutex_); - numOfMessageReceived = receiveIndividualMessagesFromBatch(cnx, m, msg.redelivery_count()); - } else { - const auto startMessageId = startMessageId_.get(); - if (isPersistent_ && startMessageId.is_present() && - m.getMessageId().ledgerId() == startMessageId.value().ledgerId() && - m.getMessageId().entryId() == startMessageId.value().entryId() && - isPriorEntryIndex(m.getMessageId().entryId())) { - LOG_DEBUG(getName() << " Ignoring message from before the startMessageId: " - << startMessageId.value()); - return; - } - - Lock lock(pendingReceiveMutex_); - // if asyncReceive is waiting then notify callback without adding to incomingMessages queue - bool asyncReceivedWaiting = !pendingReceives_.empty(); - ReceiveCallback callback; - if (asyncReceivedWaiting) { - callback = pendingReceives_.front(); - pendingReceives_.pop(); - } - lock.unlock(); - - if (asyncReceivedWaiting) { - listenerExecutor_->postWork(std::bind(&ConsumerImpl::notifyPendingReceivedCallback, - shared_from_this(), ResultOk, m, callback)); - return; - } - - // config_.getReceiverQueueSize() != 0 or waiting For ZeroQueueSize Message` - if (config_.getReceiverQueueSize() != 0 || - (config_.getReceiverQueueSize() == 0 && messageListener_)) { - incomingMessages_.push(m); - } else { - Lock lock(mutex_); - if (waitingForZeroQueueSizeMessage) { - lock.unlock(); - incomingMessages_.push(m); - } - } - } - - if (messageListener_) { - if (!messageListenerRunning_) { - return; - } - // Trigger message listener callback in a separate thread - while (numOfMessageReceived--) { - listenerExecutor_->postWork(std::bind(&ConsumerImpl::internalListener, shared_from_this())); - } - } -} - 
-void ConsumerImpl::activeConsumerChanged(bool isActive) { - if (eventListener_) { - listenerExecutor_->postWork( - std::bind(&ConsumerImpl::internalConsumerChangeListener, shared_from_this(), isActive)); - } -} - -void ConsumerImpl::internalConsumerChangeListener(bool isActive) { - try { - if (isActive) { - eventListener_->becameActive(Consumer(shared_from_this()), partitionIndex_); - } else { - eventListener_->becameInactive(Consumer(shared_from_this()), partitionIndex_); - } - } catch (const std::exception& e) { - LOG_ERROR(getName() << "Exception thrown from event listener " << e.what()); - } -} - -void ConsumerImpl::failPendingReceiveCallback() { - Message msg; - Lock lock(pendingReceiveMutex_); - while (!pendingReceives_.empty()) { - ReceiveCallback callback = pendingReceives_.front(); - pendingReceives_.pop(); - listenerExecutor_->postWork(std::bind(&ConsumerImpl::notifyPendingReceivedCallback, - shared_from_this(), ResultAlreadyClosed, msg, callback)); - } - lock.unlock(); -} - -void ConsumerImpl::notifyPendingReceivedCallback(Result result, Message& msg, - const ReceiveCallback& callback) { - if (result == ResultOk && config_.getReceiverQueueSize() != 0) { - messageProcessed(msg); - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - } - callback(result, msg); -} - -// Zero Queue size is not supported with Batch Messages -uint32_t ConsumerImpl::receiveIndividualMessagesFromBatch(const ClientConnectionPtr& cnx, - Message& batchedMessage, int redeliveryCount) { - unsigned int batchSize = batchedMessage.impl_->metadata.num_messages_in_batch(); - batchAcknowledgementTracker_.receivedMessage(batchedMessage); - LOG_DEBUG("Received Batch messages of size - " << batchSize - << " -- msgId: " << batchedMessage.getMessageId()); - const auto startMessageId = startMessageId_.get(); - - int skippedMessages = 0; - - for (int i = 0; i < batchSize; i++) { - // This is a cheap copy since message contains only one shared pointer (impl_) - Message msg = 
Commands::deSerializeSingleMessageInBatch(batchedMessage, i); - msg.impl_->setRedeliveryCount(redeliveryCount); - msg.impl_->setTopicName(batchedMessage.getTopicName()); - - if (startMessageId.is_present()) { - const MessageId& msgId = msg.getMessageId(); - - // If we are receiving a batch message, we need to discard messages that were prior - // to the startMessageId - if (isPersistent_ && msgId.ledgerId() == startMessageId.value().ledgerId() && - msgId.entryId() == startMessageId.value().entryId() && - isPriorBatchIndex(msgId.batchIndex())) { - LOG_DEBUG(getName() << "Ignoring message from before the startMessageId" - << msg.getMessageId()); - ++skippedMessages; - continue; - } - } - - // - Lock lock(pendingReceiveMutex_); - if (!pendingReceives_.empty()) { - ReceiveCallback callback = pendingReceives_.front(); - pendingReceives_.pop(); - lock.unlock(); - listenerExecutor_->postWork(std::bind(&ConsumerImpl::notifyPendingReceivedCallback, - shared_from_this(), ResultOk, msg, callback)); - } else { - // Regular path, append individual message to incoming messages queue - incomingMessages_.push(msg); - lock.unlock(); - } - } - - if (skippedMessages > 0) { - increaseAvailablePermits(cnx, skippedMessages); - } - - return batchSize - skippedMessages; -} - -bool ConsumerImpl::decryptMessageIfNeeded(const ClientConnectionPtr& cnx, const proto::CommandMessage& msg, - const proto::MessageMetadata& metadata, SharedBuffer& payload) { - if (!metadata.encryption_keys_size()) { - return true; - } - - // If KeyReader is not configured throw exception based on config param - if (!config_.isEncryptionEnabled()) { - if (config_.getCryptoFailureAction() == ConsumerCryptoFailureAction::CONSUME) { - LOG_WARN(getName() << "CryptoKeyReader is not implemented. 
Consuming encrypted message."); - return true; - } else if (config_.getCryptoFailureAction() == ConsumerCryptoFailureAction::DISCARD) { - LOG_WARN(getName() << "Skipping decryption since CryptoKeyReader is not implemented and config " - "is set to discard"); - discardCorruptedMessage(cnx, msg.message_id(), proto::CommandAck::DecryptionError); - } else { - LOG_ERROR(getName() << "Message delivery failed since CryptoKeyReader is not implemented to " - "consume encrypted message"); - } - return false; - } - - SharedBuffer decryptedPayload; - if (msgCrypto_->decrypt(metadata, payload, config_.getCryptoKeyReader(), decryptedPayload)) { - payload = decryptedPayload; - return true; - } - - if (config_.getCryptoFailureAction() == ConsumerCryptoFailureAction::CONSUME) { - // Note, batch message will fail to consume even if config is set to consume - LOG_WARN( - getName() << "Decryption failed. Consuming encrypted message since config is set to consume."); - return true; - } else if (config_.getCryptoFailureAction() == ConsumerCryptoFailureAction::DISCARD) { - LOG_WARN(getName() << "Discarding message since decryption failed and config is set to discard"); - discardCorruptedMessage(cnx, msg.message_id(), proto::CommandAck::DecryptionError); - } else { - LOG_ERROR(getName() << "Message delivery failed since unable to decrypt incoming message"); - } - return false; -} - -bool ConsumerImpl::uncompressMessageIfNeeded(const ClientConnectionPtr& cnx, - const proto::MessageIdData& messageIdData, - const proto::MessageMetadata& metadata, SharedBuffer& payload, - bool checkMaxMessageSize) { - if (!metadata.has_compression()) { - return true; - } - - CompressionType compressionType = CompressionCodecProvider::convertType(metadata.compression()); - - uint32_t uncompressedSize = metadata.uncompressed_size(); - uint32_t payloadSize = payload.readableBytes(); - if (cnx) { - if (checkMaxMessageSize && payloadSize > ClientConnection::getMaxMessageSize()) { - // Uncompressed size is itself 
corrupted since it cannot be bigger than the MaxMessageSize - LOG_ERROR(getName() << "Got corrupted payload message size " << payloadSize // - << " at " << messageIdData.ledgerid() << ":" << messageIdData.entryid()); - discardCorruptedMessage(cnx, messageIdData, proto::CommandAck::UncompressedSizeCorruption); - return false; - } - } else { - LOG_ERROR("Connection not ready for Consumer - " << getConsumerId()); - return false; - } - - if (!CompressionCodecProvider::getCodec(compressionType).decode(payload, uncompressedSize, payload)) { - LOG_ERROR(getName() << "Failed to decompress message with " << uncompressedSize // - << " at " << messageIdData.ledgerid() << ":" << messageIdData.entryid()); - discardCorruptedMessage(cnx, messageIdData, proto::CommandAck::DecompressionError); - return false; - } - - return true; -} - -void ConsumerImpl::discardCorruptedMessage(const ClientConnectionPtr& cnx, - const proto::MessageIdData& messageId, - proto::CommandAck::ValidationError validationError) { - LOG_ERROR(getName() << "Discarding corrupted message at " << messageId.ledgerid() << ":" - << messageId.entryid()); - - SharedBuffer cmd = - Commands::newAck(consumerId_, messageId, proto::CommandAck::Individual, validationError); - - cnx->sendCommand(cmd); - increaseAvailablePermits(cnx); -} - -void ConsumerImpl::internalListener() { - if (!messageListenerRunning_) { - return; - } - Message msg; - if (!incomingMessages_.pop(msg, std::chrono::milliseconds(0))) { - // This will only happen when the connection got reset and we cleared the queue - return; - } - trackMessage(msg.getMessageId()); - try { - consumerStatsBasePtr_->receivedMessage(msg, ResultOk); - lastDequedMessageId_ = msg.getMessageId(); - messageListener_(Consumer(shared_from_this()), msg); - } catch (const std::exception& e) { - LOG_ERROR(getName() << "Exception thrown from listener" << e.what()); - } - messageProcessed(msg, false); -} - -Result ConsumerImpl::fetchSingleMessageFromBroker(Message& msg) { - if 
(config_.getReceiverQueueSize() != 0) { - LOG_ERROR(getName() << " Can't use receiveForZeroQueueSize if the queue size is not 0"); - return ResultInvalidConfiguration; - } - - // Using RAII for locking - ClientConnectionPtr currentCnx = getCnx().lock(); - Lock lock(mutexForReceiveWithZeroQueueSize); - - // Just being cautious - if (incomingMessages_.size() != 0) { - LOG_ERROR( - getName() << "The incoming message queue should never be greater than 0 when Queue size is 0"); - incomingMessages_.clear(); - } - Lock localLock(mutex_); - waitingForZeroQueueSizeMessage = true; - localLock.unlock(); - - sendFlowPermitsToBroker(currentCnx, 1); - - while (true) { - if (!incomingMessages_.pop(msg)) { - return ResultInterrupted; - } - - { - // Lock needed to prevent race between connectionOpened and the check "msg.impl_->cnx_ == - // currentCnx.get())" - Lock localLock(mutex_); - // if message received due to an old flow - discard it and wait for the message from the - // latest flow command - if (msg.impl_->cnx_ == currentCnx.get()) { - waitingForZeroQueueSizeMessage = false; - // Can't use break here else it may trigger a race with connection opened. 
- return ResultOk; - } - } - } - return ResultOk; -} - -Result ConsumerImpl::receive(Message& msg) { - Result res = receiveHelper(msg); - consumerStatsBasePtr_->receivedMessage(msg, res); - return res; -} - -void ConsumerImpl::receiveAsync(ReceiveCallback& callback) { - Message msg; - - // fail the callback if consumer is closing or closed - if (state_ != Ready) { - callback(ResultAlreadyClosed, msg); - return; - } - - Lock lock(pendingReceiveMutex_); - if (incomingMessages_.pop(msg, std::chrono::milliseconds(0))) { - lock.unlock(); - messageProcessed(msg); - callback(ResultOk, msg); - } else { - pendingReceives_.push(callback); - lock.unlock(); - - if (config_.getReceiverQueueSize() == 0) { - sendFlowPermitsToBroker(getCnx().lock(), 1); - } - } -} - -Result ConsumerImpl::receiveHelper(Message& msg) { - if (state_ != Ready) { - return ResultAlreadyClosed; - } - - if (messageListener_) { - LOG_ERROR(getName() << "Can not receive when a listener has been set"); - return ResultInvalidConfiguration; - } - - if (config_.getReceiverQueueSize() == 0) { - return fetchSingleMessageFromBroker(msg); - } - - if (!incomingMessages_.pop(msg)) { - return ResultInterrupted; - } - - messageProcessed(msg); - return ResultOk; -} - -Result ConsumerImpl::receive(Message& msg, int timeout) { - Result res = receiveHelper(msg, timeout); - consumerStatsBasePtr_->receivedMessage(msg, res); - return res; -} - -Result ConsumerImpl::receiveHelper(Message& msg, int timeout) { - if (config_.getReceiverQueueSize() == 0) { - LOG_WARN(getName() << "Can't use this function if the queue size is 0"); - return ResultInvalidConfiguration; - } - - if (state_ != Ready) { - return ResultAlreadyClosed; - } - - if (messageListener_) { - LOG_ERROR(getName() << "Can not receive when a listener has been set"); - return ResultInvalidConfiguration; - } - - if (incomingMessages_.pop(msg, std::chrono::milliseconds(timeout))) { - messageProcessed(msg); - return ResultOk; - } else { - if (state_ != Ready) { - return 
ResultAlreadyClosed; - } - return ResultTimeout; - } -} - -void ConsumerImpl::messageProcessed(Message& msg, bool track) { - Lock lock(mutexForMessageId_); - lastDequedMessageId_ = msg.getMessageId(); - lock.unlock(); - - ClientConnectionPtr currentCnx = getCnx().lock(); - if (currentCnx && msg.impl_->cnx_ != currentCnx.get()) { - LOG_DEBUG(getName() << "Not adding permit since connection is different."); - return; - } - - increaseAvailablePermits(currentCnx); - if (track) { - trackMessage(msg.getMessageId()); - } -} - -/** - * Clear the internal receiver queue and returns the message id of what was the 1st message in the queue that - * was - * not seen by the application - */ -Optional ConsumerImpl::clearReceiveQueue() { - bool expectedDuringSeek = true; - if (duringSeek_.compare_exchange_strong(expectedDuringSeek, false)) { - return Optional::of(seekMessageId_.get()); - } else if (subscriptionMode_ == Commands::SubscriptionModeDurable) { - return startMessageId_.get(); - } - Message nextMessageInQueue; - if (incomingMessages_.peekAndClear(nextMessageInQueue)) { - // There was at least one message pending in the queue - const MessageId& nextMessageId = nextMessageInQueue.getMessageId(); - MessageId previousMessageId; - if (nextMessageId.batchIndex() >= 0) { - previousMessageId = MessageId(-1, nextMessageId.ledgerId(), nextMessageId.entryId(), - nextMessageId.batchIndex() - 1); - } else { - previousMessageId = MessageId(-1, nextMessageId.ledgerId(), nextMessageId.entryId() - 1, -1); - } - return Optional::of(previousMessageId); - } else if (lastDequedMessageId_ != MessageId::earliest()) { - // If the queue was empty we need to restart from the message just after the last one that has been - // dequeued - // in the past - return Optional::of(lastDequedMessageId_); - } else { - // No message was received or dequeued by this consumer. 
Next message would still be the - // startMessageId - return startMessageId_.get(); - } -} - -void ConsumerImpl::increaseAvailablePermits(const ClientConnectionPtr& currentCnx, int delta) { - int newAvailablePermits = availablePermits_.fetch_add(delta) + delta; - - while (newAvailablePermits >= receiverQueueRefillThreshold_ && messageListenerRunning_) { - if (availablePermits_.compare_exchange_weak(newAvailablePermits, 0)) { - sendFlowPermitsToBroker(currentCnx, newAvailablePermits); - break; - } - } -} - -inline proto::CommandSubscribe_SubType ConsumerImpl::getSubType() { - ConsumerType type = config_.getConsumerType(); - switch (type) { - case ConsumerExclusive: - return proto::CommandSubscribe::Exclusive; - - case ConsumerShared: - return proto::CommandSubscribe::Shared; - - case ConsumerFailover: - return proto::CommandSubscribe::Failover; - - case ConsumerKeyShared: - return proto::CommandSubscribe_SubType_Key_Shared; - } - BOOST_THROW_EXCEPTION(std::logic_error("Invalid ConsumerType enumeration value")); -} - -inline proto::CommandSubscribe_InitialPosition ConsumerImpl::getInitialPosition() { - InitialPosition initialPosition = config_.getSubscriptionInitialPosition(); - switch (initialPosition) { - case InitialPositionLatest: - return proto::CommandSubscribe_InitialPosition::CommandSubscribe_InitialPosition_Latest; - - case InitialPositionEarliest: - return proto::CommandSubscribe_InitialPosition::CommandSubscribe_InitialPosition_Earliest; - } - BOOST_THROW_EXCEPTION(std::logic_error("Invalid InitialPosition enumeration value")); -} - -void ConsumerImpl::statsCallback(Result res, ResultCallback callback, proto::CommandAck_AckType ackType) { - consumerStatsBasePtr_->messageAcknowledged(res, ackType); - if (callback) { - callback(res); - } -} - -void ConsumerImpl::acknowledgeAsync(const MessageId& msgId, ResultCallback callback) { - ResultCallback cb = std::bind(&ConsumerImpl::statsCallback, shared_from_this(), std::placeholders::_1, - callback, 
proto::CommandAck_AckType_Individual); - if (msgId.batchIndex() != -1 && - !batchAcknowledgementTracker_.isBatchReady(msgId, proto::CommandAck_AckType_Individual)) { - cb(ResultOk); - return; - } - doAcknowledgeIndividual(msgId, cb); -} - -void ConsumerImpl::acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) { - ResultCallback cb = std::bind(&ConsumerImpl::statsCallback, shared_from_this(), std::placeholders::_1, - callback, proto::CommandAck_AckType_Cumulative); - if (!isCumulativeAcknowledgementAllowed(config_.getConsumerType())) { - cb(ResultCumulativeAcknowledgementNotAllowedError); - return; - } - if (msgId.batchIndex() != -1 && - !batchAcknowledgementTracker_.isBatchReady(msgId, proto::CommandAck_AckType_Cumulative)) { - MessageId messageId = batchAcknowledgementTracker_.getGreatestCumulativeAckReady(msgId); - if (messageId == MessageId()) { - // Nothing to ACK, because the batch that msgId belongs to is NOT completely consumed. - cb(ResultOk); - } else { - doAcknowledgeCumulative(messageId, cb); - } - } else { - doAcknowledgeCumulative(msgId, cb); - } -} - -bool ConsumerImpl::isCumulativeAcknowledgementAllowed(ConsumerType consumerType) { - return consumerType != ConsumerKeyShared && consumerType != ConsumerShared; -} - -void ConsumerImpl::doAcknowledgeIndividual(const MessageId& messageId, ResultCallback callback) { - this->unAckedMessageTrackerPtr_->remove(messageId); - this->batchAcknowledgementTracker_.deleteAckedMessage(messageId, proto::CommandAck::Individual); - this->ackGroupingTrackerPtr_->addAcknowledge(messageId); - callback(ResultOk); -} - -void ConsumerImpl::doAcknowledgeCumulative(const MessageId& messageId, ResultCallback callback) { - this->unAckedMessageTrackerPtr_->removeMessagesTill(messageId); - this->batchAcknowledgementTracker_.deleteAckedMessage(messageId, proto::CommandAck::Cumulative); - this->ackGroupingTrackerPtr_->addAcknowledgeCumulative(messageId); - callback(ResultOk); -} - -void 
ConsumerImpl::negativeAcknowledge(const MessageId& messageId) { - unAckedMessageTrackerPtr_->remove(messageId); - negativeAcksTracker_.add(messageId); -} - -void ConsumerImpl::disconnectConsumer() { - LOG_INFO("Broker notification of Closed consumer: " << consumerId_); - Lock lock(mutex_); - connection_.reset(); - lock.unlock(); - scheduleReconnection(shared_from_this()); -} - -void ConsumerImpl::closeAsync(ResultCallback callback) { - // Keep a reference to ensure object is kept alive - ConsumerImplPtr ptr = shared_from_this(); - - if (state_ != Ready) { - if (callback) { - callback(ResultAlreadyClosed); - } - return; - } - - LOG_INFO(getName() << "Closing consumer for topic " << topic_); - state_ = Closing; - incomingMessages_.close(); - - // Flush pending grouped ACK requests. - if (ackGroupingTrackerPtr_) { - ackGroupingTrackerPtr_->close(); - } - - ClientConnectionPtr cnx = getCnx().lock(); - if (!cnx) { - state_ = Closed; - // If connection is gone, also the consumer is closed on the broker side - if (callback) { - callback(ResultOk); - } - return; - } - - ClientImplPtr client = client_.lock(); - if (!client) { - state_ = Closed; - // Client was already destroyed - if (callback) { - callback(ResultOk); - } - return; - } - - int requestId = client->newRequestId(); - Future future = - cnx->sendRequestWithId(Commands::newCloseConsumer(consumerId_, requestId), requestId); - if (callback) { - // Pass the shared pointer "ptr" to the handler to prevent the object from being destroyed - future.addListener( - std::bind(&ConsumerImpl::handleClose, shared_from_this(), std::placeholders::_1, callback, ptr)); - } - - // fail pendingReceive callback - failPendingReceiveCallback(); -} - -void ConsumerImpl::handleClose(Result result, ResultCallback callback, ConsumerImplPtr consumer) { - if (result == ResultOk) { - state_ = Closed; - - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - cnx->removeConsumer(consumerId_); - } - - LOG_INFO(getName() << "Closed consumer " 
<< consumerId_); - } else { - LOG_ERROR(getName() << "Failed to close consumer: " << result); - } - - if (callback) { - callback(result); - } -} - -const std::string& ConsumerImpl::getName() const { return consumerStr_; } - -void ConsumerImpl::shutdown() { - state_ = Closed; - - consumerCreatedPromise_.setFailed(ResultAlreadyClosed); -} - -bool ConsumerImpl::isClosed() { return state_ == Closed; } - -bool ConsumerImpl::isOpen() { return state_ == Ready; } - -Result ConsumerImpl::pauseMessageListener() { - if (!messageListener_) { - return ResultInvalidConfiguration; - } - messageListenerRunning_ = false; - return ResultOk; -} - -Result ConsumerImpl::resumeMessageListener() { - if (!messageListener_) { - return ResultInvalidConfiguration; - } - - if (messageListenerRunning_) { - // Not paused - return ResultOk; - } - messageListenerRunning_ = true; - const size_t count = incomingMessages_.size(); - - for (size_t i = 0; i < count; i++) { - // Trigger message listener callback in a separate thread - listenerExecutor_->postWork(std::bind(&ConsumerImpl::internalListener, shared_from_this())); - } - // Check current permits and determine whether to send FLOW command - this->increaseAvailablePermits(getCnx().lock(), 0); - return ResultOk; -} - -void ConsumerImpl::redeliverUnacknowledgedMessages() { - static std::set emptySet; - redeliverMessages(emptySet); - unAckedMessageTrackerPtr_->clear(); -} - -void ConsumerImpl::redeliverUnacknowledgedMessages(const std::set& messageIds) { - if (messageIds.empty()) { - return; - } - if (config_.getConsumerType() != ConsumerShared && config_.getConsumerType() != ConsumerKeyShared) { - redeliverUnacknowledgedMessages(); - return; - } - redeliverMessages(messageIds); -} - -void ConsumerImpl::redeliverMessages(const std::set& messageIds) { - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - if (cnx->getServerProtocolVersion() >= proto::v2) { - cnx->sendCommand(Commands::newRedeliverUnacknowledgedMessages(consumerId_, 
messageIds)); - LOG_DEBUG("Sending RedeliverUnacknowledgedMessages command for Consumer - " << getConsumerId()); - } - } else { - LOG_DEBUG("Connection not ready for Consumer - " << getConsumerId()); - } -} - -int ConsumerImpl::getNumOfPrefetchedMessages() const { return incomingMessages_.size(); } - -void ConsumerImpl::getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) { - if (state_ != Ready) { - LOG_ERROR(getName() << "Client connection is not open, please try again later.") - callback(ResultConsumerNotInitialized, BrokerConsumerStats()); - return; - } - - Lock lock(mutex_); - if (brokerConsumerStats_.isValid()) { - LOG_DEBUG(getName() << "Serving data from cache"); - BrokerConsumerStatsImpl brokerConsumerStats = brokerConsumerStats_; - lock.unlock(); - callback(ResultOk, - BrokerConsumerStats(std::make_shared(brokerConsumerStats_))); - return; - } - lock.unlock(); - - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - if (cnx->getServerProtocolVersion() >= proto::v8) { - ClientImplPtr client = client_.lock(); - uint64_t requestId = client->newRequestId(); - LOG_DEBUG(getName() << " Sending ConsumerStats Command for Consumer - " << getConsumerId() - << ", requestId - " << requestId); - - cnx->newConsumerStats(consumerId_, requestId) - .addListener(std::bind(&ConsumerImpl::brokerConsumerStatsListener, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, callback)); - return; - } else { - LOG_ERROR(getName() << " Operation not supported since server protobuf version " - << cnx->getServerProtocolVersion() << " is older than proto::v7"); - callback(ResultUnsupportedVersionError, BrokerConsumerStats()); - return; - } - } - LOG_ERROR(getName() << " Client Connection not ready for Consumer"); - callback(ResultNotConnected, BrokerConsumerStats()); -} - -void ConsumerImpl::brokerConsumerStatsListener(Result res, BrokerConsumerStatsImpl brokerConsumerStats, - BrokerConsumerStatsCallback callback) { - if (res == ResultOk) { - Lock 
lock(mutex_); - brokerConsumerStats.setCacheTime(config_.getBrokerConsumerStatsCacheTimeInMs()); - brokerConsumerStats_ = brokerConsumerStats; - } - - if (callback) { - callback(res, BrokerConsumerStats(std::make_shared(brokerConsumerStats))); - } -} - -void ConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { - const auto state = state_.load(); - if (state == Closed || state == Closing) { - LOG_ERROR(getName() << "Client connection already closed."); - if (callback) { - callback(ResultAlreadyClosed); - } - return; - } - - ClientImplPtr client = client_.lock(); - if (!client) { - LOG_ERROR(getName() << "Client is expired when seekAsync " << msgId); - return; - } - const auto requestId = client->newRequestId(); - seekAsyncInternal(requestId, Commands::newSeek(consumerId_, requestId, msgId), msgId, 0L, callback); -} - -void ConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { - const auto state = state_.load(); - if (state == Closed || state == Closing) { - LOG_ERROR(getName() << "Client connection already closed."); - if (callback) { - callback(ResultAlreadyClosed); - } - return; - } - - ClientImplPtr client = client_.lock(); - if (!client) { - LOG_ERROR(getName() << "Client is expired when seekAsync " << timestamp); - return; - } - const auto requestId = client->newRequestId(); - seekAsyncInternal(requestId, Commands::newSeek(consumerId_, requestId, timestamp), MessageId::earliest(), - timestamp, callback); -} - -bool ConsumerImpl::isReadCompacted() { return readCompacted_; } - -inline bool hasMoreMessages(const MessageId& lastMessageIdInBroker, const MessageId& messageId) { - return lastMessageIdInBroker > messageId && lastMessageIdInBroker.entryId() != -1; -} - -void ConsumerImpl::hasMessageAvailableAsync(HasMessageAvailableCallback callback) { - const auto startMessageId = startMessageId_.get(); - Lock lock(mutexForMessageId_); - const auto messageId = - (lastDequedMessageId_ == MessageId::earliest()) ? 
startMessageId.value() : lastDequedMessageId_; - - if (messageId == MessageId::latest()) { - lock.unlock(); - getLastMessageIdAsync([callback](Result result, const GetLastMessageIdResponse& response) { - if (result != ResultOk) { - callback(result, {}); - return; - } - if (response.hasMarkDeletePosition() && response.getLastMessageId().entryId() >= 0) { - // We only care about comparing ledger ids and entry ids as mark delete position doesn't have - // other ids such as batch index - callback(ResultOk, compareLedgerAndEntryId(response.getMarkDeletePosition(), - response.getLastMessageId()) < 0); - } else { - callback(ResultOk, false); - } - }); - } else { - if (hasMoreMessages(lastMessageIdInBroker_, messageId)) { - lock.unlock(); - callback(ResultOk, true); - return; - } - lock.unlock(); - - getLastMessageIdAsync([callback, messageId](Result result, const GetLastMessageIdResponse& response) { - callback(result, (result == ResultOk) && hasMoreMessages(response.getLastMessageId(), messageId)); - }); - } -} - -void ConsumerImpl::getLastMessageIdAsync(BrokerGetLastMessageIdCallback callback) { - const auto state = state_.load(); - if (state == Closed || state == Closing) { - LOG_ERROR(getName() << "Client connection already closed."); - if (callback) { - callback(ResultAlreadyClosed, MessageId()); - } - return; - } - - TimeDuration operationTimeout = seconds(client_.lock()->conf().getOperationTimeoutSeconds()); - BackoffPtr backoff = std::make_shared(milliseconds(100), operationTimeout * 2, milliseconds(0)); - DeadlineTimerPtr timer = executor_->createDeadlineTimer(); - - internalGetLastMessageIdAsync(backoff, operationTimeout, timer, callback); -} - -void ConsumerImpl::internalGetLastMessageIdAsync(const BackoffPtr& backoff, TimeDuration remainTime, - const DeadlineTimerPtr& timer, - BrokerGetLastMessageIdCallback callback) { - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - if (cnx->getServerProtocolVersion() >= proto::v12) { - ClientImplPtr client = 
client_.lock(); - uint64_t requestId = client->newRequestId(); - LOG_DEBUG(getName() << " Sending getLastMessageId Command for Consumer - " << getConsumerId() - << ", requestId - " << requestId); - - auto self = shared_from_this(); - cnx->newGetLastMessageId(consumerId_, requestId) - .addListener([this, self, callback](Result result, const GetLastMessageIdResponse& response) { - if (result == ResultOk) { - LOG_DEBUG(getName() << "getLastMessageId: " << response); - Lock lock(mutexForMessageId_); - lastMessageIdInBroker_ = response.getLastMessageId(); - lock.unlock(); - } else { - LOG_ERROR(getName() << "Failed to getLastMessageId: " << result); - } - callback(result, response); - }); - } else { - LOG_ERROR(getName() << " Operation not supported since server protobuf version " - << cnx->getServerProtocolVersion() << " is older than proto::v12"); - callback(ResultUnsupportedVersionError, MessageId()); - } - } else { - TimeDuration next = std::min(remainTime, backoff->next()); - if (next.total_milliseconds() <= 0) { - LOG_ERROR(getName() << " Client Connection not ready for Consumer"); - callback(ResultNotConnected, MessageId()); - return; - } - remainTime -= next; - - timer->expires_from_now(next); - - auto self = shared_from_this(); - timer->async_wait([this, backoff, remainTime, timer, next, callback, - self](const boost::system::error_code& ec) -> void { - if (ec == boost::asio::error::operation_aborted) { - LOG_DEBUG(getName() << " Get last message id operation was cancelled, code[" << ec << "]."); - return; - } - if (ec) { - LOG_ERROR(getName() << " Failed to get last message id, code[" << ec << "]."); - return; - } - LOG_WARN(getName() << " Could not get connection while getLastMessageId -- Will try again in " - << next.total_milliseconds() << " ms") - this->internalGetLastMessageIdAsync(backoff, remainTime, timer, callback); - }); - } -} - -void ConsumerImpl::setNegativeAcknowledgeEnabledForTesting(bool enabled) { - 
negativeAcksTracker_.setEnabledForTesting(enabled); -} - -void ConsumerImpl::trackMessage(const MessageId& messageId) { - if (hasParent_) { - unAckedMessageTrackerPtr_->remove(messageId); - } else { - unAckedMessageTrackerPtr_->add(messageId); - } -} - -bool ConsumerImpl::isConnected() const { return !getCnx().expired() && state_ == Ready; } - -uint64_t ConsumerImpl::getNumberOfConnectedConsumer() { return isConnected() ? 1 : 0; } - -void ConsumerImpl::seekAsyncInternal(long requestId, SharedBuffer seek, const MessageId& seekId, - long timestamp, ResultCallback callback) { - ClientConnectionPtr cnx = getCnx().lock(); - if (!cnx) { - LOG_ERROR(getName() << " Client Connection not ready for Consumer"); - callback(ResultNotConnected); - return; - } - - const auto originalSeekMessageId = seekMessageId_.get(); - seekMessageId_ = seekId; - duringSeek_ = true; - if (timestamp > 0) { - LOG_INFO(getName() << " Seeking subscription to " << timestamp); - } else { - LOG_INFO(getName() << " Seeking subscription to " << seekId); - } - - std::weak_ptr weakSelf{shared_from_this()}; - - cnx->sendRequestWithId(seek, requestId) - .addListener([this, weakSelf, callback, originalSeekMessageId](Result result, - const ResponseData& responseData) { - auto self = weakSelf.lock(); - if (!self) { - callback(result); - return; - } - if (result == ResultOk) { - LOG_INFO(getName() << "Seek successfully"); - ackGroupingTrackerPtr_->flushAndClean(); - Lock lock(mutexForMessageId_); - lastDequedMessageId_ = MessageId::earliest(); - lock.unlock(); - } else { - LOG_ERROR(getName() << "Failed to seek: " << result); - seekMessageId_ = originalSeekMessageId; - duringSeek_ = false; - } - callback(result); - }); -} - -bool ConsumerImpl::isPriorBatchIndex(int32_t idx) { - return config_.isStartMessageIdInclusive() ? 
idx < startMessageId_.get().value().batchIndex() - : idx <= startMessageId_.get().value().batchIndex(); -} - -bool ConsumerImpl::isPriorEntryIndex(int64_t idx) { - return config_.isStartMessageIdInclusive() ? idx < startMessageId_.get().value().entryId() - : idx <= startMessageId_.get().value().entryId(); -} - -} /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/ConsumerImpl.h b/pulsar-client-cpp/lib/ConsumerImpl.h deleted file mode 100644 index 1ad3a4c372764..0000000000000 --- a/pulsar-client-cpp/lib/ConsumerImpl.h +++ /dev/null @@ -1,326 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_CONSUMERIMPL_H_ -#define LIB_CONSUMERIMPL_H_ - -#include - -#include "pulsar/Result.h" -#include "UnboundedBlockingQueue.h" -#include "HandlerBase.h" -#include "ClientConnection.h" -#include "lib/UnAckedMessageTrackerEnabled.h" -#include "NegativeAcksTracker.h" -#include "Commands.h" -#include "ExecutorService.h" -#include "ConsumerImplBase.h" -#include "lib/UnAckedMessageTrackerDisabled.h" -#include "MessageCrypto.h" -#include "AckGroupingTracker.h" -#include "GetLastMessageIdResponse.h" - -#include "CompressionCodec.h" -#include -#include -#include "BatchAcknowledgementTracker.h" -#include -#include -#include -#include -#include -#include -#include -#include "Synchronized.h" - -using namespace pulsar; - -namespace pulsar { -class UnAckedMessageTracker; -class ExecutorService; -class ConsumerImpl; -class BatchAcknowledgementTracker; -typedef std::shared_ptr MessageCryptoPtr; -typedef std::function BrokerGetLastMessageIdCallback; -typedef std::shared_ptr BackoffPtr; - -enum ConsumerTopicType -{ - NonPartitioned, - Partitioned -}; - -class ConsumerImpl : public ConsumerImplBase, - public HandlerBase, - public std::enable_shared_from_this { - public: - ConsumerImpl(const ClientImplPtr client, const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration&, bool isPersistent, - const ExecutorServicePtr listenerExecutor = ExecutorServicePtr(), bool hasParent = false, - const ConsumerTopicType consumerTopicType = NonPartitioned, - Commands::SubscriptionMode = Commands::SubscriptionModeDurable, - Optional startMessageId = Optional::empty()); - ~ConsumerImpl(); - void setPartitionIndex(int partitionIndex); - int getPartitionIndex(); - void sendFlowPermitsToBroker(const ClientConnectionPtr& cnx, int numMessages); - uint64_t getConsumerId(); - void messageReceived(const ClientConnectionPtr& cnx, const proto::CommandMessage& msg, - bool& isChecksumValid, proto::MessageMetadata& msgMetadata, SharedBuffer& payload); - void 
messageProcessed(Message& msg, bool track = true); - void activeConsumerChanged(bool isActive); - inline proto::CommandSubscribe_SubType getSubType(); - inline proto::CommandSubscribe_InitialPosition getInitialPosition(); - void handleUnsubscribe(Result result, ResultCallback callback); - - /** - * Send individual ACK request of given message ID to broker. - * @param[in] messageId ID of the message to be ACKed. - * @param[in] callback call back function, which is called after sending ACK. For now, it's - * always provided with ResultOk. - */ - void doAcknowledgeIndividual(const MessageId& messageId, ResultCallback callback); - - /** - * Send cumulative ACK request of given message ID to broker. - * @param[in] messageId ID of the message to be ACKed. - * @param[in] callback call back function, which is called after sending ACK. For now, it's - * always provided with ResultOk. - */ - void doAcknowledgeCumulative(const MessageId& messageId, ResultCallback callback); - - // overrided methods from ConsumerImplBase - Future getConsumerCreatedFuture() override; - const std::string& getSubscriptionName() const override; - const std::string& getTopic() const override; - Result receive(Message& msg) override; - Result receive(Message& msg, int timeout) override; - void receiveAsync(ReceiveCallback& callback) override; - void unsubscribeAsync(ResultCallback callback) override; - void acknowledgeAsync(const MessageId& msgId, ResultCallback callback) override; - void acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) override; - void closeAsync(ResultCallback callback) override; - void start() override; - void shutdown() override; - bool isClosed() override; - bool isOpen() override; - Result pauseMessageListener() override; - Result resumeMessageListener() override; - void redeliverUnacknowledgedMessages() override; - void redeliverUnacknowledgedMessages(const std::set& messageIds) override; - const std::string& getName() const override; - int 
getNumOfPrefetchedMessages() const override; - void getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) override; - void seekAsync(const MessageId& msgId, ResultCallback callback) override; - void seekAsync(uint64_t timestamp, ResultCallback callback) override; - void negativeAcknowledge(const MessageId& msgId) override; - bool isConnected() const override; - uint64_t getNumberOfConnectedConsumer() override; - - virtual void disconnectConsumer(); - Result fetchSingleMessageFromBroker(Message& msg); - - virtual bool isCumulativeAcknowledgementAllowed(ConsumerType consumerType); - - virtual void redeliverMessages(const std::set& messageIds); - - virtual bool isReadCompacted(); - virtual void hasMessageAvailableAsync(HasMessageAvailableCallback callback); - virtual void getLastMessageIdAsync(BrokerGetLastMessageIdCallback callback); - - protected: - // overrided methods from HandlerBase - void connectionOpened(const ClientConnectionPtr& cnx) override; - void connectionFailed(Result result) override; - HandlerBaseWeakPtr get_weak_from_this() override { return shared_from_this(); } - - void handleCreateConsumer(const ClientConnectionPtr& cnx, Result result); - - void internalListener(); - - void internalConsumerChangeListener(bool isActive); - - void handleClose(Result result, ResultCallback callback, ConsumerImplPtr consumer); - ConsumerStatsBasePtr consumerStatsBasePtr_; - - private: - bool waitingForZeroQueueSizeMessage; - bool uncompressMessageIfNeeded(const ClientConnectionPtr& cnx, const proto::MessageIdData& messageIdData, - const proto::MessageMetadata& metadata, SharedBuffer& payload, - bool checkMaxMessageSize); - void discardCorruptedMessage(const ClientConnectionPtr& cnx, const proto::MessageIdData& messageId, - proto::CommandAck::ValidationError validationError); - void increaseAvailablePermits(const ClientConnectionPtr& currentCnx, int delta = 1); - void drainIncomingMessageQueue(size_t count); - uint32_t 
receiveIndividualMessagesFromBatch(const ClientConnectionPtr& cnx, Message& batchedMessage, - int redeliveryCount); - bool isPriorBatchIndex(int32_t idx); - bool isPriorEntryIndex(int64_t idx); - void brokerConsumerStatsListener(Result, BrokerConsumerStatsImpl, BrokerConsumerStatsCallback); - - bool decryptMessageIfNeeded(const ClientConnectionPtr& cnx, const proto::CommandMessage& msg, - const proto::MessageMetadata& metadata, SharedBuffer& payload); - - // TODO - Convert these functions to lambda when we move to C++11 - Result receiveHelper(Message& msg); - Result receiveHelper(Message& msg, int timeout); - void statsCallback(Result, ResultCallback, proto::CommandAck_AckType); - void notifyPendingReceivedCallback(Result result, Message& message, const ReceiveCallback& callback); - void failPendingReceiveCallback(); - void setNegativeAcknowledgeEnabledForTesting(bool enabled) override; - void trackMessage(const MessageId& messageId); - void internalGetLastMessageIdAsync(const BackoffPtr& backoff, TimeDuration remainTime, - const DeadlineTimerPtr& timer, - BrokerGetLastMessageIdCallback callback); - - Optional clearReceiveQueue(); - void seekAsyncInternal(long requestId, SharedBuffer seek, const MessageId& seekId, long timestamp, - ResultCallback callback); - - std::mutex mutexForReceiveWithZeroQueueSize; - const ConsumerConfiguration config_; - const std::string subscription_; - std::string originalSubscriptionName_; - const bool isPersistent_; - MessageListener messageListener_; - ConsumerEventListenerPtr eventListener_; - ExecutorServicePtr listenerExecutor_; - bool hasParent_; - ConsumerTopicType consumerTopicType_; - - const Commands::SubscriptionMode subscriptionMode_; - - UnboundedBlockingQueue incomingMessages_; - std::queue pendingReceives_; - std::atomic_int availablePermits_; - const int receiverQueueRefillThreshold_; - uint64_t consumerId_; - std::string consumerName_; - std::string consumerStr_; - int32_t partitionIndex_ = -1; - Promise 
consumerCreatedPromise_; - std::atomic_bool messageListenerRunning_; - CompressionCodecProvider compressionCodecProvider_; - UnAckedMessageTrackerPtr unAckedMessageTrackerPtr_; - BatchAcknowledgementTracker batchAcknowledgementTracker_; - BrokerConsumerStatsImpl brokerConsumerStats_; - NegativeAcksTracker negativeAcksTracker_; - AckGroupingTrackerPtr ackGroupingTrackerPtr_; - - MessageCryptoPtr msgCrypto_; - const bool readCompacted_; - - // Make the access to `lastDequedMessageId_` and `lastMessageIdInBroker_` thread safe - mutable std::mutex mutexForMessageId_; - MessageId lastDequedMessageId_{MessageId::earliest()}; - MessageId lastMessageIdInBroker_{MessageId::earliest()}; - - std::atomic_bool duringSeek_{false}; - Synchronized> startMessageId_{Optional::empty()}; - Synchronized seekMessageId_{MessageId::earliest()}; - - class ChunkedMessageCtx { - public: - ChunkedMessageCtx() : totalChunks_(0) {} - ChunkedMessageCtx(int totalChunks, int totalChunkMessageSize) - : totalChunks_(totalChunks), chunkedMsgBuffer_(SharedBuffer::allocate(totalChunkMessageSize)) { - chunkedMessageIds_.reserve(totalChunks); - } - - ChunkedMessageCtx(const ChunkedMessageCtx&) = delete; - // Here we don't use =default to be compatible with GCC 4.8 - ChunkedMessageCtx(ChunkedMessageCtx&& rhs) noexcept - : totalChunks_(rhs.totalChunks_), - chunkedMsgBuffer_(std::move(rhs.chunkedMsgBuffer_)), - chunkedMessageIds_(std::move(rhs.chunkedMessageIds_)) {} - - bool validateChunkId(int chunkId) const noexcept { return chunkId == numChunks(); } - - void appendChunk(const MessageId& messageId, const SharedBuffer& payload) { - chunkedMessageIds_.emplace_back(messageId); - chunkedMsgBuffer_.write(payload.data(), payload.readableBytes()); - } - - bool isCompleted() const noexcept { return totalChunks_ == numChunks(); } - - const SharedBuffer& getBuffer() const noexcept { return chunkedMsgBuffer_; } - - const std::vector& getChunkedMessageIds() const noexcept { return chunkedMessageIds_; } - - friend 
std::ostream& operator<<(std::ostream& os, const ChunkedMessageCtx& ctx) { - return os << "ChunkedMessageCtx " << ctx.chunkedMsgBuffer_.readableBytes() << " of " - << ctx.chunkedMsgBuffer_.writerIndex() << " bytes, " << ctx.numChunks() << " of " - << ctx.totalChunks_ << " chunks"; - } - - private: - const int totalChunks_; - SharedBuffer chunkedMsgBuffer_; - std::vector chunkedMessageIds_; - - int numChunks() const noexcept { return static_cast(chunkedMessageIds_.size()); } - }; - - const size_t maxPendingChunkedMessage_; - // if queue size is reasonable (most of the time equal to number of producers try to publish messages - // concurrently on the topic) then it guards against broken chunked message which was not fully published - const bool autoAckOldestChunkedMessageOnQueueFull_; - - // The key is UUID, value is the associated ChunkedMessageCtx of the chunked message. - std::unordered_map chunkedMessagesMap_; - // This list contains all the keys of `chunkedMessagesMap_`, each key is an UUID that identifies a pending - // chunked message. Once the number of pending chunked messages exceeds the limit, the oldest UUIDs and - // the associated ChunkedMessageCtx will be removed. - std::list pendingChunkedMessageUuidQueue_; - - // The key is UUID, value is the associated ChunkedMessageCtx of the chunked message. - MapCache chunkedMessageCache_; - mutable std::mutex chunkProcessMutex_; - - /** - * Process a chunk. If the chunk is the last chunk of a message, concatenate all buffered chunks into the - * payload and return it. 
- * - * @param payload the payload of a chunk - * @param metadata the message metadata - * @param messageId - * @param messageIdData - * @param cnx - * - * @return the concatenated payload if chunks are concatenated into a completed message payload - * successfully, else Optional::empty() - */ - Optional processMessageChunk(const SharedBuffer& payload, - const proto::MessageMetadata& metadata, - const MessageId& messageId, - const proto::MessageIdData& messageIdData, - const ClientConnectionPtr& cnx); - - friend class PulsarFriend; - - // these two declared friend to access setNegativeAcknowledgeEnabledForTesting - friend class MultiTopicsConsumerImpl; - - FRIEND_TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery); - FRIEND_TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery); - FRIEND_TEST(ConsumerTest, testBatchUnAckedMessageTracker); -}; - -} /* namespace pulsar */ - -#endif /* LIB_CONSUMERIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ConsumerImplBase.h b/pulsar-client-cpp/lib/ConsumerImplBase.h deleted file mode 100644 index 693d4da9a3779..0000000000000 --- a/pulsar-client-cpp/lib/ConsumerImplBase.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_CONSUMER_IMPL_BASE_HEADER -#define PULSAR_CONSUMER_IMPL_BASE_HEADER -#include -#include - -#include - -namespace pulsar { -class ConsumerImplBase; - -typedef std::weak_ptr ConsumerImplBaseWeakPtr; - -class ConsumerImplBase { - public: - virtual ~ConsumerImplBase() {} - virtual Future getConsumerCreatedFuture() = 0; - virtual const std::string& getSubscriptionName() const = 0; - virtual const std::string& getTopic() const = 0; - virtual Result receive(Message& msg) = 0; - virtual Result receive(Message& msg, int timeout) = 0; - virtual void receiveAsync(ReceiveCallback& callback) = 0; - virtual void unsubscribeAsync(ResultCallback callback) = 0; - virtual void acknowledgeAsync(const MessageId& msgId, ResultCallback callback) = 0; - virtual void acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) = 0; - virtual void closeAsync(ResultCallback callback) = 0; - virtual void start() = 0; - virtual void shutdown() = 0; - virtual bool isClosed() = 0; - virtual bool isOpen() = 0; - virtual Result pauseMessageListener() = 0; - virtual Result resumeMessageListener() = 0; - virtual void redeliverUnacknowledgedMessages() = 0; - virtual void redeliverUnacknowledgedMessages(const std::set& messageIds) = 0; - virtual const std::string& getName() const = 0; - virtual int getNumOfPrefetchedMessages() const = 0; - virtual void getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) = 0; - virtual void seekAsync(const MessageId& msgId, ResultCallback callback) = 0; - virtual void seekAsync(uint64_t timestamp, ResultCallback callback) = 0; - virtual void negativeAcknowledge(const MessageId& msgId) = 0; - virtual bool isConnected() const = 0; - virtual uint64_t getNumberOfConnectedConsumer() = 0; - - private: - virtual void setNegativeAcknowledgeEnabledForTesting(bool enabled) = 0; - - friend class PulsarFriend; -}; -} // namespace pulsar -#endif // PULSAR_CONSUMER_IMPL_BASE_HEADER diff --git 
a/pulsar-client-cpp/lib/CryptoKeyReader.cc b/pulsar-client-cpp/lib/CryptoKeyReader.cc deleted file mode 100644 index 1eb73e8fe9fe9..0000000000000 --- a/pulsar-client-cpp/lib/CryptoKeyReader.cc +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include - -using namespace pulsar; - -CryptoKeyReader::CryptoKeyReader() {} -CryptoKeyReader::~CryptoKeyReader() {} - -Result CryptoKeyReader::getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - return ResultInvalidConfiguration; -} - -Result CryptoKeyReader::getPrivateKey(const std::string& keyName, - std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - return ResultInvalidConfiguration; -} - -DefaultCryptoKeyReader::DefaultCryptoKeyReader(const std::string& publicKeyPath, - const std::string& privateKeyPath) { - publicKeyPath_ = publicKeyPath; - privateKeyPath_ = privateKeyPath; -} - -DefaultCryptoKeyReader::~DefaultCryptoKeyReader() {} - -void DefaultCryptoKeyReader::readFile(std::string fileName, std::string& fileContents) const { - std::ifstream ifs(fileName); - std::stringstream fileStream; - fileStream << ifs.rdbuf(); - - fileContents = fileStream.str(); -} - -Result DefaultCryptoKeyReader::getPublicKey(const std::string& keyName, - std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - std::string keyContents; - readFile(publicKeyPath_, keyContents); - - encKeyInfo.setKey(keyContents); - return ResultOk; -} - -Result DefaultCryptoKeyReader::getPrivateKey(const std::string& keyName, - std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - std::string keyContents; - readFile(privateKeyPath_, keyContents); - - encKeyInfo.setKey(keyContents); - return ResultOk; -} - -CryptoKeyReaderPtr DefaultCryptoKeyReader::create(const std::string& publicKeyPath, - const std::string& privateKeyPath) { - return CryptoKeyReaderPtr(new DefaultCryptoKeyReader(publicKeyPath, privateKeyPath)); -} \ No newline at end of file diff --git a/pulsar-client-cpp/lib/DeprecatedException.cc b/pulsar-client-cpp/lib/DeprecatedException.cc deleted file mode 100644 index 283d8bb6a562a..0000000000000 --- a/pulsar-client-cpp/lib/DeprecatedException.cc +++ /dev/null @@ -1,26 
+0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include - -namespace pulsar { -const std::string DeprecatedException::message_prefix = "Deprecated: "; - -DeprecatedException::DeprecatedException(const std::string& __arg) - : std::runtime_error(message_prefix + __arg) {} -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/EncryptionKeyInfo.cc b/pulsar-client-cpp/lib/EncryptionKeyInfo.cc deleted file mode 100644 index 68c6c0a888421..0000000000000 --- a/pulsar-client-cpp/lib/EncryptionKeyInfo.cc +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include "EncryptionKeyInfoImpl.h" - -namespace pulsar { - -EncryptionKeyInfo::EncryptionKeyInfo() : impl_(new EncryptionKeyInfoImpl()) {} - -EncryptionKeyInfo::EncryptionKeyInfo(EncryptionKeyInfoImplPtr impl) : impl_(impl) {} - -std::string& EncryptionKeyInfo::getKey() { return impl_->getKey(); } - -void EncryptionKeyInfo::setKey(std::string key) { impl_->setKey(key); } - -EncryptionKeyInfo::StringMap& EncryptionKeyInfo::getMetadata() { return impl_->getMetadata(); } - -void EncryptionKeyInfo::setMetadata(StringMap& metadata) { impl_->setMetadata(metadata); } - -}; /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/EncryptionKeyInfoImpl.cc b/pulsar-client-cpp/lib/EncryptionKeyInfoImpl.cc deleted file mode 100644 index e61ca700c57e6..0000000000000 --- a/pulsar-client-cpp/lib/EncryptionKeyInfoImpl.cc +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include "EncryptionKeyInfoImpl.h" - -namespace pulsar { - -EncryptionKeyInfoImpl::EncryptionKeyInfoImpl(std::string key, StringMap& metadata) - : metadata_(metadata), key_(key) {} - -std::string& EncryptionKeyInfoImpl::getKey() { return key_; } - -void EncryptionKeyInfoImpl::setKey(std::string key) { key_ = key; } - -EncryptionKeyInfoImpl::StringMap& EncryptionKeyInfoImpl::getMetadata() { return metadata_; } - -void EncryptionKeyInfoImpl::setMetadata(StringMap& metadata) { metadata_ = metadata; } - -}; /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/EncryptionKeyInfoImpl.h b/pulsar-client-cpp/lib/EncryptionKeyInfoImpl.h deleted file mode 100644 index 0470d1cbaf521..0000000000000 --- a/pulsar-client-cpp/lib/EncryptionKeyInfoImpl.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_ENCRYPTIONKEYINFOIMPL_H_ -#define LIB_ENCRYPTIONKEYINFOIMPL_H_ - -#include -#include -#include - -namespace pulsar { - -class PULSAR_PUBLIC EncryptionKeyInfoImpl { - public: - typedef std::map StringMap; - - EncryptionKeyInfoImpl() = default; - - EncryptionKeyInfoImpl(std::string key, StringMap& metadata); - - std::string& getKey(); - - void setKey(std::string key); - - StringMap& getMetadata(void); - - void setMetadata(StringMap& metadata); - - private: - StringMap metadata_; - std::string key_; -}; - -} /* namespace pulsar */ - -#endif /* LIB_ENCRYPTIONKEYINFOIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ExecutorService.cc b/pulsar-client-cpp/lib/ExecutorService.cc deleted file mode 100644 index a7390f19cea44..0000000000000 --- a/pulsar-client-cpp/lib/ExecutorService.cc +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "ExecutorService.h" - -#include -#include -#include -#include "TimeUtils.h" - -#include "LogUtils.h" -DECLARE_LOG_OBJECT() - -namespace pulsar { - -ExecutorService::ExecutorService() {} - -ExecutorService::~ExecutorService() { close(0); } - -void ExecutorService::start() { - auto self = shared_from_this(); - std::thread t{[self] { - if (self->isClosed()) { - return; - } - LOG_DEBUG("Run io_service in a single thread"); - boost::system::error_code ec; - self->getIOService().run(ec); - if (ec) { - LOG_ERROR("Failed to run io_service: " << ec.message()); - } else { - LOG_DEBUG("Event loop of ExecutorService exits successfully"); - } - self->ioServiceDone_ = true; - self->cond_.notify_all(); - }}; - t.detach(); -} - -ExecutorServicePtr ExecutorService::create() { - // make_shared cannot access the private constructor, so we need to expose the private constructor via a - // derived class. - struct ExecutorServiceImpl : public ExecutorService {}; - - auto executor = std::make_shared(); - executor->start(); - return std::static_pointer_cast(executor); -} - -/* - * factory method of boost::asio::ip::tcp::socket associated with io_service_ instance - * @ returns shared_ptr to this socket - */ -SocketPtr ExecutorService::createSocket() { return SocketPtr(new boost::asio::ip::tcp::socket(io_service_)); } - -TlsSocketPtr ExecutorService::createTlsSocket(SocketPtr &socket, boost::asio::ssl::context &ctx) { - return std::shared_ptr >( - new boost::asio::ssl::stream(*socket, ctx)); -} - -/* - * factory method of Resolver object associated with io_service_ instance - * @returns shraed_ptr to resolver object - */ -TcpResolverPtr ExecutorService::createTcpResolver() { - return TcpResolverPtr(new boost::asio::ip::tcp::resolver(io_service_)); -} - -DeadlineTimerPtr ExecutorService::createDeadlineTimer() { - return DeadlineTimerPtr(new boost::asio::deadline_timer(io_service_)); -} - -void ExecutorService::close(long timeoutMs) { - bool expectedState = false; - if 
(!closed_.compare_exchange_strong(expectedState, true)) { - return; - } - if (timeoutMs == 0) { // non-blocking - io_service_.stop(); - return; - } - - std::unique_lock lock{mutex_}; - io_service_.stop(); - if (timeoutMs > 0) { - cond_.wait_for(lock, std::chrono::milliseconds(timeoutMs), [this] { return ioServiceDone_.load(); }); - } else { // < 0 - cond_.wait(lock, [this] { return ioServiceDone_.load(); }); - } -} - -void ExecutorService::postWork(std::function task) { io_service_.post(task); } - -///////////////////// - -ExecutorServiceProvider::ExecutorServiceProvider(int nthreads) - : executors_(nthreads), executorIdx_(0), mutex_() {} - -ExecutorServicePtr ExecutorServiceProvider::get() { - Lock lock(mutex_); - - int idx = executorIdx_++ % executors_.size(); - if (!executors_[idx]) { - executors_[idx] = ExecutorService::create(); - } - - return executors_[idx]; -} - -void ExecutorServiceProvider::close(long timeoutMs) { - Lock lock(mutex_); - - TimeoutProcessor timeoutProcessor{timeoutMs}; - for (auto &&executor : executors_) { - timeoutProcessor.tik(); - if (executor) { - executor->close(timeoutProcessor.getLeftTimeout()); - } - timeoutProcessor.tok(); - executor.reset(); - } -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ExecutorService.h b/pulsar-client-cpp/lib/ExecutorService.h deleted file mode 100644 index e4cbb3ce62ef0..0000000000000 --- a/pulsar-client-cpp/lib/ExecutorService.h +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef _PULSAR_EXECUTOR_SERVICE_HEADER_ -#define _PULSAR_EXECUTOR_SERVICE_HEADER_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { -typedef std::shared_ptr SocketPtr; -typedef std::shared_ptr > TlsSocketPtr; -typedef std::shared_ptr TcpResolverPtr; -typedef std::shared_ptr DeadlineTimerPtr; -class PULSAR_PUBLIC ExecutorService : public std::enable_shared_from_this { - public: - using IOService = boost::asio::io_service; - using SharedPtr = std::shared_ptr; - - static SharedPtr create(); - ~ExecutorService(); - - ExecutorService(const ExecutorService &) = delete; - ExecutorService &operator=(const ExecutorService &) = delete; - - SocketPtr createSocket(); - static TlsSocketPtr createTlsSocket(SocketPtr &socket, boost::asio::ssl::context &ctx); - TcpResolverPtr createTcpResolver(); - DeadlineTimerPtr createDeadlineTimer(); - void postWork(std::function task); - - // See TimeoutProcessor for the semantics of the parameter. 
- void close(long timeoutMs = 3000); - - IOService &getIOService() { return io_service_; } - bool isClosed() const noexcept { return closed_; } - - private: - /* - * io_service is our interface to os, io object schedule async ops on this object - */ - IOService io_service_; - - /* - * work will not let io_service.run() return even after it has finished work - * it will keep it running in the background so we don't have to take care of it - */ - IOService::work work_{io_service_}; - - std::atomic_bool closed_{false}; - std::mutex mutex_; - std::condition_variable cond_; - std::atomic_bool ioServiceDone_{false}; - - ExecutorService(); - - void start(); -}; - -using ExecutorServicePtr = ExecutorService::SharedPtr; - -class PULSAR_PUBLIC ExecutorServiceProvider { - public: - explicit ExecutorServiceProvider(int nthreads); - - ExecutorServicePtr get(); - - // See TimeoutProcessor for the semantics of the parameter. - void close(long timeoutMs = 3000); - - private: - typedef std::vector ExecutorList; - ExecutorList executors_; - int executorIdx_; - std::mutex mutex_; - typedef std::unique_lock Lock; -}; - -typedef std::shared_ptr ExecutorServiceProviderPtr; -} // namespace pulsar - -#endif //_PULSAR_EXECUTOR_SERVICE_HEADER_ diff --git a/pulsar-client-cpp/lib/FileLoggerFactory.cc b/pulsar-client-cpp/lib/FileLoggerFactory.cc deleted file mode 100644 index a82613f00a078..0000000000000 --- a/pulsar-client-cpp/lib/FileLoggerFactory.cc +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include "lib/FileLoggerFactoryImpl.h" - -namespace pulsar { - -FileLoggerFactory::FileLoggerFactory(Logger::Level level, const std::string& logFilePath) - : impl_(new FileLoggerFactoryImpl(level, logFilePath)) {} - -FileLoggerFactory::~FileLoggerFactory() {} - -Logger* FileLoggerFactory::getLogger(const std::string& filename) { return impl_->getLogger(filename); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/FileLoggerFactoryImpl.h b/pulsar-client-cpp/lib/FileLoggerFactoryImpl.h deleted file mode 100644 index 75329c65fc88e..0000000000000 --- a/pulsar-client-cpp/lib/FileLoggerFactoryImpl.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include -#include - -#include "lib/SimpleLogger.h" - -namespace pulsar { - -class FileLoggerFactoryImpl { - public: - FileLoggerFactoryImpl(Logger::Level level, const std::string& logFilePath) - : level_(level), os_(logFilePath, std::ios_base::out | std::ios_base::app) {} - - ~FileLoggerFactoryImpl() { os_.close(); } - - Logger* getLogger(const std::string& filename) { return new SimpleLogger(os_, filename, level_); } - - private: - const Logger::Level level_; - std::ofstream os_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Future.h b/pulsar-client-cpp/lib/Future.h deleted file mode 100644 index 6754c890399d8..0000000000000 --- a/pulsar-client-cpp/lib/Future.h +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_FUTURE_H_ -#define LIB_FUTURE_H_ - -#include -#include -#include -#include - -#include - -typedef std::unique_lock Lock; - -namespace pulsar { - -template -struct InternalState { - std::mutex mutex; - std::condition_variable condition; - Result result; - Type value; - bool complete; - - std::list > listeners; -}; - -template -class Future { - public: - typedef std::function ListenerCallback; - - Future& addListener(ListenerCallback callback) { - InternalState* state = state_.get(); - Lock lock(state->mutex); - - if (state->complete) { - lock.unlock(); - callback(state->result, state->value); - } else { - state->listeners.push_back(callback); - } - - return *this; - } - - Result get(Type& result) { - InternalState* state = state_.get(); - Lock lock(state->mutex); - - if (!state->complete) { - // Wait for result - while (!state->complete) { - state->condition.wait(lock); - } - } - - result = state->value; - return state->result; - } - - template - bool get(Result& res, Type& value, Duration d) { - InternalState* state = state_.get(); - Lock lock(state->mutex); - - if (!state->complete) { - // Wait for result - while (!state->complete) { - if (!state->condition.wait_for(lock, d, [&state] { return state->complete; })) { - // Timeout while waiting for the future to complete - return false; - } - } - } - - value = state->value; - res = state->result; - return true; - } - - private: - typedef std::shared_ptr > InternalStatePtr; - Future(InternalStatePtr state) : state_(state) {} - - std::shared_ptr > state_; - - template - friend class Promise; -}; - -template -class Promise { - public: - Promise() : state_(std::make_shared >()) {} - - bool setValue(const Type& value) const { - static Result DEFAULT_RESULT; - InternalState* state = state_.get(); - Lock lock(state->mutex); - - if (state->complete) { - return false; - } - - state->value = value; - state->result = DEFAULT_RESULT; - state->complete = true; - - decltype(state->listeners) listeners; - 
listeners.swap(state->listeners); - - lock.unlock(); - - for (auto& callback : listeners) { - callback(DEFAULT_RESULT, value); - } - - state->condition.notify_all(); - return true; - } - - bool setFailed(Result result) const { - static Type DEFAULT_VALUE; - InternalState* state = state_.get(); - Lock lock(state->mutex); - - if (state->complete) { - return false; - } - - state->result = result; - state->complete = true; - - decltype(state->listeners) listeners; - listeners.swap(state->listeners); - - lock.unlock(); - - for (auto& callback : listeners) { - callback(result, DEFAULT_VALUE); - } - - state->condition.notify_all(); - return true; - } - - bool isComplete() const { - InternalState* state = state_.get(); - Lock lock(state->mutex); - return state->complete; - } - - Future getFuture() const { return Future(state_); } - - private: - typedef std::function ListenerCallback; - std::shared_ptr > state_; -}; - -class Void {}; - -} /* namespace pulsar */ - -#endif /* LIB_FUTURE_H_ */ diff --git a/pulsar-client-cpp/lib/GetLastMessageIdResponse.h b/pulsar-client-cpp/lib/GetLastMessageIdResponse.h deleted file mode 100644 index 0acb78394e115..0000000000000 --- a/pulsar-client-cpp/lib/GetLastMessageIdResponse.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include - -namespace pulsar { - -class GetLastMessageIdResponse { - friend std::ostream& operator<<(std::ostream& os, const GetLastMessageIdResponse& response) { - os << "lastMessageId: " << response.lastMessageId_; - if (response.hasMarkDeletePosition_) { - os << ", markDeletePosition: " << response.markDeletePosition_; - } - return os; - } - - public: - GetLastMessageIdResponse() = default; - - GetLastMessageIdResponse(const MessageId& lastMessageId) - : lastMessageId_(lastMessageId), hasMarkDeletePosition_{false} {} - - GetLastMessageIdResponse(const MessageId& lastMessageId, const MessageId& markDeletePosition) - : lastMessageId_(lastMessageId), - markDeletePosition_(markDeletePosition), - hasMarkDeletePosition_(true) {} - - const MessageId& getLastMessageId() const noexcept { return lastMessageId_; } - const MessageId& getMarkDeletePosition() const noexcept { return markDeletePosition_; } - bool hasMarkDeletePosition() const noexcept { return hasMarkDeletePosition_; } - - private: - MessageId lastMessageId_; - MessageId markDeletePosition_; - bool hasMarkDeletePosition_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/HTTPLookupService.cc b/pulsar-client-cpp/lib/HTTPLookupService.cc deleted file mode 100644 index 61392666a93fa..0000000000000 --- a/pulsar-client-cpp/lib/HTTPLookupService.cc +++ /dev/null @@ -1,398 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include - -#include - -#include -#include -namespace ptree = boost::property_tree; - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -const static std::string V1_PATH = "/lookup/v2/destination/"; -const static std::string V2_PATH = "/lookup/v2/topic/"; - -const static std::string ADMIN_PATH_V1 = "/admin/"; -const static std::string ADMIN_PATH_V2 = "/admin/v2/"; - -const static int MAX_HTTP_REDIRECTS = 20; -const static std::string PARTITION_METHOD_NAME = "partitions"; -const static int NUMBER_OF_LOOKUP_THREADS = 1; - -static inline bool needRedirection(long code) { return (code == 307 || code == 302 || code == 301); } - -HTTPLookupService::CurlInitializer::CurlInitializer() { - // Once per application - https://curl.haxx.se/mail/lib-2015-11/0052.html - curl_global_init(CURL_GLOBAL_ALL); -} -HTTPLookupService::CurlInitializer::~CurlInitializer() { curl_global_cleanup(); } - -HTTPLookupService::CurlInitializer HTTPLookupService::curlInitializer; - -HTTPLookupService::HTTPLookupService(ServiceNameResolver &serviceNameResolver, - const ClientConfiguration &clientConfiguration, - const AuthenticationPtr &authData) - : executorProvider_(std::make_shared(NUMBER_OF_LOOKUP_THREADS)), - serviceNameResolver_(serviceNameResolver), - authenticationPtr_(authData), - lookupTimeoutInSeconds_(clientConfiguration.getOperationTimeoutSeconds()), - tlsTrustCertsFilePath_(clientConfiguration.getTlsTrustCertsFilePath()), - isUseTls_(clientConfiguration.isUseTls()), - tlsAllowInsecure_(clientConfiguration.isTlsAllowInsecureConnection()), - 
tlsValidateHostname_(clientConfiguration.isValidateHostName()) {} - -auto HTTPLookupService::getBroker(const TopicName &topicName) -> LookupResultFuture { - LookupResultPromise promise; - - const auto &url = serviceNameResolver_.resolveHost(); - std::stringstream completeUrlStream; - if (topicName.isV2Topic()) { - completeUrlStream << url << V2_PATH << topicName.getDomain() << "/" << topicName.getProperty() << '/' - << topicName.getNamespacePortion() << '/' << topicName.getEncodedLocalName(); - } else { - completeUrlStream << url << V1_PATH << topicName.getDomain() << "/" << topicName.getProperty() << '/' - << topicName.getCluster() << '/' << topicName.getNamespacePortion() << '/' - << topicName.getEncodedLocalName(); - } - - const auto completeUrl = completeUrlStream.str(); - auto self = shared_from_this(); - executorProvider_->get()->postWork([this, self, promise, completeUrl] { - std::string responseData; - Result result = sendHTTPRequest(completeUrl, responseData); - - if (result != ResultOk) { - promise.setFailed(result); - } else { - const auto lookupDataResultPtr = parseLookupData(responseData); - const auto brokerAddress = (serviceNameResolver_.useTls() ? 
lookupDataResultPtr->getBrokerUrlTls() - : lookupDataResultPtr->getBrokerUrl()); - promise.setValue({brokerAddress, brokerAddress}); - } - }); - return promise.getFuture(); -} - -Future HTTPLookupService::getPartitionMetadataAsync( - const TopicNamePtr &topicName) { - LookupPromise promise; - std::stringstream completeUrlStream; - - const auto &url = serviceNameResolver_.resolveHost(); - if (topicName->isV2Topic()) { - completeUrlStream << url << ADMIN_PATH_V2 << topicName->getDomain() << '/' << topicName->getProperty() - << '/' << topicName->getNamespacePortion() << '/' - << topicName->getEncodedLocalName() << '/' << PARTITION_METHOD_NAME; - } else { - completeUrlStream << url << ADMIN_PATH_V1 << topicName->getDomain() << '/' << topicName->getProperty() - << '/' << topicName->getCluster() << '/' << topicName->getNamespacePortion() << '/' - << topicName->getEncodedLocalName() << '/' << PARTITION_METHOD_NAME; - } - - completeUrlStream << "?checkAllowAutoCreation=true"; - executorProvider_->get()->postWork(std::bind(&HTTPLookupService::handleLookupHTTPRequest, - shared_from_this(), promise, completeUrlStream.str(), - PartitionMetaData)); - return promise.getFuture(); -} - -Future HTTPLookupService::getTopicsOfNamespaceAsync( - const NamespaceNamePtr &nsName) { - NamespaceTopicsPromise promise; - std::stringstream completeUrlStream; - - const auto &url = serviceNameResolver_.resolveHost(); - if (nsName->isV2()) { - completeUrlStream << url << ADMIN_PATH_V2 << "namespaces" << '/' << nsName->toString() << '/' - << "topics"; - } else { - completeUrlStream << url << ADMIN_PATH_V1 << "namespaces" << '/' << nsName->toString() << '/' - << "destinations"; - } - - executorProvider_->get()->postWork(std::bind(&HTTPLookupService::handleNamespaceTopicsHTTPRequest, - shared_from_this(), promise, completeUrlStream.str())); - return promise.getFuture(); -} - -static size_t curlWriteCallback(void *contents, size_t size, size_t nmemb, void *responseDataPtr) { - ((std::string 
*)responseDataPtr)->append((char *)contents, size * nmemb); - return size * nmemb; -} - -void HTTPLookupService::handleNamespaceTopicsHTTPRequest(NamespaceTopicsPromise promise, - const std::string completeUrl) { - std::string responseData; - Result result = sendHTTPRequest(completeUrl, responseData); - - if (result != ResultOk) { - promise.setFailed(result); - } else { - promise.setValue(parseNamespaceTopicsData(responseData)); - } -} - -Result HTTPLookupService::sendHTTPRequest(std::string completeUrl, std::string &responseData) { - uint16_t reqCount = 0; - Result retResult = ResultOk; - while (++reqCount <= MAX_HTTP_REDIRECTS) { - CURL *handle; - CURLcode res; - std::string version = std::string("Pulsar-CPP-v") + _PULSAR_VERSION_INTERNAL_; - handle = curl_easy_init(); - - if (!handle) { - LOG_ERROR("Unable to curl_easy_init for url " << completeUrl); - // No curl_easy_cleanup required since handle not initialized - return ResultLookupError; - } - // set URL - curl_easy_setopt(handle, CURLOPT_URL, completeUrl.c_str()); - - // Write callback - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); - curl_easy_setopt(handle, CURLOPT_WRITEDATA, &responseData); - - // New connection is made for each call - curl_easy_setopt(handle, CURLOPT_FRESH_CONNECT, 1L); - curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); - - // Skipping signal handling - results in timeouts not honored during the DNS lookup - curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1L); - - // Timer - curl_easy_setopt(handle, CURLOPT_TIMEOUT, lookupTimeoutInSeconds_); - - // Set User Agent - curl_easy_setopt(handle, CURLOPT_USERAGENT, version.c_str()); - - // Fail if HTTP return code >=400 - curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1L); - - // Authorization data - AuthenticationDataPtr authDataContent; - Result authResult = authenticationPtr_->getAuthData(authDataContent); - if (authResult != ResultOk) { - LOG_ERROR("Failed to getAuthData: " << authResult); - curl_easy_cleanup(handle); - 
return authResult; - } - struct curl_slist *list = NULL; - if (authDataContent->hasDataForHttp()) { - list = curl_slist_append(list, authDataContent->getHttpHeaders().c_str()); - } - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, list); - - // TLS - if (isUseTls_) { - if (curl_easy_setopt(handle, CURLOPT_SSLENGINE, NULL) != CURLE_OK) { - LOG_ERROR("Unable to load SSL engine for url " << completeUrl); - curl_easy_cleanup(handle); - return ResultConnectError; - } - if (curl_easy_setopt(handle, CURLOPT_SSLENGINE_DEFAULT, 1L) != CURLE_OK) { - LOG_ERROR("Unable to load SSL engine as default, for url " << completeUrl); - curl_easy_cleanup(handle); - return ResultConnectError; - } - curl_easy_setopt(handle, CURLOPT_SSLCERTTYPE, "PEM"); - - if (tlsAllowInsecure_) { - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L); - } else { - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 1L); - } - - if (!tlsTrustCertsFilePath_.empty()) { - curl_easy_setopt(handle, CURLOPT_CAINFO, tlsTrustCertsFilePath_.c_str()); - } - - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, tlsValidateHostname_ ? 
1L : 0L); - - if (authDataContent->hasDataForTls()) { - curl_easy_setopt(handle, CURLOPT_SSLCERT, authDataContent->getTlsCertificates().c_str()); - curl_easy_setopt(handle, CURLOPT_SSLKEY, authDataContent->getTlsPrivateKey().c_str()); - } - } - - LOG_INFO("Curl [" << reqCount << "] Lookup Request sent for " << completeUrl); - - // Make get call to server - res = curl_easy_perform(handle); - - long response_code = -1; - curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); - LOG_INFO("Response received for url " << completeUrl << " response_code " << response_code - << " curl res " << res); - - // Free header list - curl_slist_free_all(list); - - switch (res) { - case CURLE_OK: - long response_code; - curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); - LOG_INFO("Response received for url " << completeUrl << " code " << response_code); - if (response_code == 200) { - retResult = ResultOk; - } else if (needRedirection(response_code)) { - char *url = NULL; - curl_easy_getinfo(handle, CURLINFO_REDIRECT_URL, &url); - LOG_INFO("Response from url " << completeUrl << " to new url " << url); - completeUrl = url; - retResult = ResultLookupError; - } else { - retResult = ResultLookupError; - } - break; - case CURLE_COULDNT_CONNECT: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultRetryable; - break; - case CURLE_COULDNT_RESOLVE_PROXY: - case CURLE_COULDNT_RESOLVE_HOST: - case CURLE_HTTP_RETURNED_ERROR: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultConnectError; - break; - case CURLE_READ_ERROR: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultReadError; - break; - case CURLE_OPERATION_TIMEDOUT: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultTimeout; - break; - default: - LOG_ERROR("Response failed for url " << completeUrl << ". 
Error Code " << res); - retResult = ResultLookupError; - break; - } - curl_easy_cleanup(handle); - if (!needRedirection(response_code)) { - break; - } - } - - return retResult; -} - -LookupDataResultPtr HTTPLookupService::parsePartitionData(const std::string &json) { - ptree::ptree root; - std::stringstream stream; - stream << json; - try { - ptree::read_json(stream, root); - } catch (ptree::json_parser_error &e) { - LOG_ERROR("Failed to parse json of Partition Metadata: " << e.what() << "\nInput Json = " << json); - return LookupDataResultPtr(); - } - - LookupDataResultPtr lookupDataResultPtr = std::make_shared(); - lookupDataResultPtr->setPartitions(root.get("partitions", 0)); - LOG_INFO("parsePartitionData = " << *lookupDataResultPtr); - return lookupDataResultPtr; -} - -LookupDataResultPtr HTTPLookupService::parseLookupData(const std::string &json) { - ptree::ptree root; - std::stringstream stream; - stream << json; - try { - ptree::read_json(stream, root); - } catch (ptree::json_parser_error &e) { - LOG_ERROR("Failed to parse json : " << e.what() << "\nInput Json = " << json); - return LookupDataResultPtr(); - } - - const std::string defaultNotFoundString = "Url Not found"; - const std::string brokerUrl = root.get("brokerUrl", defaultNotFoundString); - if (brokerUrl == defaultNotFoundString) { - LOG_ERROR("malformed json! - brokerUrl not present" << json); - return LookupDataResultPtr(); - } - - std::string brokerUrlTls = root.get("brokerUrlTls", defaultNotFoundString); - if (brokerUrlTls == defaultNotFoundString) { - brokerUrlTls = root.get("brokerUrlSsl", defaultNotFoundString); - if (brokerUrlTls == defaultNotFoundString) { - LOG_ERROR("malformed json! 
- brokerUrlTls not present" << json); - return LookupDataResultPtr(); - } - } - - LookupDataResultPtr lookupDataResultPtr = std::make_shared(); - lookupDataResultPtr->setBrokerUrl(brokerUrl); - lookupDataResultPtr->setBrokerUrlTls(brokerUrlTls); - - LOG_INFO("parseLookupData = " << *lookupDataResultPtr); - return lookupDataResultPtr; -} - -NamespaceTopicsPtr HTTPLookupService::parseNamespaceTopicsData(const std::string &json) { - LOG_DEBUG("GetNamespaceTopics json = " << json); - ptree::ptree root; - std::stringstream stream; - stream << json; - try { - ptree::read_json(stream, root); - } catch (ptree::json_parser_error &e) { - LOG_ERROR("Failed to parse json of Topics of Namespace: " << e.what() << "\nInput Json = " << json); - return NamespaceTopicsPtr(); - } - - // passed in json is like: ["topic1", "topic2"...] - // root will be an array of topics - std::set topicSet; - // get all topics - for (const auto &item : root) { - // remove partition part - const std::string topicName = item.second.get_value(); - int pos = topicName.find("-partition-"); - std::string filteredName = topicName.substr(0, pos); - - // filter duped topic name - if (topicSet.find(filteredName) == topicSet.end()) { - topicSet.insert(filteredName); - } - } - - NamespaceTopicsPtr topicsResultPtr = - std::make_shared>(topicSet.begin(), topicSet.end()); - - return topicsResultPtr; -} - -void HTTPLookupService::handleLookupHTTPRequest(LookupPromise promise, const std::string completeUrl, - RequestType requestType) { - std::string responseData; - Result result = sendHTTPRequest(completeUrl, responseData); - - if (result != ResultOk) { - promise.setFailed(result); - } else { - promise.setValue((requestType == PartitionMetaData) ? 
parsePartitionData(responseData) - : parseLookupData(responseData)); - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/HTTPLookupService.h b/pulsar-client-cpp/lib/HTTPLookupService.h deleted file mode 100644 index f401e879a5b3d..0000000000000 --- a/pulsar-client-cpp/lib/HTTPLookupService.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_CPP_HTTPLOOKUPSERVICE_H -#define PULSAR_CPP_HTTPLOOKUPSERVICE_H - -#include -#include -#include -#include -#include - -namespace pulsar { -class HTTPLookupService : public LookupService, public std::enable_shared_from_this { - class CurlInitializer { - public: - CurlInitializer(); - ~CurlInitializer(); - }; - static CurlInitializer curlInitializer; - - enum RequestType - { - Lookup, - PartitionMetaData - }; - - typedef Promise LookupPromise; - - ExecutorServiceProviderPtr executorProvider_; - ServiceNameResolver& serviceNameResolver_; - AuthenticationPtr authenticationPtr_; - int lookupTimeoutInSeconds_; - std::string tlsTrustCertsFilePath_; - bool isUseTls_; - bool tlsAllowInsecure_; - bool tlsValidateHostname_; - - static LookupDataResultPtr parsePartitionData(const std::string&); - static LookupDataResultPtr parseLookupData(const std::string&); - static NamespaceTopicsPtr parseNamespaceTopicsData(const std::string&); - - void handleLookupHTTPRequest(LookupPromise, const std::string, RequestType); - void handleNamespaceTopicsHTTPRequest(NamespaceTopicsPromise promise, const std::string completeUrl); - - Result sendHTTPRequest(std::string completeUrl, std::string& responseData); - - public: - HTTPLookupService(ServiceNameResolver&, const ClientConfiguration&, const AuthenticationPtr&); - - LookupResultFuture getBroker(const TopicName& topicName) override; - - Future getPartitionMetadataAsync(const TopicNamePtr&) override; - - Future getTopicsOfNamespaceAsync(const NamespaceNamePtr& nsName) override; -}; -} // namespace pulsar - -#endif // PULSAR_CPP_HTTPLOOKUPSERVICE_H diff --git a/pulsar-client-cpp/lib/HandlerBase.cc b/pulsar-client-cpp/lib/HandlerBase.cc deleted file mode 100644 index 506207ea13289..0000000000000 --- a/pulsar-client-cpp/lib/HandlerBase.cc +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "HandlerBase.h" -#include "TimeUtils.h" - -#include - -#include "LogUtils.h" - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -HandlerBase::HandlerBase(const ClientImplPtr& client, const std::string& topic, const Backoff& backoff) - : client_(client), - topic_(topic), - connection_(), - executor_(client->getIOExecutorProvider()->get()), - mutex_(), - creationTimestamp_(TimeUtils::now()), - operationTimeut_(seconds(client->conf().getOperationTimeoutSeconds())), - state_(NotStarted), - backoff_(backoff), - epoch_(0), - timer_(executor_->createDeadlineTimer()) {} - -HandlerBase::~HandlerBase() { timer_->cancel(); } - -void HandlerBase::start() { - // guard against concurrent state changes such as closing - State state = NotStarted; - if (state_.compare_exchange_strong(state, Pending)) { - grabCnx(); - } -} - -void HandlerBase::grabCnx() { - Lock lock(mutex_); - if (connection_.lock()) { - lock.unlock(); - LOG_INFO(getName() << "Ignoring reconnection request since we're already connected"); - return; - } - lock.unlock(); - LOG_INFO(getName() << "Getting connection from pool"); - ClientImplPtr client = client_.lock(); - Future future = client->getConnection(topic_); - future.addListener(std::bind(&HandlerBase::handleNewConnection, std::placeholders::_1, - 
std::placeholders::_2, get_weak_from_this())); -} - -void HandlerBase::handleNewConnection(Result result, ClientConnectionWeakPtr connection, - HandlerBaseWeakPtr weakHandler) { - HandlerBasePtr handler = weakHandler.lock(); - if (!handler) { - LOG_DEBUG("HandlerBase Weak reference is not valid anymore"); - return; - } - if (result == ResultOk) { - ClientConnectionPtr conn = connection.lock(); - if (conn) { - LOG_DEBUG(handler->getName() << "Connected to broker: " << conn->cnxString()); - handler->connectionOpened(conn); - return; - } - // TODO - look deeper into why the connection is null while the result is ResultOk - LOG_INFO(handler->getName() << "ClientConnectionPtr is no longer valid"); - } - handler->connectionFailed(result); - scheduleReconnection(handler); -} - -void HandlerBase::handleDisconnection(Result result, ClientConnectionWeakPtr connection, - HandlerBaseWeakPtr weakHandler) { - HandlerBasePtr handler = weakHandler.lock(); - if (!handler) { - LOG_DEBUG("HandlerBase Weak reference is not valid anymore"); - return; - } - - State state = handler->state_; - - ClientConnectionPtr currentConnection = handler->connection_.lock(); - if (currentConnection && connection.lock().get() != currentConnection.get()) { - LOG_WARN(handler->getName() - << "Ignoring connection closed since we are already attached to a newer connection"); - return; - } - - handler->connection_.reset(); - - if (result == ResultRetryable) { - scheduleReconnection(handler); - return; - } - - switch (state) { - case Pending: - case Ready: - scheduleReconnection(handler); - break; - - case NotStarted: - case Closing: - case Closed: - case Failed: - LOG_DEBUG(handler->getName() - << "Ignoring connection closed event since the handler is not used anymore"); - break; - } -} - -bool HandlerBase::isRetriableError(Result result) { return result == ResultRetryable; } - -void HandlerBase::scheduleReconnection(HandlerBasePtr handler) { - const auto state = handler->state_.load(); - if (state == 
Pending || state == Ready) { - TimeDuration delay = handler->backoff_.next(); - - LOG_INFO(handler->getName() << "Schedule reconnection in " << (delay.total_milliseconds() / 1000.0) - << " s"); - handler->timer_->expires_from_now(delay); - // passing shared_ptr here since time_ will get destroyed, so tasks will be cancelled - // so we will not run into the case where grabCnx is invoked on out of scope handler - handler->timer_->async_wait(std::bind(&HandlerBase::handleTimeout, std::placeholders::_1, handler)); - } -} - -void HandlerBase::handleTimeout(const boost::system::error_code& ec, HandlerBasePtr handler) { - if (ec) { - LOG_DEBUG(handler->getName() << "Ignoring timer cancelled event, code[" << ec << "]"); - return; - } else { - handler->epoch_++; - handler->grabCnx(); - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/HandlerBase.h b/pulsar-client-cpp/lib/HandlerBase.h deleted file mode 100644 index 1184746da21ba..0000000000000 --- a/pulsar-client-cpp/lib/HandlerBase.h +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef _PULSAR_HANDLER_BASE_HEADER_ -#define _PULSAR_HANDLER_BASE_HEADER_ -#include "Backoff.h" -#include "ClientImpl.h" -#include "ClientConnection.h" -#include -#include -#include -#include - -namespace pulsar { - -using namespace boost::posix_time; -using boost::posix_time::milliseconds; -using boost::posix_time::seconds; - -class HandlerBase; -typedef std::weak_ptr HandlerBaseWeakPtr; -typedef std::shared_ptr HandlerBasePtr; - -class HandlerBase { - public: - HandlerBase(const ClientImplPtr&, const std::string&, const Backoff&); - - virtual ~HandlerBase(); - - void start(); - - /* - * get method for derived class to access weak ptr to connection so that they - * have to check if they can get a shared_ptr out of it or not - */ - ClientConnectionWeakPtr getCnx() const { return connection_; } - - protected: - /* - * tries reconnection and sets connection_ to valid object - */ - void grabCnx(); - - /* - * Schedule reconnection after backoff time - */ - static void scheduleReconnection(HandlerBasePtr handler); - - /* - * Should we retry in error that are transient - */ - bool isRetriableError(Result result); - /* - * connectionOpened will be implemented by derived class to receive notification - */ - - virtual void connectionOpened(const ClientConnectionPtr& connection) = 0; - - virtual void connectionFailed(Result result) = 0; - - virtual HandlerBaseWeakPtr get_weak_from_this() = 0; - - virtual const std::string& getName() const = 0; - - private: - static void handleNewConnection(Result result, ClientConnectionWeakPtr connection, HandlerBaseWeakPtr wp); - static void handleDisconnection(Result result, ClientConnectionWeakPtr connection, HandlerBaseWeakPtr wp); - - static void handleTimeout(const boost::system::error_code& ec, HandlerBasePtr handler); - - protected: - ClientImplWeakPtr client_; - const std::string topic_; - ClientConnectionWeakPtr connection_; - ExecutorServicePtr executor_; - mutable std::mutex mutex_; - std::mutex pendingReceiveMutex_; - 
ptime creationTimestamp_; - - const TimeDuration operationTimeut_; - typedef std::unique_lock Lock; - - enum State - { - NotStarted, - Pending, - Ready, - Closing, - Closed, - Failed - }; - - std::atomic state_; - Backoff backoff_; - uint64_t epoch_; - - private: - DeadlineTimerPtr timer_; - friend class ClientConnection; - friend class PulsarFriend; -}; -} // namespace pulsar -#endif //_PULSAR_HANDLER_BASE_HEADER_ diff --git a/pulsar-client-cpp/lib/Hash.h b/pulsar-client-cpp/lib/Hash.h deleted file mode 100644 index 367e66187de66..0000000000000 --- a/pulsar-client-cpp/lib/Hash.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef HASH_HPP_ -#define HASH_HPP_ - -#include -#include - -namespace pulsar { -class Hash { - public: - virtual ~Hash() {} - - /** - * Generate the hash of a given String - * - * @return The hash of {@param key}, which is non-negative integer. 
- */ - virtual int32_t makeHash(const std::string& key) = 0; -}; -} // namespace pulsar - -#endif /* HASH_HPP_ */ diff --git a/pulsar-client-cpp/lib/JavaStringHash.cc b/pulsar-client-cpp/lib/JavaStringHash.cc deleted file mode 100644 index bf809bf493f7c..0000000000000 --- a/pulsar-client-cpp/lib/JavaStringHash.cc +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "JavaStringHash.h" -#include - -namespace pulsar { - -JavaStringHash::JavaStringHash() {} - -int32_t JavaStringHash::makeHash(const std::string& key) { - uint64_t len = key.length(); - const char* val = key.c_str(); - uint32_t hash = 0; - - for (int i = 0; i < len; i++) { - hash = 31 * hash + val[i]; - } - - hash &= std::numeric_limits::max(); - - return hash; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/JavaStringHash.h b/pulsar-client-cpp/lib/JavaStringHash.h deleted file mode 100644 index 6059b1a09efe4..0000000000000 --- a/pulsar-client-cpp/lib/JavaStringHash.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef JAVA_DEFAULT_HASH_HPP_ -#define JAVA_DEFAULT_HASH_HPP_ - -#include -#include "Hash.h" - -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC JavaStringHash : public Hash { - public: - JavaStringHash(); - int32_t makeHash(const std::string &key); -}; -} // namespace pulsar - -#endif /* JAVA_DEFAULT_HASH_HPP_ */ diff --git a/pulsar-client-cpp/lib/KeySharedPolicy.cc b/pulsar-client-cpp/lib/KeySharedPolicy.cc deleted file mode 100644 index e23a942ce1001..0000000000000 --- a/pulsar-client-cpp/lib/KeySharedPolicy.cc +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include - -#include -#include - -namespace pulsar { - -static const int DefaultHashRangeSize = 2 << 15; - -KeySharedPolicy::KeySharedPolicy() : impl_(std::make_shared()) {} - -KeySharedPolicy::~KeySharedPolicy() {} - -KeySharedPolicy::KeySharedPolicy(const KeySharedPolicy &x) : impl_(x.impl_) {} - -KeySharedPolicy &KeySharedPolicy::operator=(const KeySharedPolicy &x) { - impl_ = x.impl_; - return *this; -} - -KeySharedPolicy &KeySharedPolicy::setKeySharedMode(KeySharedMode keySharedMode) { - impl_->keySharedMode = keySharedMode; - return *this; -} - -KeySharedMode KeySharedPolicy::getKeySharedMode() const { return impl_->keySharedMode; } - -KeySharedPolicy &KeySharedPolicy::setAllowOutOfOrderDelivery(bool allowOutOfOrderDelivery) { - impl_->allowOutOfOrderDelivery = allowOutOfOrderDelivery; - return *this; -} - -bool KeySharedPolicy::isAllowOutOfOrderDelivery() const { return impl_->allowOutOfOrderDelivery; } - -KeySharedPolicy &KeySharedPolicy::setStickyRanges(std::initializer_list ranges) { - if (ranges.size() == 0) { - throw std::invalid_argument("Ranges for KeyShared policy must not be empty."); - } - for (StickyRange range : ranges) { - if (range.first < 0 || range.second >= DefaultHashRangeSize) { - throw std::invalid_argument("KeySharedPolicy Exception: Ranges must be [0, 65535]."); - } - for (StickyRange range2 : ranges) { - int start = std::max(range.first, range2.first); - int end = std::min(range.second, range2.second); - if (range != range2 && end >= start) { - throw std::invalid_argument("Ranges for KeyShared policy with overlap."); - } - } - for (StickyRange range : ranges) { - impl_->ranges.push_back(range); - } - } - return *this; -} - -StickyRanges KeySharedPolicy::getStickyRanges() const { return impl_->ranges; } - -KeySharedPolicy KeySharedPolicy::clone() const { - KeySharedPolicy newConf; - newConf.impl_.reset(new 
KeySharedPolicyImpl(*this->impl_)); - return newConf; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/KeySharedPolicyImpl.h b/pulsar-client-cpp/lib/KeySharedPolicyImpl.h deleted file mode 100644 index 51ad491cc8404..0000000000000 --- a/pulsar-client-cpp/lib/KeySharedPolicyImpl.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include - -namespace pulsar { - -struct KeySharedPolicyImpl { - bool allowOutOfOrderDelivery; - KeySharedMode keySharedMode; - StickyRanges ranges; - - KeySharedPolicyImpl() : allowOutOfOrderDelivery(false), keySharedMode(AUTO_SPLIT) {} -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Latch.cc b/pulsar-client-cpp/lib/Latch.cc deleted file mode 100644 index c9cfab8624a46..0000000000000 --- a/pulsar-client-cpp/lib/Latch.cc +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "Latch.h" - -namespace pulsar { - -Latch::Latch(int count) : state_(std::make_shared()) { state_->count = count; } - -void Latch::countdown() { - Lock lock(state_->mutex); - - state_->count--; - - if (state_->count == 0) { - state_->condition.notify_all(); - } -} - -int Latch::getCount() { - Lock lock(state_->mutex); - - return state_->count; -} - -void Latch::wait() { - Lock lock(state_->mutex); - - state_->condition.wait(lock, CountIsZero(state_->count)); -} - -} /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/Latch.h b/pulsar-client-cpp/lib/Latch.h deleted file mode 100644 index f5b711bcaf7a8..0000000000000 --- a/pulsar-client-cpp/lib/Latch.h +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_LATCH_H_ -#define LIB_LATCH_H_ - -#include -#include -#include -#include - -namespace pulsar { - -class PULSAR_PUBLIC Latch { - public: - Latch(int count); - - void countdown(); - - void wait(); - - template - bool wait(const Duration& timeout) { - Lock lock(state_->mutex); - return state_->condition.wait_for(lock, timeout, CountIsZero(state_->count)); - } - - int getCount(); - - private: - struct InternalState { - std::mutex mutex; - std::condition_variable condition; - int count; - }; - - struct CountIsZero { - const int& count_; - - CountIsZero(const int& count) : count_(count) {} - - bool operator()() const { return count_ == 0; } - }; - - typedef std::unique_lock Lock; - std::shared_ptr state_; -}; -typedef std::shared_ptr LatchPtr; -} /* namespace pulsar */ - -#endif /* LIB_LATCH_H_ */ diff --git a/pulsar-client-cpp/lib/Log4CxxLogger.h b/pulsar-client-cpp/lib/Log4CxxLogger.h deleted file mode 100644 index cd2fe9e7f647b..0000000000000 --- a/pulsar-client-cpp/lib/Log4CxxLogger.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include - -#ifdef USE_LOG4CXX - -namespace pulsar { - -class PULSAR_PUBLIC Log4CxxLoggerFactory : public LoggerFactory { - public: - static std::unique_ptr create(); - static std::unique_ptr create(const std::string& log4cxxConfFile); - - Logger* getLogger(const std::string& fileName); -}; - -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/lib/Log4cxxLogger.cc b/pulsar-client-cpp/lib/Log4cxxLogger.cc deleted file mode 100644 index fdd0395da1df0..0000000000000 --- a/pulsar-client-cpp/lib/Log4cxxLogger.cc +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "Log4CxxLogger.h" -#include - -#ifdef USE_LOG4CXX - -#include -#include -#include -#include -#include - -using namespace log4cxx; - -namespace pulsar { - -class Log4CxxLogger : public Logger { - std::string _fileName; - LoggerPtr _logger; - - public: - Log4CxxLogger(const std::string &fileName) - : _fileName(fileName), _logger(log4cxx::Logger::getLogger(LOG_CATEGORY_NAME + fileName)) {} - - bool isEnabled(Level level) { return _logger->isEnabledFor(getLevel(level)); } - - void log(Level level, int line, const std::string &message) { - spi::LocationInfo location(_fileName.c_str(), "", line); - _logger->forcedLogLS(getLevel(level), message, location); - } - - private: - static log4cxx::LevelPtr getLevel(Level level) { - switch (level) { - case LEVEL_DEBUG: - return log4cxx::Level::getDebug(); - case LEVEL_INFO: - return log4cxx::Level::getInfo(); - case LEVEL_WARN: - return log4cxx::Level::getWarn(); - case LEVEL_ERROR: - return log4cxx::Level::getError(); - } - } -}; - -std::unique_ptr Log4CxxLoggerFactory::create() { - if (!LogManager::getLoggerRepository()->isConfigured()) { - LogManager::getLoggerRepository()->setConfigured(true); - LoggerPtr root = log4cxx::Logger::getRootLogger(); - static const LogString TTCC_CONVERSION_PATTERN( - LOG4CXX_STR("%d{yyyy-MM-dd HH:mm:ss,SSS Z} [%t] %-5p %l - %m%n")); - LayoutPtr layout(new PatternLayout(TTCC_CONVERSION_PATTERN)); - AppenderPtr appender(new ConsoleAppender(layout)); - root->setLevel(log4cxx::Level::getInfo()); - root->addAppender(appender); - } - - return std::unique_ptr(new Log4CxxLoggerFactory()); -} - -std::unique_ptr Log4CxxLoggerFactory::create(const std::string &log4cxxConfFile) { - try { - log4cxx::PropertyConfigurator::configure(log4cxxConfFile); - } catch (const std::exception &e) { - std::cerr << "exception caught while configuring log4cpp via '" << log4cxxConfFile - << "': " << e.what() << std::endl; - } catch (...) 
{ - std::cerr << "unknown exception while configuring log4cpp via '" << log4cxxConfFile << "'." - << std::endl; - } - - return std::unique_ptr(new Log4CxxLoggerFactory()); -} - -Logger *Log4CxxLoggerFactory::getLogger(const std::string &fileName) { return new Log4CxxLogger(fileName); } -} // namespace pulsar - -#endif // USE_LOG4CXX diff --git a/pulsar-client-cpp/lib/LogUtils.cc b/pulsar-client-cpp/lib/LogUtils.cc deleted file mode 100644 index 31746087b924a..0000000000000 --- a/pulsar-client-cpp/lib/LogUtils.cc +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "LogUtils.h" - -#include -#include -#include - -#include "Log4CxxLogger.h" - -namespace pulsar { - -void LogUtils::init(const std::string& logfilePath) { - // If this is called explicitely, we fallback to Log4cxx config, if enabled - -#ifdef USE_LOG4CXX - if (!logfilePath.empty()) { - setLoggerFactory(Log4CxxLoggerFactory::create(logfilePath)); - } else { - setLoggerFactory(Log4CxxLoggerFactory::create()); - } -#endif // USE_LOG4CXX -} - -static std::atomic s_loggerFactory(nullptr); - -void LogUtils::setLoggerFactory(std::unique_ptr loggerFactory) { - LoggerFactory* oldFactory = nullptr; - LoggerFactory* newFactory = loggerFactory.release(); - if (!s_loggerFactory.compare_exchange_strong(oldFactory, newFactory)) { - delete newFactory; // there's already a factory set - } -} - -LoggerFactory* LogUtils::getLoggerFactory() { - if (s_loggerFactory.load() == nullptr) { - std::unique_ptr newFactory(new ConsoleLoggerFactory()); - setLoggerFactory(std::move(newFactory)); - } - return s_loggerFactory.load(); -} - -std::string LogUtils::getLoggerName(const std::string& path) { - // Remove all directories from filename - int startIdx = path.find_last_of("/"); - int endIdx = path.find_last_of("."); - return path.substr(startIdx + 1, endIdx - startIdx - 1); -} - -void LogUtils::resetLoggerFactory() { s_loggerFactory.exchange(nullptr, std::memory_order_release); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/LogUtils.h b/pulsar-client-cpp/lib/LogUtils.h deleted file mode 100644 index 67ddf431c2e6f..0000000000000 --- a/pulsar-client-cpp/lib/LogUtils.h +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include -#include - -#include -#include - -namespace pulsar { - -#ifdef __GNUC__ -#define PULSAR_UNLIKELY(expr) __builtin_expect(expr, 0) -#else -#define PULSAR_UNLIKELY(expr) (expr) -#endif - -#define DECLARE_LOG_OBJECT() \ - static pulsar::Logger* logger() { \ - static thread_local std::unique_ptr threadSpecificLogPtr; \ - pulsar::Logger* ptr = threadSpecificLogPtr.get(); \ - if (PULSAR_UNLIKELY(!ptr)) { \ - std::string logger = pulsar::LogUtils::getLoggerName(__FILE__); \ - threadSpecificLogPtr.reset(pulsar::LogUtils::getLoggerFactory()->getLogger(logger)); \ - ptr = threadSpecificLogPtr.get(); \ - } \ - return ptr; \ - } - -#define LOG_DEBUG(message) \ - { \ - if (PULSAR_UNLIKELY(logger()->isEnabled(pulsar::Logger::LEVEL_DEBUG))) { \ - std::stringstream ss; \ - ss << message; \ - logger()->log(pulsar::Logger::LEVEL_DEBUG, __LINE__, ss.str()); \ - } \ - } - -#define LOG_INFO(message) \ - { \ - if (logger()->isEnabled(pulsar::Logger::LEVEL_INFO)) { \ - std::stringstream ss; \ - ss << message; \ - logger()->log(pulsar::Logger::LEVEL_INFO, __LINE__, ss.str()); \ - } \ - } - -#define LOG_WARN(message) \ - { \ - if (logger()->isEnabled(pulsar::Logger::LEVEL_WARN)) { \ - std::stringstream ss; \ - ss << message; \ - logger()->log(pulsar::Logger::LEVEL_WARN, __LINE__, ss.str()); \ - } \ - } - -#define LOG_ERROR(message) \ - { \ - if 
(logger()->isEnabled(pulsar::Logger::LEVEL_ERROR)) { \ - std::stringstream ss; \ - ss << message; \ - logger()->log(pulsar::Logger::LEVEL_ERROR, __LINE__, ss.str()); \ - } \ - } - -class PULSAR_PUBLIC LogUtils { - public: - static void init(const std::string& logConfFilePath); - - static void setLoggerFactory(std::unique_ptr loggerFactory); - - static void resetLoggerFactory(); - - static LoggerFactory* getLoggerFactory(); - - static std::string getLoggerName(const std::string& path); -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/LookupDataResult.h b/pulsar-client-cpp/lib/LookupDataResult.h deleted file mode 100644 index b48b854593a55..0000000000000 --- a/pulsar-client-cpp/lib/LookupDataResult.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef _PULSAR_LOOKUP_DATA_RESULT_HEADER_ -#define _PULSAR_LOOKUP_DATA_RESULT_HEADER_ -#include -#include -#include - -#include -#include - -namespace pulsar { -class LookupDataResult; -typedef std::shared_ptr LookupDataResultPtr; -typedef Promise LookupDataResultPromise; -typedef std::shared_ptr LookupDataResultPromisePtr; - -class LookupDataResult { - public: - void setBrokerUrl(const std::string& brokerUrl) { brokerUrl_ = brokerUrl; } - void setBrokerUrlTls(const std::string& brokerUrlTls) { brokerUrlTls_ = brokerUrlTls; } - const std::string& getBrokerUrl() const { return brokerUrl_; } - const std::string& getBrokerUrlTls() const { return brokerUrlTls_; } - - bool isAuthoritative() const { return authoritative; } - - void setAuthoritative(bool authoritative) { this->authoritative = authoritative; } - - int getPartitions() const { return partitions; } - - void setPartitions(int partitions) { this->partitions = partitions; } - - bool isRedirect() const { return redirect; } - - void setRedirect(bool redirect) { this->redirect = redirect; } - - bool shouldProxyThroughServiceUrl() const { return proxyThroughServiceUrl_; } - - void setShouldProxyThroughServiceUrl(bool proxyThroughServiceUrl) { - proxyThroughServiceUrl_ = proxyThroughServiceUrl; - } - - private: - friend inline std::ostream& operator<<(std::ostream& os, const LookupDataResult& b); - std::string brokerUrl_; - std::string brokerUrlTls_; - int partitions; - bool authoritative; - bool redirect; - - bool proxyThroughServiceUrl_; -}; - -std::ostream& operator<<(std::ostream& os, const LookupDataResult& b) { - os << "{ LookupDataResult [brokerUrl_ = " << b.brokerUrl_ << "] [brokerUrlTls_ = " << b.brokerUrlTls_ - << "] [partitions = " << b.partitions << "] [authoritative = " << b.authoritative - << "] [redirect = " << b.redirect << "] proxyThroughServiceUrl = " << b.proxyThroughServiceUrl_ - << "] }"; - return os; -} -} // namespace pulsar - -#endif // _PULSAR_LOOKUP_DATA_RESULT_HEADER_ diff --git 
a/pulsar-client-cpp/lib/LookupService.h b/pulsar-client-cpp/lib/LookupService.h deleted file mode 100644 index 50f2d84f87659..0000000000000 --- a/pulsar-client-cpp/lib/LookupService.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_LOOKUPSERVICE_H -#define PULSAR_CPP_LOOKUPSERVICE_H - -#include -#include -#include -#include -#include - -#include -#include - -namespace pulsar { -typedef std::shared_ptr> NamespaceTopicsPtr; -typedef Promise NamespaceTopicsPromise; -typedef std::shared_ptr> NamespaceTopicsPromisePtr; - -class LookupService { - public: - struct LookupResult { - std::string logicalAddress; - std::string physicalAddress; - - friend std::ostream& operator<<(std::ostream& os, const LookupResult& lookupResult) { - return os << "logical address: " << lookupResult.logicalAddress - << ", physical address: " << lookupResult.physicalAddress; - } - }; - using LookupResultFuture = Future; - using LookupResultPromise = Promise; - - /** - * Call broker lookup-api to get broker which serves namespace bundle that contains the given topic. 
- * - * @param topicName the topic name - * @return a pair of addresses, representing the logical and physical addresses of the broker that serves - * the topic - */ - virtual LookupResultFuture getBroker(const TopicName& topicName) = 0; - - /* - * @param topicName - pointer to topic name - * - * Gets Partition metadata - */ - virtual Future getPartitionMetadataAsync(const TopicNamePtr& topicName) = 0; - - /** - * @param namespace - namespace-name - * - * Returns all the topics name for a given namespace. - */ - virtual Future getTopicsOfNamespaceAsync(const NamespaceNamePtr& nsName) = 0; - - virtual ~LookupService() {} -}; - -typedef std::shared_ptr LookupServicePtr; - -} // namespace pulsar -#endif // PULSAR_CPP_LOOKUPSERVICE_H diff --git a/pulsar-client-cpp/lib/MapCache.h b/pulsar-client-cpp/lib/MapCache.h deleted file mode 100644 index b9a0069eaf44a..0000000000000 --- a/pulsar-client-cpp/lib/MapCache.h +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include -#include -#include - -namespace pulsar { - -// A map cache that supports removing the first N oldest entries from the map. -// Value must be moveable and have the default constructor. 
-template -class MapCache { - std::unordered_map map_; - std::deque keys_; - - public: - using const_iterator = typename decltype(map_)::const_iterator; - using iterator = typename decltype(map_)::iterator; - - MapCache() = default; - // Here we don't use =default to be compatible with GCC 4.8 - MapCache(MapCache&& rhs) noexcept : map_(std::move(rhs.map_)), keys_(std::move(rhs.keys_)) {} - - size_t size() const noexcept { return map_.size(); } - - const_iterator find(const Key& key) const { return map_.find(key); } - iterator find(const Key& key) { return map_.find(key); } - - const_iterator end() const noexcept { return map_.end(); } - iterator end() noexcept { return map_.end(); } - - iterator putIfAbsent(const Key& key, Value&& value) { - auto it = map_.find(key); - if (it == map_.end()) { - keys_.push_back(key); - return map_.emplace(key, std::move(value)).first; - } else { - return end(); - } - } - - void removeOldestValues(size_t numToRemove, - const std::function& callback) { - for (size_t i = 0; !keys_.empty() && i < numToRemove; i++) { - const auto key = keys_.front(); - auto it = map_.find(key); - if (it != map_.end()) { - if (callback) { - callback(it->first, it->second); - } - map_.erase(it); - } - keys_.pop_front(); - } - } - - void remove(const Key& key) { - auto it = map_.find(key); - if (it != map_.end()) { - removeKeyFromKeys(key); - map_.erase(it); - } - } - - // Following methods are only used for tests - std::vector getKeys() const { - std::vector keys; - for (auto key : keys_) { - keys.emplace_back(key); - } - return keys; - } - - private: - void removeKeyFromKeys(const Key& key) { - for (auto it = keys_.begin(); it != keys_.end(); ++it) { - if (*it == key) { - keys_.erase(it); - break; - } - } - } -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MemoryLimitController.cc b/pulsar-client-cpp/lib/MemoryLimitController.cc deleted file mode 100644 index f55da8e68a4c5..0000000000000 --- a/pulsar-client-cpp/lib/MemoryLimitController.cc 
+++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include "MemoryLimitController.h" - -namespace pulsar { - -MemoryLimitController::MemoryLimitController(uint64_t memoryLimit) - : memoryLimit_(memoryLimit), currentUsage_(0), mutex_(), condition_() {} - -bool MemoryLimitController::tryReserveMemory(uint64_t size) { - // Avoid CAS operation when size is 0 - if (size == 0) { - return true; - } - while (true) { - uint64_t current = currentUsage_; - uint64_t newUsage = current + size; - - // We allow one request to go over the limit, to make the notification - // path simpler and more efficient - if (current > memoryLimit_ && memoryLimit_ > 0) { - return false; - } - - if (currentUsage_.compare_exchange_strong(current, newUsage)) { - return true; - } - } -} - -bool MemoryLimitController::reserveMemory(uint64_t size) { - if (!tryReserveMemory(size)) { - std::unique_lock lock(mutex_); - - // Check again, while holding the lock, to ensure we reserve attempt and the waiting for the condition - // are synchronized. 
- while (!tryReserveMemory(size)) { - if (isClosed_) { - // Interrupt the waiting if the client is closing - return false; - } - - condition_.wait(lock); - } - } - - return true; -} - -void MemoryLimitController::releaseMemory(uint64_t size) { - uint64_t oldUsage = currentUsage_.fetch_sub(size); - uint64_t newUsage = oldUsage - size; - - if (newUsage + size > memoryLimit_ && newUsage <= memoryLimit_) { - // We just crossed the limit. Now we have more space - std::lock_guard lock(mutex_); - condition_.notify_all(); - } -} - -uint64_t MemoryLimitController::currentUsage() const { return currentUsage_; } - -void MemoryLimitController::close() { - std::unique_lock lock(mutex_); - isClosed_ = true; - condition_.notify_all(); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MemoryLimitController.h b/pulsar-client-cpp/lib/MemoryLimitController.h deleted file mode 100644 index 38987ea0b68a0..0000000000000 --- a/pulsar-client-cpp/lib/MemoryLimitController.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include -#include -#include -#include - -namespace pulsar { - -class MemoryLimitController { - public: - explicit MemoryLimitController(uint64_t memoryLimit); - bool tryReserveMemory(uint64_t size); - bool reserveMemory(uint64_t size); - void releaseMemory(uint64_t size); - uint64_t currentUsage() const; - - void close(); - - private: - const uint64_t memoryLimit_; - std::atomic currentUsage_; - std::mutex mutex_; - std::condition_variable condition_; - bool isClosed_ = false; -}; - -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/Message.cc b/pulsar-client-cpp/lib/Message.cc deleted file mode 100644 index b928945cfae21..0000000000000 --- a/pulsar-client-cpp/lib/Message.cc +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -#include "PulsarApi.pb.h" - -#include "MessageImpl.h" -#include "SharedBuffer.h" - -#include - -using namespace pulsar; - -namespace pulsar { - -const static std::string emptyString; -const static MessageId invalidMessageId; - -const Message::StringMap& Message::getProperties() const { return impl_->properties(); } - -bool Message::hasProperty(const std::string& name) const { - const StringMap& m = impl_->properties(); - return m.find(name) != m.end(); -} - -const std::string& Message::getProperty(const std::string& name) const { - if (hasProperty(name)) { - const StringMap& m = impl_->properties(); - return m.at(name); - } else { - return emptyString; - } -} - -const void* Message::getData() const { return impl_->payload.data(); } - -std::size_t Message::getLength() const { return impl_->payload.readableBytes(); } - -std::string Message::getDataAsString() const { return std::string((const char*)getData(), getLength()); } - -Message::Message() : impl_() {} - -Message::Message(MessageImplPtr& impl) : impl_(impl) {} - -Message::Message(const proto::CommandMessage& msg, proto::MessageMetadata& metadata, SharedBuffer& payload, - int32_t partition) - : impl_(std::make_shared()) { - impl_->messageId = - MessageId(partition, msg.message_id().ledgerid(), msg.message_id().entryid(), /* batchId */ - -1); - impl_->metadata = metadata; - impl_->payload = payload; -} - -Message::Message(const MessageId& messageID, proto::MessageMetadata& metadata, SharedBuffer& payload, - proto::SingleMessageMetadata& singleMetadata, const std::string& topicName) - : impl_(std::make_shared()) { - impl_->messageId = messageID; - impl_->metadata = metadata; - impl_->payload = payload; - impl_->metadata.mutable_properties()->CopyFrom(singleMetadata.properties()); - impl_->topicName_ = &topicName; - - impl_->metadata.clear_properties(); - if (singleMetadata.properties_size() > 0) { - 
impl_->metadata.mutable_properties()->Reserve(singleMetadata.properties_size()); - for (int i = 0; i < singleMetadata.properties_size(); i++) { - auto keyValue = proto::KeyValue().New(); - *keyValue = singleMetadata.properties(i); - impl_->metadata.mutable_properties()->AddAllocated(keyValue); - } - } - - if (singleMetadata.has_partition_key()) { - impl_->metadata.set_partition_key(singleMetadata.partition_key()); - } else { - impl_->metadata.clear_partition_key(); - } - - if (singleMetadata.has_ordering_key()) { - impl_->metadata.set_ordering_key(singleMetadata.ordering_key()); - } else { - impl_->metadata.clear_ordering_key(); - } - - if (singleMetadata.has_event_time()) { - impl_->metadata.set_event_time(singleMetadata.event_time()); - } else { - impl_->metadata.clear_event_time(); - } - - if (singleMetadata.has_sequence_id()) { - impl_->metadata.set_sequence_id(singleMetadata.sequence_id()); - } else { - impl_->metadata.clear_sequence_id(); - } -} - -const MessageId& Message::getMessageId() const { - if (!impl_) { - return invalidMessageId; - } else { - return impl_->messageId; - } -} - -void Message::setMessageId(const MessageId& messageID) const { - if (impl_) { - impl_->messageId = messageID; - } - return; -} - -bool Message::hasPartitionKey() const { - if (impl_) { - return impl_->hasPartitionKey(); - } - return false; -} - -const std::string& Message::getPartitionKey() const { - if (!impl_) { - return emptyString; - } - return impl_->getPartitionKey(); -} - -bool Message::hasOrderingKey() const { - if (impl_) { - return impl_->hasOrderingKey(); - } - return false; -} - -const std::string& Message::getOrderingKey() const { - if (!impl_) { - return emptyString; - } - return impl_->getOrderingKey(); -} - -const std::string& Message::getTopicName() const { - if (!impl_) { - return emptyString; - } - return impl_->getTopicName(); -} - -const int Message::getRedeliveryCount() const { - if (!impl_) { - return 0; - } - return impl_->getRedeliveryCount(); -} - 
-bool Message::hasSchemaVersion() const { - if (impl_) { - return impl_->hasSchemaVersion(); - } - return false; -} - -const std::string& Message::getSchemaVersion() const { - if (!impl_) { - return emptyString; - } - return impl_->getSchemaVersion(); -} - -uint64_t Message::getPublishTimestamp() const { return impl_ ? impl_->getPublishTimestamp() : 0ull; } - -uint64_t Message::getEventTimestamp() const { return impl_ ? impl_->getEventTimestamp() : 0ull; } - -bool Message::operator==(const Message& msg) const { return getMessageId() == msg.getMessageId(); } - -PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, const Message::StringMap& map) { - // Output at most 10 elements -- appropriate if used for logging. - s << '{'; - - Message::StringMap::const_iterator begin = map.begin(); - Message::StringMap::const_iterator end = map.end(); - for (int i = 0; begin != end && i < 10; ++i, ++begin) { - if (i > 0) { - s << ", "; - } - - s << "'" << begin->first << "':'" << begin->second << "'"; - } - - if (begin != end) { - s << " ..."; - } - - s << '}'; - return s; -} - -PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, const Message& msg) { - assert(msg.impl_.get()); - assert(msg.impl_->metadata.has_sequence_id()); - assert(msg.impl_->metadata.has_publish_time()); - s << "Message(prod=" << msg.impl_->metadata.producer_name() - << ", seq=" << msg.impl_->metadata.sequence_id() - << ", publish_time=" << msg.impl_->metadata.publish_time() << ", payload_size=" << msg.getLength() - << ", msg_id=" << msg.getMessageId() << ", props=" << msg.getProperties() << ')'; - return s; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageAndCallbackBatch.cc b/pulsar-client-cpp/lib/MessageAndCallbackBatch.cc deleted file mode 100644 index 3e229b0fbb8dc..0000000000000 --- a/pulsar-client-cpp/lib/MessageAndCallbackBatch.cc +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "MessageAndCallbackBatch.h" -#include "ClientConnection.h" -#include "Commands.h" -#include "LogUtils.h" -#include "MessageImpl.h" - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -void MessageAndCallbackBatch::add(const Message& msg, const SendCallback& callback) { - if (empty()) { - msgImpl_.reset(new MessageImpl); - Commands::initBatchMessageMetadata(msg, msgImpl_->metadata); - } - LOG_DEBUG(" Before serialization payload size in bytes = " << msgImpl_->payload.readableBytes()); - sequenceId_ = Commands::serializeSingleMessageInBatchWithPayload(msg, msgImpl_->payload, - ClientConnection::getMaxMessageSize()); - LOG_DEBUG(" After serialization payload size in bytes = " << msgImpl_->payload.readableBytes()); - callbacks_.emplace_back(callback); - - ++messagesCount_; - messagesSize_ += msg.getLength(); -} - -void MessageAndCallbackBatch::clear() { - msgImpl_.reset(); - callbacks_.clear(); - messagesCount_ = 0; - messagesSize_ = 0; -} - -static void completeSendCallbacks(const std::vector& callbacks, Result result, - const MessageId& id) { - int32_t numOfMessages = static_cast(callbacks.size()); - LOG_DEBUG("Batch complete [Result = " << result << "] [numOfMessages = " << numOfMessages << "]"); - for (int32_t i = 0; i < numOfMessages; i++) { - MessageId 
idInBatch(id.partition(), id.ledgerId(), id.entryId(), i); - callbacks[i](result, idInBatch); - } -} - -void MessageAndCallbackBatch::complete(Result result, const MessageId& id) const { - completeSendCallbacks(callbacks_, result, id); -} - -SendCallback MessageAndCallbackBatch::createSendCallback() const { - const auto& callbacks = callbacks_; - return [callbacks] // save a copy of `callbacks_` - (Result result, const MessageId& id) { completeSendCallbacks(callbacks, result, id); }; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageAndCallbackBatch.h b/pulsar-client-cpp/lib/MessageAndCallbackBatch.h deleted file mode 100644 index 38c0d12e2f768..0000000000000 --- a/pulsar-client-cpp/lib/MessageAndCallbackBatch.h +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_MESSAGEANDCALLBACK_BATCH_H_ -#define LIB_MESSAGEANDCALLBACK_BATCH_H_ - -#include -#include - -#include -#include - -#include - -namespace pulsar { - -class MessageImpl; -using MessageImplPtr = std::shared_ptr; - -class MessageAndCallbackBatch : public boost::noncopyable { - public: - // Wrapper methods of STL container - bool empty() const noexcept { return callbacks_.empty(); } - size_t size() const noexcept { return callbacks_.size(); } - - /** - * Add a message and the associated send callback to the batch - * - * @param message - * @callback the associated send callback - */ - void add(const Message& msg, const SendCallback& callback); - - /** - * Clear the internal stats - */ - void clear(); - - /** - * Complete all the callbacks with given parameters - * - * @param result this batch's send result - * @param id this batch's message id - */ - void complete(Result result, const MessageId& id) const; - - /** - * Create a single callback to trigger all the internal callbacks in order - * It's used when you want to clear and add new messages and callbacks but current callbacks need to be - * triggered later. 
- * - * @return the merged send callback - */ - SendCallback createSendCallback() const; - - const MessageImplPtr& msgImpl() const { return msgImpl_; } - uint64_t sequenceId() const noexcept { return sequenceId_; } - - uint32_t messagesCount() const { return messagesCount_; } - uint64_t messagesSize() const { return messagesSize_; } - - private: - MessageImplPtr msgImpl_; - std::vector callbacks_; - std::atomic sequenceId_{static_cast(-1L)}; - - uint32_t messagesCount_{0}; - uint64_t messagesSize_{0ull}; -}; - -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/lib/MessageBatch.cc b/pulsar-client-cpp/lib/MessageBatch.cc deleted file mode 100644 index 12144ff51f6b3..0000000000000 --- a/pulsar-client-cpp/lib/MessageBatch.cc +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include "Commands.h" -#include "MessageImpl.h" -#include "SharedBuffer.h" - -namespace pulsar { - -const static std::string emptyString; - -MessageBatch::MessageBatch() : impl_(std::make_shared()), batchMessage_(impl_) { - impl_->setTopicName(emptyString); -} - -MessageBatch& MessageBatch::withMessageId(const MessageId& messageId) { - impl_->messageId = messageId; - return *this; -} - -MessageBatch& MessageBatch::parseFrom(const std::string& payload, uint32_t batchSize) { - const SharedBuffer& payloadBuffer = - SharedBuffer::copy((char*)payload.data(), static_cast(payload.size())); - return parseFrom(payloadBuffer, batchSize); -} - -MessageBatch& MessageBatch::parseFrom(const SharedBuffer& payload, uint32_t batchSize) { - impl_->payload = payload; - impl_->metadata.set_num_messages_in_batch(batchSize); - batch_.clear(); - - for (int i = 0; i < batchSize; ++i) { - batch_.push_back(Commands::deSerializeSingleMessageInBatch(batchMessage_, i)); - } - return *this; -} - -const std::vector& MessageBatch::messages() { return batch_; } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageBuilder.cc b/pulsar-client-cpp/lib/MessageBuilder.cc deleted file mode 100644 index 977331b9a4f9a..0000000000000 --- a/pulsar-client-cpp/lib/MessageBuilder.cc +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include - -#include -#include -#include -#include - -#include "LogUtils.h" -#include "MessageImpl.h" -#include "PulsarApi.pb.h" -#include "SharedBuffer.h" - -DECLARE_LOG_OBJECT() - -#include "ObjectPool.h" -#include "TimeUtils.h" - -using namespace pulsar; - -namespace pulsar { - -ObjectPool messagePool; - -std::shared_ptr MessageBuilder::createMessageImpl() { return messagePool.create(); } - -MessageBuilder::MessageBuilder() { impl_ = createMessageImpl(); } - -MessageBuilder& MessageBuilder::create() { - impl_ = createMessageImpl(); - return *this; -} - -Message MessageBuilder::build() { return Message(impl_); } - -void MessageBuilder::checkMetadata() { - if (!impl_.get()) { - LOG_ERROR("Cannot reuse the same message builder to build a message"); - abort(); - } -} - -MessageBuilder& MessageBuilder::setContent(const void* data, size_t size) { - checkMetadata(); - impl_->payload = SharedBuffer::copy((char*)data, size); - return *this; -} - -MessageBuilder& MessageBuilder::setAllocatedContent(void* data, size_t size) { - checkMetadata(); - impl_->payload = SharedBuffer::wrap((char*)data, size); - return *this; -} - -MessageBuilder& MessageBuilder::setContent(const std::string& data) { - checkMetadata(); - impl_->payload = SharedBuffer::copy((char*)data.c_str(), data.length()); - return *this; -} - -MessageBuilder& MessageBuilder::setContent(std::string&& data) { - checkMetadata(); - impl_->payload = SharedBuffer::take(std::move(data)); - return *this; -} - -MessageBuilder& MessageBuilder::setProperty(const std::string& name, const std::string& 
value) { - checkMetadata(); - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(name); - keyValue->set_value(value); - impl_->metadata.mutable_properties()->AddAllocated(keyValue); - return *this; -} - -MessageBuilder& MessageBuilder::setProperties(const StringMap& properties) { - checkMetadata(); - for (StringMap::const_iterator it = properties.begin(); it != properties.end(); it++) { - setProperty(it->first, it->second); - } - return *this; -} - -MessageBuilder& MessageBuilder::setPartitionKey(const std::string& partitionKey) { - checkMetadata(); - impl_->metadata.set_partition_key(partitionKey); - return *this; -} - -MessageBuilder& MessageBuilder::setOrderingKey(const std::string& orderingKey) { - checkMetadata(); - impl_->metadata.set_ordering_key(orderingKey); - return *this; -} - -MessageBuilder& MessageBuilder::setEventTimestamp(uint64_t eventTimestamp) { - checkMetadata(); - impl_->metadata.set_event_time(eventTimestamp); - return *this; -} - -MessageBuilder& MessageBuilder::setSequenceId(int64_t sequenceId) { - if (sequenceId < 0) { - throw std::invalid_argument("sequenceId needs to be >= 0"); - } - checkMetadata(); - impl_->metadata.set_sequence_id(sequenceId); - return *this; -} - -MessageBuilder& MessageBuilder::setDeliverAfter(std::chrono::milliseconds delay) { - return setDeliverAt(TimeUtils::currentTimeMillis() + delay.count()); -} - -MessageBuilder& MessageBuilder::setDeliverAt(uint64_t deliveryTimestamp) { - checkMetadata(); - impl_->metadata.set_deliver_at_time(deliveryTimestamp); - return *this; -} - -MessageBuilder& MessageBuilder::setReplicationClusters(const std::vector& clusters) { - checkMetadata(); - google::protobuf::RepeatedPtrField r(clusters.begin(), clusters.end()); - r.Swap(impl_->metadata.mutable_replicate_to()); - return *this; -} - -MessageBuilder& MessageBuilder::disableReplication(bool flag) { - checkMetadata(); - google::protobuf::RepeatedPtrField r; - if (flag) { - r.AddAllocated(new 
std::string("__local__")); - } - r.Swap(impl_->metadata.mutable_replicate_to()); - return *this; -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageCrypto.cc b/pulsar-client-cpp/lib/MessageCrypto.cc deleted file mode 100644 index 8798dbf89fad9..0000000000000 --- a/pulsar-client-cpp/lib/MessageCrypto.cc +++ /dev/null @@ -1,518 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "LogUtils.h" -#include "MessageCrypto.h" - -namespace pulsar { - -DECLARE_LOG_OBJECT() - -MessageCrypto::MessageCrypto(std::string& logCtx, bool keyGenNeeded) - : dataKeyLen_(32), - dataKey_(new unsigned char[dataKeyLen_]), - tagLen_(16), - ivLen_(12), - iv_(new unsigned char[ivLen_]), - logCtx_(logCtx) { - SSL_library_init(); - SSL_load_error_strings(); - - if (!keyGenNeeded) { - mdCtx_ = EVP_MD_CTX_create(); - EVP_MD_CTX_init(mdCtx_); - return; - } - - RAND_bytes(dataKey_.get(), dataKeyLen_); - RAND_bytes(iv_.get(), ivLen_); -} - -MessageCrypto::~MessageCrypto() {} - -RSA* MessageCrypto::loadPublicKey(std::string& pubKeyStr) { - BIO* pubBio = NULL; - RSA* rsaPub = NULL; - - pubBio = BIO_new_mem_buf((char*)pubKeyStr.c_str(), -1); - if (pubBio == NULL) { - LOG_ERROR(logCtx_ << " Failed to get memory for public key"); - return rsaPub; - } - - rsaPub = PEM_read_bio_RSA_PUBKEY(pubBio, NULL, NULL, NULL); - if (rsaPub == NULL) { - LOG_ERROR(logCtx_ << " Failed to load public key"); - } - - BIO_free(pubBio); - return rsaPub; -} - -RSA* MessageCrypto::loadPrivateKey(std::string& privateKeyStr) { - BIO* privBio = NULL; - RSA* rsaPriv = NULL; - - privBio = BIO_new_mem_buf((char*)privateKeyStr.c_str(), -1); - if (privBio == NULL) { - LOG_ERROR(logCtx_ << " Failed to get memory for private key"); - return rsaPriv; - } - - rsaPriv = PEM_read_bio_RSAPrivateKey(privBio, NULL, NULL, NULL); - if (rsaPriv == NULL) { - LOG_ERROR(logCtx_ << " Failed to load private key"); - } - - BIO_free(privBio); - return rsaPriv; -} - -bool MessageCrypto::getDigest(const std::string& keyName, const void* input, unsigned int inputLen, - unsigned char keyDigest[], unsigned int& digestLen) { - if (EVP_DigestInit_ex(mdCtx_, EVP_md5(), NULL) != 1) { - LOG_ERROR(logCtx_ << "Failed to initialize md5 digest for key " << keyName); - return false; - } - - digestLen = 0; - if (EVP_DigestUpdate(mdCtx_, input, inputLen) != 1) { - LOG_ERROR(logCtx_ << "Failed to get md5 hash for data key " << 
keyName); - return false; - } - - if (EVP_DigestFinal_ex(mdCtx_, keyDigest, &digestLen) != 1) { - LOG_ERROR(logCtx_ << "Failed to finalize md hash for data key " << keyName); - return false; - } - - return true; -} - -void MessageCrypto::removeExpiredDataKey() { - boost::posix_time::ptime now = boost::posix_time::second_clock::universal_time(); - boost::posix_time::time_duration expireTime = boost::posix_time::hours(4); - - auto dataKeyCacheIter = dataKeyCache_.begin(); - while (dataKeyCacheIter != dataKeyCache_.end()) { - const auto dataKeyEntry = dataKeyCacheIter->second; - - if ((now - dataKeyEntry.second) > expireTime) { - dataKeyCache_.erase(dataKeyCacheIter++); - } else { - dataKeyCacheIter++; - } - } -} - -std::string MessageCrypto::stringToHex(const char* inputStr, size_t len) { - static const char* hexVals = "0123456789ABCDEF"; - - std::string outHex; - outHex.reserve(2 * len + 2); - outHex.push_back('0'); - outHex.push_back('x'); - for (size_t i = 0; i < len; ++i) { - const unsigned char c = *(inputStr + i); - outHex.push_back(hexVals[c >> 4]); - outHex.push_back(hexVals[c & 15]); - } - return outHex; -} - -std::string MessageCrypto::stringToHex(const std::string& inputStr, size_t len) { - return stringToHex(inputStr.c_str(), len); -} - -Result MessageCrypto::addPublicKeyCipher(const std::set& keyNames, - const CryptoKeyReaderPtr keyReader) { - Lock lock(mutex_); - - // Generate data key - RAND_bytes(dataKey_.get(), dataKeyLen_); - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string dataKeyStr(reinterpret_cast(dataKey_.get()), dataKeyLen_); - std::string strHex = stringToHex(dataKeyStr, dataKeyStr.size()); - LOG_DEBUG(logCtx_ << "Generated Data key " << strHex); - } - - Result result = ResultOk; - for (auto it = keyNames.begin(); it != keyNames.end(); it++) { - result = addPublicKeyCipher(*it, keyReader); - if (result != ResultOk) { - return result; - } - } - return result; -} - -Result MessageCrypto::addPublicKeyCipher(const 
std::string& keyName, const CryptoKeyReaderPtr keyReader) { - if (keyName.empty()) { - LOG_ERROR(logCtx_ << "Keyname is empty "); - return ResultCryptoError; - } - - // Read the public key and its info using callback - StringMap keyMeta; - EncryptionKeyInfo keyInfo; - Result result = keyReader->getPublicKey(keyName, keyMeta, keyInfo); - if (result != ResultOk) { - LOG_ERROR(logCtx_ << "Failed to get public key from KeyReader for key " << keyName); - return result; - } - - RSA* pubKey = loadPublicKey(keyInfo.getKey()); - if (pubKey == NULL) { - LOG_ERROR(logCtx_ << "Failed to load public key " << keyName); - return ResultCryptoError; - } - LOG_DEBUG(logCtx_ << " Public key " << keyName << " loaded successfully."); - - int inSize = RSA_size(pubKey); - boost::scoped_array encryptedKey(new unsigned char[inSize]); - - int outSize = - RSA_public_encrypt(dataKeyLen_, dataKey_.get(), encryptedKey.get(), pubKey, RSA_PKCS1_OAEP_PADDING); - - if (inSize != outSize) { - LOG_ERROR(logCtx_ << "Ciphertext is length not matching input key length for key " << keyName); - return ResultCryptoError; - } - std::string encryptedKeyStr(reinterpret_cast(encryptedKey.get()), inSize); - std::shared_ptr eki(new EncryptionKeyInfo()); - eki->setKey(encryptedKeyStr); - eki->setMetadata(keyInfo.getMetadata()); - - // Add a new entry or replace existing entry, if one is present. - encryptedDataKeyMap_[keyName] = eki; - - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string strHex = stringToHex(encryptedKeyStr, encryptedKeyStr.size()); - LOG_DEBUG(logCtx_ << " Data key encrypted for key " << keyName - << ". 
Encrypted key size = " << encryptedKeyStr.size() << ", value = " << strHex); - } - return ResultOk; -} - -bool MessageCrypto::removeKeyCipher(const std::string& keyName) { - if (!keyName.size()) { - return false; - } - encryptedDataKeyMap_.erase(keyName); - return true; -} - -bool MessageCrypto::encrypt(const std::set& encKeys, const CryptoKeyReaderPtr keyReader, - proto::MessageMetadata& msgMetadata, SharedBuffer& payload, - SharedBuffer& encryptedPayload) { - if (!encKeys.size()) { - return false; - } - - Lock lock(mutex_); - - // Update message metadata with encrypted data key - for (auto it = encKeys.begin(); it != encKeys.end(); it++) { - const std::string& keyName = *it; - auto keyInfoIter = encryptedDataKeyMap_.find(keyName); - - if (keyInfoIter == encryptedDataKeyMap_.end()) { - // Attempt to load the key. This will allow us to load keys as soon as - // a new key is added to producer config - Result result = addPublicKeyCipher(keyName, keyReader); - if (result != ResultOk) { - return false; - } - - keyInfoIter = encryptedDataKeyMap_.find(keyName); - - if (keyInfoIter == encryptedDataKeyMap_.end()) { - LOG_ERROR(logCtx_ << "Unable to find encrypted data key for " << keyName); - return false; - } - } - EncryptionKeyInfo* keyInfo = keyInfoIter->second.get(); - - proto::EncryptionKeys* encKeys = proto::EncryptionKeys().New(); - encKeys->set_key(keyName); - encKeys->set_value(keyInfo->getKey()); - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string strHex = stringToHex(keyInfo->getKey(), keyInfo->getKey().size()); - LOG_DEBUG(logCtx_ << " Encrypted data key added for key " << keyName << ". 
Encrypted key size = " - << keyInfo->getKey().size() << ", value = " << strHex); - } - - if (keyInfo->getMetadata().size()) { - for (auto metaIter = keyInfo->getMetadata().begin(); metaIter != keyInfo->getMetadata().end(); - metaIter++) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(metaIter->first); - keyValue->set_value(metaIter->second); - encKeys->mutable_metadata()->AddAllocated(keyValue); - LOG_DEBUG(logCtx_ << " Adding metadata for key " << keyName << ". Metadata key = " - << metaIter->first << ", value =" << metaIter->second); - } - } - - msgMetadata.mutable_encryption_keys()->AddAllocated(encKeys); - } - - // TODO: Replace random with counter and periodic refreshing based on timer/counter value - RAND_bytes(iv_.get(), ivLen_); - msgMetadata.set_encryption_param(reinterpret_cast(iv_.get()), ivLen_); - - EVP_CIPHER_CTX* cipherCtx = NULL; - encryptedPayload = SharedBuffer::allocate(payload.readableBytes() + EVP_MAX_BLOCK_LENGTH + tagLen_); - int encLen = 0; - - if (!(cipherCtx = EVP_CIPHER_CTX_new())) { - LOG_ERROR(logCtx_ << " Failed to cipher ctx."); - return false; - } - - if (EVP_EncryptInit_ex(cipherCtx, EVP_aes_256_gcm(), NULL, dataKey_.get(), iv_.get()) != 1) { - LOG_ERROR(logCtx_ << " Failed to init cipher ctx."); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - - if (EVP_CIPHER_CTX_set_padding(cipherCtx, EVP_CIPH_NO_PADDING) != 1) { - LOG_ERROR(logCtx_ << " Failed to set cipher padding."); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - - if (EVP_EncryptUpdate(cipherCtx, reinterpret_cast(encryptedPayload.mutableData()), - &encLen, reinterpret_cast(payload.data()), - payload.readableBytes()) != 1) { - LOG_ERROR(logCtx_ << " Failed to encrypt payload."); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - encryptedPayload.bytesWritten(encLen); - encLen = 0; - - if (EVP_EncryptFinal_ex(cipherCtx, reinterpret_cast(encryptedPayload.mutableData()), - &encLen) != 1) { - LOG_ERROR(logCtx_ << " Failed to 
finalize encryption."); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - encryptedPayload.bytesWritten(encLen); - - if (EVP_CIPHER_CTX_ctrl(cipherCtx, EVP_CTRL_GCM_GET_TAG, tagLen_, encryptedPayload.mutableData()) != 1) { - LOG_ERROR(logCtx_ << " Failed to get cipher tag info."); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - encryptedPayload.bytesWritten(tagLen_); - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string strPayloadHex = stringToHex(payload.data(), payload.readableBytes()); - std::string strHex = stringToHex(encryptedPayload.data(), encryptedPayload.readableBytes()); - LOG_DEBUG(logCtx_ << " Original size = " << payload.readableBytes() << ", value = " << strPayloadHex - << ". Encrypted size " << encryptedPayload.readableBytes() - << ", value =" << strHex); - } - - EVP_CIPHER_CTX_free(cipherCtx); - - return true; -} - -bool MessageCrypto::decryptDataKey(const std::string& keyName, const std::string& encryptedDataKey, - const google::protobuf::RepeatedPtrField& encKeyMeta, - const CryptoKeyReaderPtr keyReader) { - StringMap keyMeta; - for (auto iter = encKeyMeta.begin(); iter != encKeyMeta.end(); iter++) { - keyMeta[iter->key()] = iter->value(); - } - - // Read the private key info using callback - EncryptionKeyInfo keyInfo; - keyReader->getPrivateKey(keyName, keyMeta, keyInfo); - - // Convert key from string to RSA key - RSA* privKey = loadPrivateKey(keyInfo.getKey()); - if (privKey == NULL) { - LOG_ERROR(logCtx_ << " Failed to load private key " << keyName); - return false; - } - LOG_DEBUG(logCtx_ << " Private key " << keyName << " loaded successfully."); - - // Decrypt data key - int outSize = RSA_private_decrypt(encryptedDataKey.size(), - reinterpret_cast(encryptedDataKey.c_str()), - dataKey_.get(), privKey, RSA_PKCS1_OAEP_PADDING); - - if (outSize == -1) { - LOG_ERROR(logCtx_ << "Failed to decrypt AES key for " << keyName); - return false; - } - - unsigned char keyDigest[EVP_MAX_MD_SIZE]; - unsigned int 
digestLen = 0; - if (!getDigest(keyName, encryptedDataKey.c_str(), encryptedDataKey.size(), keyDigest, digestLen)) { - LOG_ERROR(logCtx_ << "Failed to get digest for data key " << keyName); - return false; - } - - std::string keyDigestStr(reinterpret_cast(keyDigest), digestLen); - std::string dataKeyStr(reinterpret_cast(dataKey_.get()), dataKeyLen_); - dataKeyCache_[keyDigestStr] = make_pair(dataKeyStr, boost::posix_time::second_clock::universal_time()); - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string strHex = stringToHex(dataKeyStr, dataKeyStr.size()); - LOG_DEBUG(logCtx_ << "Data key for key " << keyName << " decrypted. Decrypted data key is " - << strHex); - } - - // Remove expired entries from the cache - removeExpiredDataKey(); - return true; -} - -bool MessageCrypto::decryptData(const std::string& dataKeySecret, const proto::MessageMetadata& msgMetadata, - SharedBuffer& payload, SharedBuffer& decryptedPayload) { - // unpack iv and encrypted data - msgMetadata.encryption_param().copy(reinterpret_cast(iv_.get()), - msgMetadata.encryption_param().size()); - - EVP_CIPHER_CTX* cipherCtx = NULL; - decryptedPayload = SharedBuffer::allocate(payload.readableBytes() + EVP_MAX_BLOCK_LENGTH + tagLen_); - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string strHex = stringToHex(payload.data(), payload.readableBytes()); - LOG_DEBUG(logCtx_ << "Attempting to decrypt data with encrypted size " << payload.readableBytes() - << ", data = " << strHex); - } - - if (!(cipherCtx = EVP_CIPHER_CTX_new())) { - LOG_ERROR(logCtx_ << " Failed to get cipher ctx"); - return false; - } - - if (!EVP_DecryptInit_ex(cipherCtx, EVP_aes_256_gcm(), NULL, - reinterpret_cast(dataKeySecret.c_str()), - reinterpret_cast(iv_.get()))) { - LOG_ERROR(logCtx_ << " Failed to init decrypt cipher ctx"); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - - if (EVP_CIPHER_CTX_set_padding(cipherCtx, EVP_CIPH_NO_PADDING) != 1) { - LOG_ERROR(logCtx_ 
<< " Failed to set cipher padding"); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - - int cipherLen = payload.readableBytes() - tagLen_; - int decLen = 0; - if (!EVP_DecryptUpdate(cipherCtx, reinterpret_cast(decryptedPayload.mutableData()), - &decLen, reinterpret_cast(payload.data()), cipherLen)) { - LOG_ERROR(logCtx_ << " Failed to decrypt update"); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - }; - decryptedPayload.bytesWritten(decLen); - - if (!EVP_CIPHER_CTX_ctrl(cipherCtx, EVP_CTRL_GCM_SET_TAG, tagLen_, (void*)(payload.data() + cipherLen))) { - LOG_ERROR(logCtx_ << " Failed to set gcm tag"); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - - if (!EVP_DecryptFinal_ex(cipherCtx, reinterpret_cast(decryptedPayload.mutableData()), - &decLen)) { - LOG_ERROR(logCtx_ << " Failed to finalize encrypted message"); - EVP_CIPHER_CTX_free(cipherCtx); - return false; - } - decryptedPayload.bytesWritten(decLen); - if (PULSAR_UNLIKELY(logger()->isEnabled(Logger::LEVEL_DEBUG))) { - std::string strHex = stringToHex(decryptedPayload.data(), decryptedPayload.readableBytes()); - LOG_DEBUG(logCtx_ << "Data decrypted. 
Decrypted size = " << decryptedPayload.readableBytes() - << ", data = " << strHex); - } - - EVP_CIPHER_CTX_free(cipherCtx); - - return true; -} - -bool MessageCrypto::getKeyAndDecryptData(const proto::MessageMetadata& msgMetadata, SharedBuffer& payload, - SharedBuffer& decryptedPayload) { - SharedBuffer decryptedData; - bool dataDecrypted = false; - - for (auto iter = msgMetadata.encryption_keys().begin(); iter != msgMetadata.encryption_keys().end(); - iter++) { - const std::string& keyName = iter->key(); - const std::string& encDataKey = iter->value(); - unsigned char keyDigest[EVP_MAX_MD_SIZE]; - unsigned int digestLen = 0; - getDigest(keyName, encDataKey.c_str(), encDataKey.size(), keyDigest, digestLen); - - std::string keyDigestStr(reinterpret_cast(keyDigest), digestLen); - - auto dataKeyCacheIter = dataKeyCache_.find(keyDigestStr); - if (dataKeyCacheIter != dataKeyCache_.end()) { - // Taking a small performance hit here if the hash collides. When it - // retruns a different key, decryption fails. At this point, we would - // call decryptDataKey to refresh the cache and come here again to decrypt. - auto dataKeyEntry = dataKeyCacheIter->second; - if (decryptData(dataKeyEntry.first, msgMetadata, payload, decryptedPayload)) { - dataDecrypted = true; - break; - } - } else { - // First time, entry won't be present in cache - LOG_DEBUG(logCtx_ << " Failed to decrypt data or data key is not in cache for " - << keyName + ". Will attempt to refresh."); - } - } - return dataDecrypted; -} - -bool MessageCrypto::decrypt(const proto::MessageMetadata& msgMetadata, SharedBuffer& payload, - const CryptoKeyReaderPtr keyReader, SharedBuffer& decryptedPayload) { - // Attempt to decrypt using the existing key - if (getKeyAndDecryptData(msgMetadata, payload, decryptedPayload)) { - return true; - } - - // Either first time, or decryption failed. 
Attempt to regenerate data key - bool isDataKeyDecrypted = false; - for (int index = 0; index < msgMetadata.encryption_keys_size(); index++) { - const proto::EncryptionKeys& encKeys = msgMetadata.encryption_keys(index); - - const std::string& encDataKey = encKeys.value(); - const google::protobuf::RepeatedPtrField& encKeyMeta = encKeys.metadata(); - if (decryptDataKey(encKeys.key(), encDataKey, encKeyMeta, keyReader)) { - isDataKeyDecrypted = true; - break; - } - } - - if (!isDataKeyDecrypted) { - // Unable to decrypt data key - return false; - } - - return getKeyAndDecryptData(msgMetadata, payload, decryptedPayload); -} - -} /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/MessageCrypto.h b/pulsar-client-cpp/lib/MessageCrypto.h deleted file mode 100644 index 21720665f3895..0000000000000 --- a/pulsar-client-cpp/lib/MessageCrypto.h +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_MESSAGECRYPTO_H_ -#define LIB_MESSAGECRYPTO_H_ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "SharedBuffer.h" -#include "ExecutorService.h" -#include "pulsar/CryptoKeyReader.h" -#include "PulsarApi.pb.h" - -namespace pulsar { - -class MessageCrypto { - public: - typedef std::map StringMap; - typedef std::map> DataKeyCacheMap; - - MessageCrypto(std::string& logCtx, bool keyGenNeeded); - ~MessageCrypto(); - - /* - * Encrypt data key using the public key(s) in the argument.

If more than one key name is specified, - * data key is encrypted using each of those keys. If the public key is expired or changed, application is - * responsible to remove the old key and add the new key

- * - * @param keyNames List of public keys to encrypt data key - * @param keyReader Implementation to read the key values - * @return ResultOk if succeeded - * - */ - Result addPublicKeyCipher(const std::set& keyNames, const CryptoKeyReaderPtr keyReader); - - /* - * Remove a key

Remove the key identified by the keyName from the list of keys.

- * - * @param keyName Unique name to identify the key - * @return true if succeeded, false otherwise - */ - bool removeKeyCipher(const std::string& keyName); - - /* - * Encrypt the payload using the data key and update message metadata with the keyname & encrypted data - * key - * - * @param encKeys One or more public keys to encrypt data key - * @param keyReader Implementation to read the key values - * @param msgMetadata Message Metadata - * @param payload Message which needs to be encrypted - * @param encryptedPayload Contains encrypted payload if success - * - * @return true if success - */ - bool encrypt(const std::set& encKeys, const CryptoKeyReaderPtr keyReader, - proto::MessageMetadata& msgMetadata, SharedBuffer& payload, SharedBuffer& encryptedPayload); - - /* - * Decrypt the payload using the data key. Keys used to encrypt data key can be retrieved from msgMetadata - * - * @param msgMetadata Message Metadata - * @param payload Message which needs to be decrypted - * @param keyReader KeyReader implementation to retrieve key value - * @param decryptedPayload Contains decrypted payload if success - * - * @return true if success - */ - bool decrypt(const proto::MessageMetadata& msgMetadata, SharedBuffer& payload, - const CryptoKeyReaderPtr keyReader, SharedBuffer& decryptedPayload); - - private: - typedef std::unique_lock Lock; - std::mutex mutex_; - - int dataKeyLen_; - boost::scoped_array dataKey_; - - int tagLen_; - int ivLen_; - boost::scoped_array iv_; - - std::string logCtx_; - - /* This cache uses the digest of encrypted data key as it's key. It's possible - * for consumers to receive messages with data key encrypted using older or - * newer version of public key. If we use the key name as the key for dataKeyCache, - * we will end up decrypting data key way too often which is costly. 
- */ - DataKeyCacheMap dataKeyCache_; - - // Map of key name and encrypted gcm key, metadata pair which is sent with encrypted message - std::map> encryptedDataKeyMap_; - - EVP_MD_CTX* mdCtx_; - - RSA* loadPublicKey(std::string& pubKeyStr); - RSA* loadPrivateKey(std::string& privateKeyStr); - bool getDigest(const std::string& keyName, const void* input, unsigned int inputLen, - unsigned char keyDigest[], unsigned int& digestLen); - void removeExpiredDataKey(); - - Result addPublicKeyCipher(const std::string& keyName, const CryptoKeyReaderPtr keyReader); - - bool decryptDataKey(const std::string& keyName, const std::string& encryptedDataKey, - const google::protobuf::RepeatedPtrField& encKeyMeta, - const CryptoKeyReaderPtr keyReader); - bool decryptData(const std::string& dataKeySecret, const proto::MessageMetadata& msgMetadata, - SharedBuffer& payload, SharedBuffer& decPayload); - bool getKeyAndDecryptData(const proto::MessageMetadata& msgMetadata, SharedBuffer& payload, - SharedBuffer& decryptedPayload); - std::string stringToHex(const std::string& inputStr, size_t len); - std::string stringToHex(const char* inputStr, size_t len); -}; - -} /* namespace pulsar */ - -#endif /* LIB_MESSAGECRYPTO_H_ */ diff --git a/pulsar-client-cpp/lib/MessageId.cc b/pulsar-client-cpp/lib/MessageId.cc deleted file mode 100644 index 31b01548dadf2..0000000000000 --- a/pulsar-client-cpp/lib/MessageId.cc +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include - -#include "PulsarApi.pb.h" -#include "MessageIdImpl.h" - -#include -#include -#include -#include -#include -#include - -namespace pulsar { - -MessageId::MessageId() { - static const MessageIdImplPtr emptyMessageId = std::make_shared(); - impl_ = emptyMessageId; -} - -MessageId& MessageId::operator=(const MessageId& m) { - impl_ = m.impl_; - return *this; -} - -MessageId::MessageId(int32_t partition, int64_t ledgerId, int64_t entryId, int32_t batchIndex) - : impl_(std::make_shared(partition, ledgerId, entryId, batchIndex)) {} - -const MessageId& MessageId::earliest() { - static const MessageId _earliest(-1, -1, -1, -1); - return _earliest; -} - -const MessageId& MessageId::latest() { - static const int64_t long_max = std::numeric_limits::max(); - static const MessageId _latest(-1, long_max, long_max, -1); - return _latest; -} - -void MessageId::serialize(std::string& result) const { - proto::MessageIdData idData; - idData.set_ledgerid(impl_->ledgerId_); - idData.set_entryid(impl_->entryId_); - if (impl_->partition_ != -1) { - idData.set_partition(impl_->partition_); - } - - if (impl_->batchIndex_ != -1) { - idData.set_batch_index(impl_->batchIndex_); - } - - idData.SerializeToString(&result); -} - -/** - * Deserialize a message id from a binary string - */ -MessageId MessageId::deserialize(const std::string& serializedMessageId) { - proto::MessageIdData idData; - if (!idData.ParseFromString(serializedMessageId)) { - throw std::invalid_argument("Failed to parse serialized message id"); - } - - return 
MessageId(idData.partition(), idData.ledgerid(), idData.entryid(), idData.batch_index()); -} - -int64_t MessageId::ledgerId() const { return impl_->ledgerId_; } - -int64_t MessageId::entryId() const { return impl_->entryId_; } - -int32_t MessageId::batchIndex() const { return impl_->batchIndex_; } - -int32_t MessageId::partition() const { return impl_->partition_; } - -PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, const pulsar::MessageId& messageId) { - s << '(' << messageId.impl_->ledgerId_ << ',' << messageId.impl_->entryId_ << ',' - << messageId.impl_->partition_ << ',' << messageId.impl_->batchIndex_ << ')'; - return s; -} - -PULSAR_PUBLIC bool MessageId::operator<(const MessageId& other) const { - if (impl_->ledgerId_ < other.impl_->ledgerId_) { - return true; - } else if (impl_->ledgerId_ > other.impl_->ledgerId_) { - return false; - } - - if (impl_->entryId_ < other.impl_->entryId_) { - return true; - } else if (impl_->entryId_ > other.impl_->entryId_) { - return false; - } - - if (impl_->batchIndex_ < other.impl_->batchIndex_) { - return true; - } else { - return false; - } -} - -PULSAR_PUBLIC bool MessageId::operator<=(const MessageId& other) const { - return *this < other || *this == other; -} - -PULSAR_PUBLIC bool MessageId::operator>(const MessageId& other) const { return !(*this <= other); } - -PULSAR_PUBLIC bool MessageId::operator>=(const MessageId& other) const { return !(*this < other); } - -PULSAR_PUBLIC bool MessageId::operator==(const MessageId& other) const { - return impl_->ledgerId_ == other.impl_->ledgerId_ && impl_->entryId_ == other.impl_->entryId_ && - impl_->batchIndex_ == other.impl_->batchIndex_ && impl_->partition_ == other.impl_->partition_; -} - -PULSAR_PUBLIC bool MessageId::operator!=(const MessageId& other) const { return !(*this == other); } - -PULSAR_PUBLIC const std::string& MessageId::getTopicName() const { return impl_->getTopicName(); } - -PULSAR_PUBLIC void MessageId::setTopicName(const std::string& topicName) { 
- return impl_->setTopicName(topicName); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageIdImpl.h b/pulsar-client-cpp/lib/MessageIdImpl.h deleted file mode 100644 index ae33da4cfe431..0000000000000 --- a/pulsar-client-cpp/lib/MessageIdImpl.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include - -namespace pulsar { - -class MessageIdImpl { - public: - MessageIdImpl() : ledgerId_(-1), entryId_(-1), partition_(-1), batchIndex_(-1), topicName_() {} - MessageIdImpl(int32_t partition, int64_t ledgerId, int64_t entryId, int32_t batchIndex) - : ledgerId_(ledgerId), - entryId_(entryId), - partition_(partition), - batchIndex_(batchIndex), - topicName_() {} - const int64_t ledgerId_; - const int64_t entryId_; - const int32_t partition_; - const int32_t batchIndex_; - - const std::string& getTopicName() { return *topicName_; } - void setTopicName(const std::string& topicName) { topicName_ = &topicName; } - - private: - const std::string* topicName_; - friend class MessageImpl; - friend class MultiTopicsConsumerImpl; - friend class UnAckedMessageTrackerEnabled; -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageIdUtil.h b/pulsar-client-cpp/lib/MessageIdUtil.h deleted file mode 100644 index d6f80a10ea015..0000000000000 --- a/pulsar-client-cpp/lib/MessageIdUtil.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include "PulsarApi.pb.h" - -namespace pulsar { - -inline MessageId toMessageId(const proto::MessageIdData& messageIdData) { - return MessageId{messageIdData.partition(), static_cast(messageIdData.ledgerid()), - static_cast(messageIdData.entryid()), messageIdData.batch_index()}; -} - -namespace internal { -template -static int compare(T lhs, T rhs) { - return (lhs < rhs) ? -1 : ((lhs == rhs) ? 0 : 1); -} -} // namespace internal - -inline int compareLedgerAndEntryId(const MessageId& lhs, const MessageId& rhs) { - auto result = internal::compare(lhs.ledgerId(), rhs.ledgerId()); - if (result != 0) { - return result; - } - return internal::compare(lhs.entryId(), rhs.entryId()); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageImpl.cc b/pulsar-client-cpp/lib/MessageImpl.cc deleted file mode 100644 index 5d1edbfe05cde..0000000000000 --- a/pulsar-client-cpp/lib/MessageImpl.cc +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "MessageImpl.h" - -namespace pulsar { - -MessageImpl::MessageImpl() : metadata(), payload(), messageId(), cnx_(0), topicName_(), redeliveryCount_() {} - -const Message::StringMap& MessageImpl::properties() { - if (properties_.size() == 0) { - for (int i = 0; i < metadata.properties_size(); i++) { - const std::string& key = metadata.properties(i).key(); - const std::string& value = metadata.properties(i).value(); - properties_.insert(std::make_pair(key, value)); - } - } - return properties_; -} - -const std::string& MessageImpl::getPartitionKey() const { return metadata.partition_key(); } - -bool MessageImpl::hasPartitionKey() const { return metadata.has_partition_key(); } - -const std::string& MessageImpl::getOrderingKey() const { return metadata.ordering_key(); } - -bool MessageImpl::hasOrderingKey() const { return metadata.has_ordering_key(); } - -uint64_t MessageImpl::getPublishTimestamp() const { - if (metadata.has_publish_time()) { - return metadata.publish_time(); - } else { - return 0ull; - } -} - -uint64_t MessageImpl::getEventTimestamp() const { - if (metadata.has_event_time()) { - return metadata.event_time(); - } else { - return 0ull; - } -} - -void MessageImpl::setReplicationClusters(const std::vector& clusters) { - google::protobuf::RepeatedPtrField r(clusters.begin(), clusters.end()); - r.Swap(metadata.mutable_replicate_to()); -} - -void MessageImpl::disableReplication(bool flag) { - google::protobuf::RepeatedPtrField r; - if (flag) { - r.AddAllocated(new std::string("__local__")); - } - r.Swap(metadata.mutable_replicate_to()); -} - -void MessageImpl::setProperty(const std::string& name, const std::string& value) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(name); - keyValue->set_value(value); - metadata.mutable_properties()->AddAllocated(keyValue); -} - -void MessageImpl::setPartitionKey(const std::string& partitionKey) { - metadata.set_partition_key(partitionKey); -} - -void 
MessageImpl::setOrderingKey(const std::string& orderingKey) { metadata.set_ordering_key(orderingKey); } - -void MessageImpl::setEventTimestamp(uint64_t eventTimestamp) { metadata.set_event_time(eventTimestamp); } - -void MessageImpl::setTopicName(const std::string& topicName) { - topicName_ = &topicName; - messageId.setTopicName(topicName); -} - -const std::string& MessageImpl::getTopicName() { return *topicName_; } - -int MessageImpl::getRedeliveryCount() { return redeliveryCount_; } - -void MessageImpl::setRedeliveryCount(int count) { redeliveryCount_ = count; } - -bool MessageImpl::hasSchemaVersion() const { return metadata.has_schema_version(); } - -void MessageImpl::setSchemaVersion(const std::string& schemaVersion) { schemaVersion_ = &schemaVersion; } - -const std::string& MessageImpl::getSchemaVersion() const { return metadata.schema_version(); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MessageImpl.h b/pulsar-client-cpp/lib/MessageImpl.h deleted file mode 100644 index c9a37f43d0967..0000000000000 --- a/pulsar-client-cpp/lib/MessageImpl.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_MESSAGEIMPL_H_ -#define LIB_MESSAGEIMPL_H_ - -#include -#include -#include "SharedBuffer.h" -#include "PulsarApi.pb.h" - -using namespace pulsar; -namespace pulsar { - -class PulsarWrapper; -class ClientConnection; -class BatchMessageContainer; - -class MessageImpl { - public: - MessageImpl(); - - const Message::StringMap& properties(); - - proto::MessageMetadata metadata; - SharedBuffer payload; - MessageId messageId; - ClientConnection* cnx_; - const std::string* topicName_; - int redeliveryCount_; - bool hasSchemaVersion_; - const std::string* schemaVersion_; - - const std::string& getPartitionKey() const; - bool hasPartitionKey() const; - - const std::string& getOrderingKey() const; - bool hasOrderingKey() const; - - uint64_t getPublishTimestamp() const; - uint64_t getEventTimestamp() const; - - /** - * Get the topic Name from which this message originated from - */ - const std::string& getTopicName(); - - /** - * Set a valid topicName - */ - void setTopicName(const std::string& topicName); - - int getRedeliveryCount(); - void setRedeliveryCount(int count); - - bool hasSchemaVersion() const; - const std::string& getSchemaVersion() const; - void setSchemaVersion(const std::string& value); - - friend class PulsarWrapper; - friend class MessageBuilder; - - private: - void setReplicationClusters(const std::vector& clusters); - void setProperty(const std::string& name, const std::string& value); - void disableReplication(bool flag); - void setPartitionKey(const std::string& partitionKey); - void setOrderingKey(const std::string& orderingKey); - void setEventTimestamp(uint64_t eventTimestamp); - Message::StringMap properties_; -}; -} // namespace pulsar - -#endif /* LIB_MESSAGEIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/MessageRouterBase.cc b/pulsar-client-cpp/lib/MessageRouterBase.cc deleted file mode 100644 index c0824f9e0a301..0000000000000 --- a/pulsar-client-cpp/lib/MessageRouterBase.cc +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "MessageRouterBase.h" - -#include "BoostHash.h" -#include "JavaStringHash.h" -#include "Murmur3_32Hash.h" - -namespace pulsar { -MessageRouterBase::MessageRouterBase(ProducerConfiguration::HashingScheme hashingScheme) { - switch (hashingScheme) { - case ProducerConfiguration::BoostHash: - hash = HashPtr(new BoostHash()); - break; - case ProducerConfiguration::JavaStringHash: - hash = HashPtr(new JavaStringHash()); - break; - case ProducerConfiguration::Murmur3_32Hash: - default: - hash = HashPtr(new Murmur3_32Hash()); - break; - } -} -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/MessageRouterBase.h b/pulsar-client-cpp/lib/MessageRouterBase.h deleted file mode 100644 index 39374a1871658..0000000000000 --- a/pulsar-client-cpp/lib/MessageRouterBase.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_MESSAGEROUTERBASE_H -#define PULSAR_CPP_MESSAGEROUTERBASE_H - -#include - -#include -#include -#include "Hash.h" - -namespace pulsar { -typedef std::unique_ptr HashPtr; - -class MessageRouterBase : public MessageRoutingPolicy { - public: - MessageRouterBase(ProducerConfiguration::HashingScheme hashingScheme); - - protected: - HashPtr hash; -}; -} // namespace pulsar - -#endif // PULSAR_CPP_MESSAGEROUTERBASE_H diff --git a/pulsar-client-cpp/lib/MultiResultCallback.h b/pulsar-client-cpp/lib/MultiResultCallback.h deleted file mode 100644 index 739bc4a6dd18a..0000000000000 --- a/pulsar-client-cpp/lib/MultiResultCallback.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include // for ResultCallback - -#include -#include - -namespace pulsar { - -class MultiResultCallback { - public: - MultiResultCallback(ResultCallback callback, int numToComplete) - : callback_(callback), - numToComplete_(numToComplete), - numCompletedPtr_(std::make_shared(0)) {} - - void operator()(Result result) { - if (result == ResultOk) { - if (++(*numCompletedPtr_) == numToComplete_) { - callback_(result); - } - } else { - callback_(result); - } - } - - private: - ResultCallback callback_; - const int numToComplete_; - const std::shared_ptr numCompletedPtr_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.cc b/pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.cc deleted file mode 100644 index 5220307bdb7a6..0000000000000 --- a/pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.cc +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include - -using namespace pulsar; - -const std::string MultiTopicsBrokerConsumerStatsImpl::DELIMITER = ";"; - -MultiTopicsBrokerConsumerStatsImpl::MultiTopicsBrokerConsumerStatsImpl(size_t size) { - statsList_.resize(size); -} - -bool MultiTopicsBrokerConsumerStatsImpl::isValid() const { - bool isValid = true; - for (int i = 0; i < statsList_.size(); i++) { - isValid = isValid && statsList_[i].isValid(); - } - return isValid; -} - -std::ostream& operator<<(std::ostream& os, const MultiTopicsBrokerConsumerStatsImpl& obj) { - os << "\nMultiTopicsBrokerConsumerStatsImpl [" - << "validTill_ = " << obj.isValid() << ", msgRateOut_ = " << obj.getMsgRateOut() - << ", msgThroughputOut_ = " << obj.getMsgThroughputOut() - << ", msgRateRedeliver_ = " << obj.getMsgRateRedeliver() - << ", consumerName_ = " << obj.getConsumerName() - << ", availablePermits_ = " << obj.getAvailablePermits() - << ", unackedMessages_ = " << obj.getUnackedMessages() - << ", blockedConsumerOnUnackedMsgs_ = " << obj.isBlockedConsumerOnUnackedMsgs() - << ", address_ = " << obj.getAddress() << ", connectedSince_ = " << obj.getConnectedSince() - << ", type_ = " << obj.getType() << ", msgRateExpired_ = " << obj.getMsgRateExpired() - << ", msgBacklog_ = " << obj.getMsgBacklog() << "]"; - return os; -} - -double MultiTopicsBrokerConsumerStatsImpl::getMsgRateOut() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgRateOut(); - } - return sum; -} - -double MultiTopicsBrokerConsumerStatsImpl::getMsgThroughputOut() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgThroughputOut(); - } - return sum; -} - -double MultiTopicsBrokerConsumerStatsImpl::getMsgRateRedeliver() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgRateRedeliver(); - } - return sum; -} - -const std::string 
MultiTopicsBrokerConsumerStatsImpl::getConsumerName() const { - std::string str; - for (int i = 0; i < statsList_.size(); i++) { - str += statsList_[i].getConsumerName() + DELIMITER; - } - return str; -} - -uint64_t MultiTopicsBrokerConsumerStatsImpl::getAvailablePermits() const { - uint64_t sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getAvailablePermits(); - } - return sum; -} - -uint64_t MultiTopicsBrokerConsumerStatsImpl::getUnackedMessages() const { - uint64_t sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getUnackedMessages(); - } - return sum; -} - -bool MultiTopicsBrokerConsumerStatsImpl::isBlockedConsumerOnUnackedMsgs() const { - if (statsList_.size() == 0) { - return false; - } - - return isValid(); -} - -const std::string MultiTopicsBrokerConsumerStatsImpl::getAddress() const { - std::stringstream str; - for (int i = 0; i < statsList_.size(); i++) { - str << statsList_[i].getAddress() << DELIMITER; - } - return str.str(); -} - -const std::string MultiTopicsBrokerConsumerStatsImpl::getConnectedSince() const { - std::stringstream str; - for (int i = 0; i < statsList_.size(); i++) { - str << statsList_[i].getConnectedSince() << DELIMITER; - } - return str.str(); -} - -const ConsumerType MultiTopicsBrokerConsumerStatsImpl::getType() const { - if (!statsList_.size()) { - return ConsumerExclusive; - } - return statsList_[0].getType(); -} - -double MultiTopicsBrokerConsumerStatsImpl::getMsgRateExpired() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgRateExpired(); - } - return sum; -} - -uint64_t MultiTopicsBrokerConsumerStatsImpl::getMsgBacklog() const { - uint64_t sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgBacklog(); - } - return sum; -} - -BrokerConsumerStats MultiTopicsBrokerConsumerStatsImpl::getBrokerConsumerStats(int index) { - return statsList_[index]; -} - -void 
MultiTopicsBrokerConsumerStatsImpl::add(BrokerConsumerStats stats, int index) { - statsList_[index] = stats; -} - -void MultiTopicsBrokerConsumerStatsImpl::clear() { statsList_.clear(); } diff --git a/pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.h b/pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.h deleted file mode 100644 index a76ecdce055a4..0000000000000 --- a/pulsar-client-cpp/lib/MultiTopicsBrokerConsumerStatsImpl.h +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_MULTITOPICSBROKERCONSUMERSTATSIMPL_H -#define PULSAR_CPP_MULTITOPICSBROKERCONSUMERSTATSIMPL_H - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC MultiTopicsBrokerConsumerStatsImpl : public BrokerConsumerStatsImplBase { - private: - std::vector statsList_; - static const std::string DELIMITER; - - public: - MultiTopicsBrokerConsumerStatsImpl(size_t size); - - /** Returns true if the Stats are still valid **/ - virtual bool isValid() const; - - /** Returns the rate of messages delivered to the consumer. 
msg/s */ - virtual double getMsgRateOut() const; - - /** Returns the throughput delivered to the consumer. bytes/s */ - virtual double getMsgThroughputOut() const; - - /** Returns the rate of messages redelivered by this consumer. msg/s */ - virtual double getMsgRateRedeliver() const; - - /** Returns the Name of the consumer */ - virtual const std::string getConsumerName() const; - - /** Returns the Number of available message permits for the consumer */ - virtual uint64_t getAvailablePermits() const; - - /** Returns the Number of unacknowledged messages for the consumer */ - virtual uint64_t getUnackedMessages() const; - - /** Returns true if the consumer is blocked due to unacked messages. */ - virtual bool isBlockedConsumerOnUnackedMsgs() const; - - /** Returns the Address of this consumer */ - virtual const std::string getAddress() const; - - /** Returns the Timestamp of connection */ - virtual const std::string getConnectedSince() const; - - /** Returns Whether this subscription is Exclusive or Shared or Failover */ - virtual const ConsumerType getType() const; - - /** Returns the rate of messages expired on this subscription. 
msg/s */ - virtual double getMsgRateExpired() const; - - /** Returns the Number of messages in the subscription backlog */ - virtual uint64_t getMsgBacklog() const; - - /** Returns the BrokerConsumerStatsImpl at of ith partition */ - BrokerConsumerStats getBrokerConsumerStats(int index); - - void add(BrokerConsumerStats stats, int index); - - void clear(); - - friend std::ostream &operator<<(std::ostream &os, const MultiTopicsBrokerConsumerStatsImpl &obj); -}; -typedef std::shared_ptr MultiTopicsBrokerConsumerStatsPtr; -} // namespace pulsar - -#endif // PULSAR_CPP_MULTITOPICSBROKERCONSUMERSTATSIMPL_H diff --git a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc deleted file mode 100644 index 0d730e1561f72..0000000000000 --- a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc +++ /dev/null @@ -1,834 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "MultiTopicsConsumerImpl.h" -#include "MultiResultCallback.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -MultiTopicsConsumerImpl::MultiTopicsConsumerImpl(ClientImplPtr client, const std::vector& topics, - const std::string& subscriptionName, TopicNamePtr topicName, - const ConsumerConfiguration& conf, - LookupServicePtr lookupServicePtr) - : client_(client), - subscriptionName_(subscriptionName), - topic_(topicName ? topicName->toString() : "EmptyTopics"), - conf_(conf), - messages_(conf.getReceiverQueueSize()), - listenerExecutor_(client->getListenerExecutorProvider()->get()), - messageListener_(conf.getMessageListener()), - lookupServicePtr_(lookupServicePtr), - numberTopicPartitions_(std::make_shared>(0)), - topics_(topics) { - std::stringstream consumerStrStream; - consumerStrStream << "[Muti Topics Consumer: " - << "TopicName - " << topic_ << " - Subscription - " << subscriptionName << "]"; - consumerStr_ = consumerStrStream.str(); - - if (conf.getUnAckedMessagesTimeoutMs() != 0) { - if (conf.getTickDurationInMs() > 0) { - unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerEnabled( - conf.getUnAckedMessagesTimeoutMs(), conf.getTickDurationInMs(), client, *this)); - } else { - unAckedMessageTrackerPtr_.reset( - new UnAckedMessageTrackerEnabled(conf.getUnAckedMessagesTimeoutMs(), client, *this)); - } - } else { - unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerDisabled()); - } - auto partitionsUpdateInterval = static_cast(client_->conf().getPartitionsUpdateInterval()); - if (partitionsUpdateInterval > 0) { - partitionsUpdateTimer_ = listenerExecutor_->createDeadlineTimer(); - partitionsUpdateInterval_ = boost::posix_time::seconds(partitionsUpdateInterval); - lookupServicePtr_ = client_->getLookup(); - } -} - -void MultiTopicsConsumerImpl::start() { - if (topics_.empty()) { - MultiTopicsConsumerState state = Pending; - if (state_.compare_exchange_strong(state, Ready)) { - LOG_DEBUG("No topics passed in when create 
MultiTopicsConsumer."); - multiTopicsConsumerCreatedPromise_.setValue(shared_from_this()); - return; - } else { - LOG_ERROR("Consumer " << consumerStr_ << " in wrong state: " << state_); - multiTopicsConsumerCreatedPromise_.setFailed(ResultUnknownError); - return; - } - } - - // start call subscribeOneTopicAsync for each single topic - int topicsNumber = topics_.size(); - std::shared_ptr> topicsNeedCreate = std::make_shared>(topicsNumber); - // subscribe for each passed in topic - for (std::vector::const_iterator itr = topics_.begin(); itr != topics_.end(); itr++) { - subscribeOneTopicAsync(*itr).addListener(std::bind(&MultiTopicsConsumerImpl::handleOneTopicSubscribed, - shared_from_this(), std::placeholders::_1, - std::placeholders::_2, *itr, topicsNeedCreate)); - } -} - -void MultiTopicsConsumerImpl::handleOneTopicSubscribed(Result result, Consumer consumer, - const std::string& topic, - std::shared_ptr> topicsNeedCreate) { - if (result != ResultOk) { - state_ = Failed; - // Use the first failed result - auto expectedResult = ResultOk; - failedResult.compare_exchange_strong(expectedResult, result); - LOG_ERROR("Failed when subscribed to topic " << topic << " in TopicsConsumer. 
Error - " << result); - } else { - LOG_DEBUG("Subscribed to topic " << topic << " in TopicsConsumer "); - } - - if (--(*topicsNeedCreate) == 0) { - MultiTopicsConsumerState state = Pending; - if (state_.compare_exchange_strong(state, Ready)) { - LOG_INFO("Successfully Subscribed to Topics"); - multiTopicsConsumerCreatedPromise_.setValue(shared_from_this()); - } else { - LOG_ERROR("Unable to create Consumer - " << consumerStr_ << " Error - " << result); - // unsubscribed all of the successfully subscribed partitioned consumers - // It's safe to capture only this here, because the callback can be called only when this is valid - closeAsync( - [this](Result result) { multiTopicsConsumerCreatedPromise_.setFailed(failedResult.load()); }); - } - } -} - -// subscribe for passed in topic -Future MultiTopicsConsumerImpl::subscribeOneTopicAsync(const std::string& topic) { - TopicNamePtr topicName; - ConsumerSubResultPromisePtr topicPromise = std::make_shared>(); - if (!(topicName = TopicName::get(topic))) { - LOG_ERROR("TopicName invalid: " << topic); - topicPromise->setFailed(ResultInvalidTopicName); - return topicPromise->getFuture(); - } - - const auto state = state_.load(); - if (state == Closed || state == Closing) { - LOG_ERROR("MultiTopicsConsumer already closed when subscribe."); - topicPromise->setFailed(ResultAlreadyClosed); - return topicPromise->getFuture(); - } - - // subscribe for each partition, when all partitions completed, complete promise - Lock lock(mutex_); - auto entry = topicsPartitions_.find(topic); - if (entry == topicsPartitions_.end()) { - lock.unlock(); - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( - [this, topicName, topicPromise](Result result, const LookupDataResultPtr& lookupDataResult) { - if (result != ResultOk) { - LOG_ERROR("Error Checking/Getting Partition Metadata while MultiTopics Subscribing- " - << consumerStr_ << " result: " << result) - topicPromise->setFailed(result); - return; - } - 
subscribeTopicPartitions(lookupDataResult->getPartitions(), topicName, subscriptionName_, - topicPromise); - }); - } else { - auto numPartitions = entry->second; - lock.unlock(); - subscribeTopicPartitions(numPartitions, topicName, subscriptionName_, topicPromise); - } - return topicPromise->getFuture(); -} - -void MultiTopicsConsumerImpl::subscribeTopicPartitions(int numPartitions, TopicNamePtr topicName, - const std::string& consumerName, - ConsumerSubResultPromisePtr topicSubResultPromise) { - std::shared_ptr consumer; - ConsumerConfiguration config = conf_.clone(); - ExecutorServicePtr internalListenerExecutor = client_->getPartitionListenerExecutorProvider()->get(); - - config.setMessageListener(std::bind(&MultiTopicsConsumerImpl::messageReceived, shared_from_this(), - std::placeholders::_1, std::placeholders::_2)); - - int partitions = numPartitions == 0 ? 1 : numPartitions; - - // Apply total limit of receiver queue size across partitions - config.setReceiverQueueSize( - std::min(conf_.getReceiverQueueSize(), - (int)(conf_.getMaxTotalReceiverQueueSizeAcrossPartitions() / partitions))); - - Lock lock(mutex_); - topicsPartitions_[topicName->toString()] = partitions; - lock.unlock(); - numberTopicPartitions_->fetch_add(partitions); - - std::shared_ptr> partitionsNeedCreate = std::make_shared>(partitions); - - // non-partitioned topic - if (numPartitions == 0) { - // We don't have to add partition-n suffix - consumer = std::make_shared(client_, topicName->toString(), subscriptionName_, config, - topicName->isPersistent(), internalListenerExecutor, true, - NonPartitioned); - consumer->getConsumerCreatedFuture().addListener(std::bind( - &MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); - consumers_.emplace(topicName->toString(), consumer); - LOG_DEBUG("Creating Consumer for - " << topicName << " - " << consumerStr_); - consumer->start(); - - } 
else { - for (int i = 0; i < numPartitions; i++) { - std::string topicPartitionName = topicName->getTopicPartitionName(i); - consumer = std::make_shared(client_, topicPartitionName, subscriptionName_, config, - topicName->isPersistent(), internalListenerExecutor, - true, Partitioned); - consumer->getConsumerCreatedFuture().addListener(std::bind( - &MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); - consumer->setPartitionIndex(i); - consumers_.emplace(topicPartitionName, consumer); - LOG_DEBUG("Creating Consumer for - " << topicPartitionName << " - " << consumerStr_); - consumer->start(); - } - } -} - -void MultiTopicsConsumerImpl::handleSingleConsumerCreated( - Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, - std::shared_ptr> partitionsNeedCreate, - ConsumerSubResultPromisePtr topicSubResultPromise) { - if (state_ == Failed) { - // one of the consumer creation failed, and we are cleaning up - topicSubResultPromise->setFailed(ResultAlreadyClosed); - LOG_ERROR("Unable to create Consumer " << consumerStr_ << " state == Failed, result: " << result); - return; - } - - int previous = partitionsNeedCreate->fetch_sub(1); - assert(previous > 0); - - if (result != ResultOk) { - topicSubResultPromise->setFailed(result); - LOG_ERROR("Unable to create Consumer - " << consumerStr_ << " Error - " << result); - return; - } - - LOG_INFO("Successfully Subscribed to a single partition of topic in TopicsConsumer. 
" - << "Partitions need to create : " << previous - 1); - - if (partitionsNeedCreate->load() == 0) { - if (partitionsUpdateTimer_) { - runPartitionUpdateTask(); - } - topicSubResultPromise->setValue(Consumer(shared_from_this())); - } -} - -void MultiTopicsConsumerImpl::unsubscribeAsync(ResultCallback callback) { - LOG_INFO("[ Topics Consumer " << topic_ << "," << subscriptionName_ << "] Unsubscribing"); - - const auto state = state_.load(); - if (state == Closing || state == Closed) { - LOG_INFO(consumerStr_ << " already closed"); - callback(ResultAlreadyClosed); - return; - } - state_ = Closing; - - std::shared_ptr> consumerUnsubed = std::make_shared>(0); - auto self = shared_from_this(); - int numConsumers = 0; - consumers_.forEachValue( - [&numConsumers, &consumerUnsubed, &self, callback](const ConsumerImplPtr& consumer) { - numConsumers++; - consumer->unsubscribeAsync([self, consumerUnsubed, callback](Result result) { - self->handleUnsubscribedAsync(result, consumerUnsubed, callback); - }); - }); - if (numConsumers == 0) { - // No need to unsubscribe, since the list matching the regex was empty - callback(ResultOk); - } -} - -void MultiTopicsConsumerImpl::handleUnsubscribedAsync(Result result, - std::shared_ptr> consumerUnsubed, - ResultCallback callback) { - (*consumerUnsubed)++; - - if (result != ResultOk) { - state_ = Failed; - LOG_ERROR("Error Closing one of the consumers in TopicsConsumer, result: " - << result << " subscription - " << subscriptionName_); - } - - if (consumerUnsubed->load() == numberTopicPartitions_->load()) { - LOG_DEBUG("Unsubscribed all of the partition consumer for TopicsConsumer. - " << consumerStr_); - consumers_.clear(); - topicsPartitions_.clear(); - unAckedMessageTrackerPtr_->clear(); - - Result result1 = (state_ != Failed) ? 
ResultOk : ResultUnknownError; - state_ = Closed; - callback(result1); - return; - } -} - -void MultiTopicsConsumerImpl::unsubscribeOneTopicAsync(const std::string& topic, ResultCallback callback) { - Lock lock(mutex_); - std::map::iterator it = topicsPartitions_.find(topic); - if (it == topicsPartitions_.end()) { - lock.unlock(); - LOG_ERROR("TopicsConsumer does not subscribe topic : " << topic << " subscription - " - << subscriptionName_); - callback(ResultTopicNotFound); - return; - } - int numberPartitions = it->second; - lock.unlock(); - - const auto state = state_.load(); - if (state == Closing || state == Closed) { - LOG_ERROR("TopicsConsumer already closed when unsubscribe topic: " << topic << " subscription - " - << subscriptionName_); - callback(ResultAlreadyClosed); - return; - } - - TopicNamePtr topicName; - if (!(topicName = TopicName::get(topic))) { - LOG_ERROR("TopicName invalid: " << topic); - callback(ResultUnknownError); - } - std::shared_ptr> consumerUnsubed = std::make_shared>(0); - - for (int i = 0; i < numberPartitions; i++) { - std::string topicPartitionName = topicName->getTopicPartitionName(i); - auto optConsumer = consumers_.find(topicPartitionName); - if (optConsumer.is_empty()) { - LOG_ERROR("TopicsConsumer not subscribed on topicPartitionName: " << topicPartitionName); - callback(ResultUnknownError); - continue; - } - - optConsumer.value()->unsubscribeAsync( - std::bind(&MultiTopicsConsumerImpl::handleOneTopicUnsubscribedAsync, shared_from_this(), - std::placeholders::_1, consumerUnsubed, numberPartitions, topicName, topicPartitionName, - callback)); - } -} - -void MultiTopicsConsumerImpl::handleOneTopicUnsubscribedAsync( - Result result, std::shared_ptr> consumerUnsubed, int numberPartitions, - TopicNamePtr topicNamePtr, std::string& topicPartitionName, ResultCallback callback) { - (*consumerUnsubed)++; - - if (result != ResultOk) { - state_ = Failed; - LOG_ERROR("Error Closing one of the consumers in TopicsConsumer, result: " - << 
result << " topicPartitionName - " << topicPartitionName); - } - - LOG_DEBUG("Successfully Unsubscribed one Consumer. topicPartitionName - " << topicPartitionName); - - auto optConsumer = consumers_.remove(topicPartitionName); - if (optConsumer.is_present()) { - optConsumer.value()->pauseMessageListener(); - } - - if (consumerUnsubed->load() == numberPartitions) { - LOG_DEBUG("Unsubscribed all of the partition consumer for TopicsConsumer. - " << consumerStr_); - std::map::iterator it = topicsPartitions_.find(topicNamePtr->toString()); - if (it != topicsPartitions_.end()) { - numberTopicPartitions_->fetch_sub(numberPartitions); - Lock lock(mutex_); - topicsPartitions_.erase(it); - lock.unlock(); - } - if (state_ != Failed) { - callback(ResultOk); - } else { - callback(ResultUnknownError); - } - unAckedMessageTrackerPtr_->removeTopicMessage(topicNamePtr->toString()); - return; - } -} - -void MultiTopicsConsumerImpl::closeAsync(ResultCallback callback) { - const auto state = state_.load(); - if (state == Closing || state == Closed) { - LOG_ERROR("TopicsConsumer already closed " - << " topic" << topic_ << " consumer - " << consumerStr_); - if (callback) { - callback(ResultAlreadyClosed); - } - return; - } - - state_ = Closing; - - std::weak_ptr weakSelf{shared_from_this()}; - int numConsumers = 0; - consumers_.clear( - [this, weakSelf, &numConsumers, callback](const std::string& name, const ConsumerImplPtr& consumer) { - auto self = weakSelf.lock(); - if (!self) { - return; - } - numConsumers++; - consumer->closeAsync([this, weakSelf, name, callback](Result result) { - auto self = weakSelf.lock(); - if (!self) { - return; - } - LOG_DEBUG("Closing the consumer for partition - " << name << " numberTopicPartitions_ - " - << numberTopicPartitions_->load()); - const int numConsumersLeft = --*numberTopicPartitions_; - if (numConsumersLeft < 0) { - LOG_ERROR("[" << name << "] Unexpected number of left consumers: " << numConsumersLeft - << " during close"); - return; - } - if 
(result != ResultOk) { - state_ = Failed; - LOG_ERROR("Closing the consumer failed for partition - " << name << " with error - " - << result); - } - // closed all consumers - if (numConsumersLeft == 0) { - messages_.clear(); - topicsPartitions_.clear(); - unAckedMessageTrackerPtr_->clear(); - - if (state_ != Failed) { - state_ = Closed; - } - - if (callback) { - callback(result); - } - } - }); - }); - if (numConsumers == 0) { - LOG_DEBUG("TopicsConsumer have no consumers to close " - << " topic" << topic_ << " subscription - " << subscriptionName_); - state_ = Closed; - if (callback) { - callback(ResultAlreadyClosed); - } - return; - } - - // fail pending receive - failPendingReceiveCallback(); -} - -void MultiTopicsConsumerImpl::messageReceived(Consumer consumer, const Message& msg) { - LOG_DEBUG("Received Message from one of the topic - " << consumer.getTopic() - << " message:" << msg.getDataAsString()); - const std::string& topicPartitionName = consumer.getTopic(); - msg.impl_->setTopicName(topicPartitionName); - - Lock lock(pendingReceiveMutex_); - if (!pendingReceives_.empty()) { - ReceiveCallback callback = pendingReceives_.front(); - pendingReceives_.pop(); - lock.unlock(); - listenerExecutor_->postWork(std::bind(&MultiTopicsConsumerImpl::notifyPendingReceivedCallback, - shared_from_this(), ResultOk, msg, callback)); - } else { - if (messages_.full()) { - lock.unlock(); - } - - if (messages_.push(msg) && messageListener_) { - listenerExecutor_->postWork( - std::bind(&MultiTopicsConsumerImpl::internalListener, shared_from_this(), consumer)); - } - } -} - -void MultiTopicsConsumerImpl::internalListener(Consumer consumer) { - Message m; - messages_.pop(m); - unAckedMessageTrackerPtr_->add(m.getMessageId()); - try { - messageListener_(Consumer(shared_from_this()), m); - } catch (const std::exception& e) { - LOG_ERROR("Exception thrown from listener of Partitioned Consumer" << e.what()); - } -} - -Result MultiTopicsConsumerImpl::receive(Message& msg) { - if 
(state_ != Ready) { - return ResultAlreadyClosed; - } - - if (messageListener_) { - LOG_ERROR("Can not receive when a listener has been set"); - return ResultInvalidConfiguration; - } - messages_.pop(msg); - - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - return ResultOk; -} - -Result MultiTopicsConsumerImpl::receive(Message& msg, int timeout) { - if (state_ != Ready) { - return ResultAlreadyClosed; - } - - if (messageListener_) { - LOG_ERROR("Can not receive when a listener has been set"); - return ResultInvalidConfiguration; - } - - if (messages_.pop(msg, std::chrono::milliseconds(timeout))) { - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - return ResultOk; - } else { - if (state_ != Ready) { - return ResultAlreadyClosed; - } - return ResultTimeout; - } -} - -void MultiTopicsConsumerImpl::receiveAsync(ReceiveCallback& callback) { - Message msg; - - // fail the callback if consumer is closing or closed - if (state_ != Ready) { - callback(ResultAlreadyClosed, msg); - return; - } - - Lock lock(pendingReceiveMutex_); - if (messages_.pop(msg, std::chrono::milliseconds(0))) { - lock.unlock(); - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - callback(ResultOk, msg); - } else { - pendingReceives_.push(callback); - } -} - -void MultiTopicsConsumerImpl::failPendingReceiveCallback() { - Message msg; - - messages_.close(); - - Lock lock(pendingReceiveMutex_); - while (!pendingReceives_.empty()) { - ReceiveCallback callback = pendingReceives_.front(); - pendingReceives_.pop(); - listenerExecutor_->postWork(std::bind(&MultiTopicsConsumerImpl::notifyPendingReceivedCallback, - shared_from_this(), ResultAlreadyClosed, msg, callback)); - } - lock.unlock(); -} - -void MultiTopicsConsumerImpl::notifyPendingReceivedCallback(Result result, Message& msg, - const ReceiveCallback& callback) { - if (result == ResultOk) { - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - } - callback(result, msg); -} - -void MultiTopicsConsumerImpl::acknowledgeAsync(const 
MessageId& msgId, ResultCallback callback) { - if (state_ != Ready) { - callback(ResultAlreadyClosed); - return; - } - - const std::string& topicPartitionName = msgId.getTopicName(); - auto optConsumer = consumers_.find(topicPartitionName); - - if (optConsumer.is_present()) { - unAckedMessageTrackerPtr_->remove(msgId); - optConsumer.value()->acknowledgeAsync(msgId, callback); - } else { - LOG_ERROR("Message of topic: " << topicPartitionName << " not in unAckedMessageTracker"); - callback(ResultUnknownError); - } -} - -void MultiTopicsConsumerImpl::acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) { - callback(ResultOperationNotSupported); -} - -void MultiTopicsConsumerImpl::negativeAcknowledge(const MessageId& msgId) { - auto optConsumer = consumers_.find(msgId.getTopicName()); - - if (optConsumer.is_present()) { - unAckedMessageTrackerPtr_->remove(msgId); - optConsumer.value()->negativeAcknowledge(msgId); - } -} - -MultiTopicsConsumerImpl::~MultiTopicsConsumerImpl() {} - -Future MultiTopicsConsumerImpl::getConsumerCreatedFuture() { - return multiTopicsConsumerCreatedPromise_.getFuture(); -} -const std::string& MultiTopicsConsumerImpl::getSubscriptionName() const { return subscriptionName_; } - -const std::string& MultiTopicsConsumerImpl::getTopic() const { return topic_; } - -const std::string& MultiTopicsConsumerImpl::getName() const { return consumerStr_; } - -void MultiTopicsConsumerImpl::shutdown() {} - -bool MultiTopicsConsumerImpl::isClosed() { return state_ == Closed; } - -bool MultiTopicsConsumerImpl::isOpen() { return state_ == Ready; } - -void MultiTopicsConsumerImpl::receiveMessages() { - const auto receiverQueueSize = conf_.getReceiverQueueSize(); - consumers_.forEachValue([receiverQueueSize](const ConsumerImplPtr& consumer) { - consumer->sendFlowPermitsToBroker(consumer->getCnx().lock(), receiverQueueSize); - LOG_DEBUG("Sending FLOW command for consumer - " << consumer->getConsumerId()); - }); -} - -Result 
MultiTopicsConsumerImpl::pauseMessageListener() { - if (!messageListener_) { - return ResultInvalidConfiguration; - } - consumers_.forEachValue([](const ConsumerImplPtr& consumer) { consumer->pauseMessageListener(); }); - return ResultOk; -} - -Result MultiTopicsConsumerImpl::resumeMessageListener() { - if (!messageListener_) { - return ResultInvalidConfiguration; - } - consumers_.forEachValue([](const ConsumerImplPtr& consumer) { consumer->resumeMessageListener(); }); - return ResultOk; -} - -void MultiTopicsConsumerImpl::redeliverUnacknowledgedMessages() { - LOG_DEBUG("Sending RedeliverUnacknowledgedMessages command for partitioned consumer."); - consumers_.forEachValue( - [](const ConsumerImplPtr& consumer) { consumer->redeliverUnacknowledgedMessages(); }); - unAckedMessageTrackerPtr_->clear(); -} - -void MultiTopicsConsumerImpl::redeliverUnacknowledgedMessages(const std::set& messageIds) { - if (messageIds.empty()) { - return; - } - if (conf_.getConsumerType() != ConsumerShared && conf_.getConsumerType() != ConsumerKeyShared) { - redeliverUnacknowledgedMessages(); - return; - } - LOG_DEBUG("Sending RedeliverUnacknowledgedMessages command for partitioned consumer."); - consumers_.forEachValue([&messageIds](const ConsumerImplPtr& consumer) { - consumer->redeliverUnacknowledgedMessages(messageIds); - }); -} - -int MultiTopicsConsumerImpl::getNumOfPrefetchedMessages() const { return messages_.size(); } - -void MultiTopicsConsumerImpl::getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) { - if (state_ != Ready) { - callback(ResultConsumerNotInitialized, BrokerConsumerStats()); - return; - } - Lock lock(mutex_); - MultiTopicsBrokerConsumerStatsPtr statsPtr = - std::make_shared(numberTopicPartitions_->load()); - LatchPtr latchPtr = std::make_shared(numberTopicPartitions_->load()); - lock.unlock(); - - auto self = shared_from_this(); - size_t i = 0; - consumers_.forEachValue([&self, &latchPtr, &statsPtr, &i, callback](const ConsumerImplPtr& consumer) { - 
size_t index = i++; - consumer->getBrokerConsumerStatsAsync( - [self, latchPtr, statsPtr, index, callback](Result result, BrokerConsumerStats stats) { - self->handleGetConsumerStats(result, stats, latchPtr, statsPtr, index, callback); - }); - }); -} - -void MultiTopicsConsumerImpl::handleGetConsumerStats(Result res, BrokerConsumerStats brokerConsumerStats, - LatchPtr latchPtr, - MultiTopicsBrokerConsumerStatsPtr statsPtr, size_t index, - BrokerConsumerStatsCallback callback) { - Lock lock(mutex_); - if (res == ResultOk) { - latchPtr->countdown(); - statsPtr->add(brokerConsumerStats, index); - } else { - lock.unlock(); - callback(res, BrokerConsumerStats()); - return; - } - if (latchPtr->getCount() == 0) { - lock.unlock(); - callback(ResultOk, BrokerConsumerStats(statsPtr)); - } -} - -std::shared_ptr MultiTopicsConsumerImpl::topicNamesValid(const std::vector& topics) { - TopicNamePtr topicNamePtr = std::shared_ptr(); - - // all topics name valid, and all topics have same namespace - for (std::vector::const_iterator itr = topics.begin(); itr != topics.end(); itr++) { - // topic name valid - if (!(topicNamePtr = TopicName::get(*itr))) { - LOG_ERROR("Topic name invalid when init " << *itr); - return std::shared_ptr(); - } - } - - return topicNamePtr; -} - -void MultiTopicsConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { - callback(ResultOperationNotSupported); -} - -void MultiTopicsConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { - if (state_ != Ready) { - callback(ResultAlreadyClosed); - return; - } - - MultiResultCallback multiResultCallback(callback, consumers_.size()); - consumers_.forEachValue([×tamp, &multiResultCallback](ConsumerImplPtr consumer) { - consumer->seekAsync(timestamp, multiResultCallback); - }); -} - -void MultiTopicsConsumerImpl::setNegativeAcknowledgeEnabledForTesting(bool enabled) { - consumers_.forEachValue([enabled](const ConsumerImplPtr& consumer) { - 
consumer->setNegativeAcknowledgeEnabledForTesting(enabled); - }); -} - -bool MultiTopicsConsumerImpl::isConnected() const { - if (state_ != Ready) { - return false; - } - - return consumers_ - .findFirstValueIf([](const ConsumerImplPtr& consumer) { return !consumer->isConnected(); }) - .is_empty(); -} - -uint64_t MultiTopicsConsumerImpl::getNumberOfConnectedConsumer() { - uint64_t numberOfConnectedConsumer = 0; - consumers_.forEachValue([&numberOfConnectedConsumer](const ConsumerImplPtr& consumer) { - if (consumer->isConnected()) { - numberOfConnectedConsumer++; - } - }); - return numberOfConnectedConsumer; -} -void MultiTopicsConsumerImpl::runPartitionUpdateTask() { - partitionsUpdateTimer_->expires_from_now(partitionsUpdateInterval_); - std::weak_ptr weakSelf{shared_from_this()}; - partitionsUpdateTimer_->async_wait([weakSelf](const boost::system::error_code& ec) { - // If two requests call runPartitionUpdateTask at the same time, the timer will fail, and it - // cannot continue at this time, and the request needs to be ignored. 
- auto self = weakSelf.lock(); - if (self && !ec) { - self->topicPartitionUpdate(); - } - }); -} -void MultiTopicsConsumerImpl::topicPartitionUpdate() { - using namespace std::placeholders; - Lock lock(mutex_); - auto topicsPartitions = topicsPartitions_; - lock.unlock(); - for (const auto& item : topicsPartitions) { - auto topicName = TopicName::get(item.first); - auto currentNumPartitions = item.second; - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( - std::bind(&MultiTopicsConsumerImpl::handleGetPartitions, shared_from_this(), topicName, - std::placeholders::_1, std::placeholders::_2, currentNumPartitions)); - } -} -void MultiTopicsConsumerImpl::handleGetPartitions(TopicNamePtr topicName, Result result, - const LookupDataResultPtr& lookupDataResult, - int currentNumPartitions) { - if (state_ != Ready) { - return; - } - if (!result) { - const auto newNumPartitions = static_cast(lookupDataResult->getPartitions()); - if (newNumPartitions > currentNumPartitions) { - LOG_INFO("new partition count: " << newNumPartitions - << " current partition count: " << currentNumPartitions); - auto partitionsNeedCreate = - std::make_shared>(newNumPartitions - currentNumPartitions); - ConsumerSubResultPromisePtr topicPromise = std::make_shared>(); - Lock lock(mutex_); - topicsPartitions_[topicName->toString()] = newNumPartitions; - lock.unlock(); - numberTopicPartitions_->fetch_add(newNumPartitions - currentNumPartitions); - for (unsigned int i = currentNumPartitions; i < newNumPartitions; i++) { - subscribeSingleNewConsumer(newNumPartitions, topicName, i, topicPromise, - partitionsNeedCreate); - } - // `runPartitionUpdateTask()` will be called in `handleSingleConsumerCreated()` - return; - } - } else { - LOG_WARN("Failed to getPartitionMetadata: " << strResult(result)); - } - runPartitionUpdateTask(); -} - -void MultiTopicsConsumerImpl::subscribeSingleNewConsumer( - int numPartitions, TopicNamePtr topicName, int partitionIndex, - ConsumerSubResultPromisePtr 
topicSubResultPromise, - std::shared_ptr> partitionsNeedCreate) { - ConsumerConfiguration config = conf_.clone(); - ExecutorServicePtr internalListenerExecutor = client_->getPartitionListenerExecutorProvider()->get(); - config.setMessageListener(std::bind(&MultiTopicsConsumerImpl::messageReceived, shared_from_this(), - std::placeholders::_1, std::placeholders::_2)); - - // Apply total limit of receiver queue size across partitions - config.setReceiverQueueSize( - std::min(conf_.getReceiverQueueSize(), - (int)(conf_.getMaxTotalReceiverQueueSizeAcrossPartitions() / numPartitions))); - - std::string topicPartitionName = topicName->getTopicPartitionName(partitionIndex); - - auto consumer = std::make_shared(client_, topicPartitionName, subscriptionName_, config, - topicName->isPersistent(), internalListenerExecutor, true, - Partitioned); - consumer->getConsumerCreatedFuture().addListener( - std::bind(&MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); - consumer->setPartitionIndex(partitionIndex); - consumer->start(); - consumers_.emplace(topicPartitionName, consumer); - LOG_INFO("Add Creating Consumer for - " << topicPartitionName << " - " << consumerStr_ - << " consumerSize: " << consumers_.size()); -} diff --git a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h deleted file mode 100644 index 8769d59b9908e..0000000000000 --- a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h +++ /dev/null @@ -1,162 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_MULTI_TOPICS_CONSUMER_HEADER -#define PULSAR_MULTI_TOPICS_CONSUMER_HEADER -#include "lib/TestUtil.h" -#include "ConsumerImpl.h" -#include "ClientImpl.h" -#include "BlockingQueue.h" -#include -#include -#include - -#include "ConsumerImplBase.h" -#include "lib/UnAckedMessageTrackerDisabled.h" -#include -#include -#include -#include -#include - -namespace pulsar { -typedef std::shared_ptr> ConsumerSubResultPromisePtr; - -class MultiTopicsConsumerImpl; -class MultiTopicsConsumerImpl : public ConsumerImplBase, - public std::enable_shared_from_this { - public: - enum MultiTopicsConsumerState - { - Pending, - Ready, - Closing, - Closed, - Failed - }; - MultiTopicsConsumerImpl(ClientImplPtr client, const std::vector& topics, - const std::string& subscriptionName, TopicNamePtr topicName, - const ConsumerConfiguration& conf, LookupServicePtr lookupServicePtr_); - MultiTopicsConsumerImpl(ClientImplPtr client, TopicNamePtr topicName, int numPartitions, - const std::string& subscriptionName, const ConsumerConfiguration& conf, - LookupServicePtr lookupServicePtr) - : MultiTopicsConsumerImpl(client, {topicName->toString()}, subscriptionName, topicName, conf, - lookupServicePtr) { - topicsPartitions_[topicName->toString()] = numPartitions; - } - ~MultiTopicsConsumerImpl(); - // overrided methods from ConsumerImplBase - Future getConsumerCreatedFuture() override; - const std::string& getSubscriptionName() const override; - const std::string& getTopic() const override; - Result receive(Message& msg) override; - Result receive(Message& msg, int 
timeout) override; - void receiveAsync(ReceiveCallback& callback) override; - void unsubscribeAsync(ResultCallback callback) override; - void acknowledgeAsync(const MessageId& msgId, ResultCallback callback) override; - void acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) override; - void closeAsync(ResultCallback callback) override; - void start() override; - void shutdown() override; - bool isClosed() override; - bool isOpen() override; - Result pauseMessageListener() override; - Result resumeMessageListener() override; - void redeliverUnacknowledgedMessages() override; - void redeliverUnacknowledgedMessages(const std::set& messageIds) override; - const std::string& getName() const override; - int getNumOfPrefetchedMessages() const override; - void getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) override; - void seekAsync(const MessageId& msgId, ResultCallback callback) override; - void seekAsync(uint64_t timestamp, ResultCallback callback) override; - void negativeAcknowledge(const MessageId& msgId) override; - bool isConnected() const override; - uint64_t getNumberOfConnectedConsumer() override; - - void handleGetConsumerStats(Result, BrokerConsumerStats, LatchPtr, MultiTopicsBrokerConsumerStatsPtr, - size_t, BrokerConsumerStatsCallback); - // return first topic name when all topics name valid, or return null pointer - static std::shared_ptr topicNamesValid(const std::vector& topics); - void unsubscribeOneTopicAsync(const std::string& topic, ResultCallback callback); - Future subscribeOneTopicAsync(const std::string& topic); - - protected: - const ClientImplPtr client_; - const std::string subscriptionName_; - std::string consumerStr_; - std::string topic_; - const ConsumerConfiguration conf_; - typedef SynchronizedHashMap ConsumerMap; - ConsumerMap consumers_; - std::map topicsPartitions_; - mutable std::mutex mutex_; - std::mutex pendingReceiveMutex_; - std::atomic state_{Pending}; - BlockingQueue messages_; - 
const ExecutorServicePtr listenerExecutor_; - MessageListener messageListener_; - DeadlineTimerPtr partitionsUpdateTimer_; - boost::posix_time::time_duration partitionsUpdateInterval_; - LookupServicePtr lookupServicePtr_; - std::shared_ptr> numberTopicPartitions_; - std::atomic failedResult{ResultOk}; - Promise multiTopicsConsumerCreatedPromise_; - UnAckedMessageTrackerPtr unAckedMessageTrackerPtr_; - const std::vector topics_; - std::queue pendingReceives_; - - /* methods */ - void handleSinglePartitionConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, - unsigned int partitionIndex); - void notifyResult(CloseCallback closeCallback); - void messageReceived(Consumer consumer, const Message& msg); - void internalListener(Consumer consumer); - void receiveMessages(); - void failPendingReceiveCallback(); - void notifyPendingReceivedCallback(Result result, Message& message, const ReceiveCallback& callback); - - void handleOneTopicSubscribed(Result result, Consumer consumer, const std::string& topic, - std::shared_ptr> topicsNeedCreate); - void subscribeTopicPartitions(int numPartitions, TopicNamePtr topicName, const std::string& consumerName, - ConsumerSubResultPromisePtr topicSubResultPromise); - void handleSingleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, - std::shared_ptr> partitionsNeedCreate, - ConsumerSubResultPromisePtr topicSubResultPromise); - void handleUnsubscribedAsync(Result result, std::shared_ptr> consumerUnsubed, - ResultCallback callback); - void handleOneTopicUnsubscribedAsync(Result result, std::shared_ptr> consumerUnsubed, - int numberPartitions, TopicNamePtr topicNamePtr, - std::string& topicPartitionName, ResultCallback callback); - void runPartitionUpdateTask(); - void topicPartitionUpdate(); - void handleGetPartitions(TopicNamePtr topicName, Result result, - const LookupDataResultPtr& lookupDataResult, int currentNumPartitions); - void subscribeSingleNewConsumer(int numPartitions, 
TopicNamePtr topicName, int partitionIndex, - ConsumerSubResultPromisePtr topicSubResultPromise, - std::shared_ptr> partitionsNeedCreate); - - private: - void setNegativeAcknowledgeEnabledForTesting(bool enabled) override; - - FRIEND_TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery); - FRIEND_TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery); -}; - -typedef std::shared_ptr MultiTopicsConsumerImplPtr; -} // namespace pulsar -#endif // PULSAR_MULTI_TOPICS_CONSUMER_HEADER diff --git a/pulsar-client-cpp/lib/Murmur3_32Hash.cc b/pulsar-client-cpp/lib/Murmur3_32Hash.cc deleted file mode 100644 index 1b214dd8607b2..0000000000000 --- a/pulsar-client-cpp/lib/Murmur3_32Hash.cc +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -//----------------------------------------------------------------------------- -// The original MurmurHash3 was written by Austin Appleby, and is placed in the -// public domain. This source code, implemented by Licht Takeuchi, is based on -// the orignal MurmurHash3 source code. 
-#include "Murmur3_32Hash.h" - -#include -#if BOOST_VERSION >= 105500 -#include -#else -#include -#endif -#include - -#if BOOST_COMP_MSVC -#include -#define ROTATE_LEFT(x, y) _rotl(x, y) -#else -#define ROTATE_LEFT(x, y) rotate_left(x, y) -#endif - -#if defined(BOOST_ENDIAN_LITTLE_BYTE) || defined(BOOST_LITTLE_ENDIAN) -#define BYTESPWAP(x) (x) -#elif defined(BOOST_ENDIAN_BIG_BYTE) || defined(BOOST_BIG_ENDIAN) -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC -#define BYTESPWAP(x) __builtin_bswap32(x) -#elif BOOST_COMP_MSVC -#define BYTESPWAP(x) _byteswap_uint32(x) -#else -#error "No BOOST_COMP_XXX macro found" -#endif -#else -#error "No byte order found" -#endif - -#define MACRO_CHUNK_SIZE 4 -#define MACRO_C1 0xcc9e2d51U -#define MACRO_C2 0x1b873593U - -namespace pulsar { - -Murmur3_32Hash::Murmur3_32Hash() : seed(0) {} - -int32_t Murmur3_32Hash::makeHash(const std::string &key) { - return static_cast(makeHash(&key.front(), key.length()) & std::numeric_limits::max()); -} - -uint32_t Murmur3_32Hash::makeHash(const void *key, const int64_t len) { - const uint8_t *data = reinterpret_cast(key); - const int nblocks = len / MACRO_CHUNK_SIZE; - uint32_t h1 = seed; - const uint32_t *blocks = reinterpret_cast(data + nblocks * MACRO_CHUNK_SIZE); - - for (int i = -nblocks; i != 0; i++) { - uint32_t k1 = BYTESPWAP(blocks[i]); - - k1 = mixK1(k1); - h1 = mixH1(h1, k1); - } - - const uint8_t *tail = reinterpret_cast(data + nblocks * MACRO_CHUNK_SIZE); - uint32_t k1 = 0; - switch (len - nblocks * MACRO_CHUNK_SIZE) { - case 3: - k1 ^= static_cast(tail[2]) << 16; - case 2: - k1 ^= static_cast(tail[1]) << 8; - case 1: - k1 ^= static_cast(tail[0]); - }; - - h1 ^= mixK1(k1); - h1 ^= len; - h1 = fmix(h1); - - return h1; -} - -uint32_t Murmur3_32Hash::fmix(uint32_t h) { - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - - return h; -} - -uint32_t Murmur3_32Hash::mixK1(uint32_t k1) { - k1 *= MACRO_C1; - k1 = ROTATE_LEFT(k1, 15); - k1 *= MACRO_C2; - return k1; 
-} - -uint32_t Murmur3_32Hash::mixH1(uint32_t h1, uint32_t k1) { - h1 ^= k1; - h1 = ROTATE_LEFT(h1, 13); - return h1 * 5 + 0xe6546b64; -} - -uint32_t Murmur3_32Hash::rotate_left(uint32_t x, uint8_t r) { return (x << r) | (x >> ((32 - r))); } -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Murmur3_32Hash.h b/pulsar-client-cpp/lib/Murmur3_32Hash.h deleted file mode 100644 index 50e6f16446a92..0000000000000 --- a/pulsar-client-cpp/lib/Murmur3_32Hash.h +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -//----------------------------------------------------------------------------- -// The original MurmurHash3 was written by Austin Appleby, and is placed in the -// public domain. This source code, implemented by Licht Takeuchi, is based on -// the orignal MurmurHash3 source code. 
-#ifndef MURMUR3_32_HASH_HPP_ -#define MURMUR3_32_HASH_HPP_ - -#include -#include "Hash.h" - -#include -#include - -namespace pulsar { - -class PULSAR_PUBLIC Murmur3_32Hash : public Hash { - public: - Murmur3_32Hash(); - - int32_t makeHash(const std::string& key); - - private: - uint32_t seed; - - static uint32_t fmix(uint32_t h); - static uint32_t mixK1(uint32_t k1); - static uint32_t mixH1(uint32_t h1, uint32_t k1); - static uint32_t rotate_left(uint32_t x, uint8_t r); - uint32_t makeHash(const void* key, const int64_t len); -}; -} // namespace pulsar - -#endif /* MURMUR3_32_HASH_HPP_ */ diff --git a/pulsar-client-cpp/lib/NamedEntity.cc b/pulsar-client-cpp/lib/NamedEntity.cc deleted file mode 100644 index ad7c385c6fe43..0000000000000 --- a/pulsar-client-cpp/lib/NamedEntity.cc +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "NamedEntity.h" - -bool NamedEntity::checkName(const std::string& name) { - for (char c : name) { - switch (c) { - case '=': - case ':': - case ' ': - case '!': - case '\t': - case '\r': - case '\n': - return false; - default: - break; - } - } - - return true; -} diff --git a/pulsar-client-cpp/lib/NamedEntity.h b/pulsar-client-cpp/lib/NamedEntity.h deleted file mode 100644 index 14b73d6069ca7..0000000000000 --- a/pulsar-client-cpp/lib/NamedEntity.h +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include - -class NamedEntity { - public: - static bool checkName(const std::string& name); -}; diff --git a/pulsar-client-cpp/lib/NamespaceName.cc b/pulsar-client-cpp/lib/NamespaceName.cc deleted file mode 100644 index 02bde00b27e26..0000000000000 --- a/pulsar-client-cpp/lib/NamespaceName.cc +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "NamespaceName.h" -#include "NamedEntity.h" -#include "LogUtils.h" - -#include -#include -#include -#include -#include - -DECLARE_LOG_OBJECT() -namespace pulsar { - -std::shared_ptr NamespaceName::get(const std::string& property, const std::string& cluster, - const std::string& namespaceName) { - if (validateNamespace(property, cluster, namespaceName)) { - std::shared_ptr ptr(new NamespaceName(property, cluster, namespaceName)); - return ptr; - } else { - LOG_DEBUG("Returning a null NamespaceName object"); - return std::shared_ptr(); - } -} - -NamespaceName::NamespaceName(const std::string& property, const std::string& cluster, - const std::string& namespaceName) { - std::ostringstream oss; - oss << property << "/" << cluster << "/" << namespaceName; - this->namespace_ = oss.str(); - this->property_ = property; - this->cluster_ = cluster; - this->localName_ = namespaceName; -} - -bool NamespaceName::validateNamespace(const std::string& property, const std::string& cluster, - const std::string& namespaceName) { - if (!property.empty() && !cluster.empty() && !namespaceName.empty()) { - return NamedEntity::checkName(property) && NamedEntity::checkName(cluster) && - NamedEntity::checkName(namespaceName); - } else { - LOG_DEBUG("Empty parameters passed for validating namespace"); - return false; - } -} - -std::shared_ptr NamespaceName::get(const std::string& property, - const 
std::string& namespaceName) { - if (validateNamespace(property, namespaceName)) { - std::shared_ptr ptr(new NamespaceName(property, namespaceName)); - return ptr; - } else { - LOG_DEBUG("Returning a null NamespaceName object"); - return std::shared_ptr(); - } -} - -NamespaceName::NamespaceName(const std::string& property, const std::string& namespaceName) { - std::ostringstream oss; - oss << property << "/" << namespaceName; - this->namespace_ = oss.str(); - this->property_ = property; - this->localName_ = namespaceName; -} - -bool NamespaceName::validateNamespace(const std::string& property, const std::string& namespaceName) { - if (!property.empty() && !namespaceName.empty()) { - return NamedEntity::checkName(property) && NamedEntity::checkName(namespaceName); - } else { - LOG_DEBUG("Empty parameters passed for validating namespace"); - return false; - } -} - -std::shared_ptr NamespaceName::getNamespaceObject() { - return std::shared_ptr(this); -} - -bool NamespaceName::operator==(const NamespaceName& namespaceName) { - return this->namespace_.compare(namespaceName.namespace_) == 0; -} - -std::string NamespaceName::getProperty() { return this->property_; } - -std::string NamespaceName::getCluster() { return this->cluster_; } - -std::string NamespaceName::getLocalName() { return this->localName_; } - -bool NamespaceName::isV2() { return this->cluster_.empty(); } - -std::string NamespaceName::toString() { return this->namespace_; } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/NamespaceName.h b/pulsar-client-cpp/lib/NamespaceName.h deleted file mode 100644 index 86ffc2f4ca1a7..0000000000000 --- a/pulsar-client-cpp/lib/NamespaceName.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef _PULSAR_NAMESPACE_NAME_HEADER_ -#define _PULSAR_NAMESPACE_NAME_HEADER_ - -#include -#include "ServiceUnitId.h" - -#include -#include - -namespace pulsar { - -class PULSAR_PUBLIC NamespaceName : public ServiceUnitId { - public: - std::shared_ptr getNamespaceObject(); - std::string getProperty(); - std::string getCluster(); - std::string getLocalName(); - static std::shared_ptr get(const std::string& property, const std::string& cluster, - const std::string& namespaceName); - static std::shared_ptr get(const std::string& property, const std::string& namespaceName); - bool operator==(const NamespaceName& namespaceName); - bool isV2(); - std::string toString(); - - private: - std::string namespace_; - std::string property_; - std::string cluster_; - std::string localName_; - static bool validateNamespace(const std::string& property, const std::string& cluster, - const std::string& namespace_); - static bool validateNamespace(const std::string& property, const std::string& namespace_); - NamespaceName(const std::string& property, const std::string& cluster, const std::string& namespace_); - NamespaceName(const std::string& property, const std::string& namespace_); -}; - -typedef std::shared_ptr NamespaceNamePtr; - -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/lib/NegativeAcksTracker.cc b/pulsar-client-cpp/lib/NegativeAcksTracker.cc deleted file mode 100644 
index 8e501dc4f4ae9..0000000000000 --- a/pulsar-client-cpp/lib/NegativeAcksTracker.cc +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include "NegativeAcksTracker.h" - -#include "ConsumerImpl.h" - -#include -#include - -#include "LogUtils.h" -DECLARE_LOG_OBJECT() - -namespace pulsar { - -NegativeAcksTracker::NegativeAcksTracker(ClientImplPtr client, ConsumerImpl &consumer, - const ConsumerConfiguration &conf) - : consumer_(consumer), - timerInterval_(0), - executor_(client->getIOExecutorProvider()->get()), - enabledForTesting_(true) { - static const long MIN_NACK_DELAY_MILLIS = 100; - - nackDelay_ = - std::chrono::milliseconds(std::max(conf.getNegativeAckRedeliveryDelayMs(), MIN_NACK_DELAY_MILLIS)); - timerInterval_ = boost::posix_time::milliseconds((long)(nackDelay_.count() / 3)); - LOG_DEBUG("Created negative ack tracker with delay: " << nackDelay_.count() - << " ms - Timer interval: " << timerInterval_); -} - -void NegativeAcksTracker::scheduleTimer() { - timer_ = executor_->createDeadlineTimer(); - timer_->expires_from_now(timerInterval_); - timer_->async_wait(std::bind(&NegativeAcksTracker::handleTimer, this, std::placeholders::_1)); -} - -void 
NegativeAcksTracker::handleTimer(const boost::system::error_code &ec) { - if (ec) { - // Ignore cancelled events - return; - } - - std::lock_guard lock(mutex_); - timer_ = nullptr; - - if (nackedMessages_.empty() || !enabledForTesting_) { - return; - } - - // Group all the nacked messages into one single re-delivery request - std::set messagesToRedeliver; - - auto now = Clock::now(); - - for (auto it = nackedMessages_.begin(); it != nackedMessages_.end();) { - if (it->second < now) { - messagesToRedeliver.insert(it->first); - it = nackedMessages_.erase(it); - } else { - ++it; - } - } - - if (!messagesToRedeliver.empty()) { - consumer_.redeliverMessages(messagesToRedeliver); - } - scheduleTimer(); -} - -void NegativeAcksTracker::add(const MessageId &m) { - std::lock_guard lock(mutex_); - - auto now = Clock::now(); - - // Erase batch id to group all nacks from same batch - MessageId batchMessageId = MessageId(m.partition(), m.ledgerId(), m.entryId(), -1); - nackedMessages_[batchMessageId] = now + nackDelay_; - - if (!timer_) { - scheduleTimer(); - } -} - -void NegativeAcksTracker::close() { - std::lock_guard lock(mutex_); - - if (timer_) { - boost::system::error_code ec; - timer_->cancel(ec); - } -} - -void NegativeAcksTracker::setEnabledForTesting(bool enabled) { - std::lock_guard lock(mutex_); - enabledForTesting_ = enabled; - - if (enabledForTesting_ && !timer_) { - scheduleTimer(); - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/NegativeAcksTracker.h b/pulsar-client-cpp/lib/NegativeAcksTracker.h deleted file mode 100644 index 14762754640c9..0000000000000 --- a/pulsar-client-cpp/lib/NegativeAcksTracker.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include - -#include "ExecutorService.h" -#include "ClientImpl.h" - -#include -#include - -namespace pulsar { - -class NegativeAcksTracker { - public: - NegativeAcksTracker(ClientImplPtr client, ConsumerImpl &consumer, const ConsumerConfiguration &conf); - - NegativeAcksTracker(const NegativeAcksTracker &) = delete; - - NegativeAcksTracker &operator=(const NegativeAcksTracker &) = delete; - - void add(const MessageId &m); - - void close(); - - void setEnabledForTesting(bool enabled); - - private: - void scheduleTimer(); - void handleTimer(const boost::system::error_code &ec); - - ConsumerImpl &consumer_; - std::mutex mutex_; - - std::chrono::milliseconds nackDelay_; - boost::posix_time::milliseconds timerInterval_; - typedef typename std::chrono::steady_clock Clock; - std::map nackedMessages_; - - ExecutorServicePtr executor_; - DeadlineTimerPtr timer_; - bool enabledForTesting_; // to be able to test deterministically -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ObjectPool.h b/pulsar-client-cpp/lib/ObjectPool.h deleted file mode 100644 index 87507a74f9cf4..0000000000000 --- a/pulsar-client-cpp/lib/ObjectPool.h +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_OBJECTPOOL_H_ -#define LIB_OBJECTPOOL_H_ - -#include -#include -#include - -namespace pulsar { - -template -class Allocator { - public: - // Allocator must be stateless, so put everything in this static - class Impl { - public: - // cheap lock to acquire - static std::mutex mutex_; - - // note: use std::forward_list<> when switching to C++11 mode - struct Node { - Node* next; - explicit Node(Node* n) : next(n) {} - }; - Node* head_; - int pushSize_; - - struct GlobalPool { - Node* node_; - int nodeCount_; - GlobalPool* next_; - explicit GlobalPool(GlobalPool* n) : next_(n) {} - }; - static struct GlobalPool* globalPool_; - static int globalNodeCount_; - - Impl(const Impl&); - void operator=(const Impl&); - - void* pop() { - if (!head_) { - // size = 0 - std::lock_guard lock(mutex_); - - if (!globalPool_) { - return NULL; - } - - GlobalPool* poolEntry = globalPool_; - head_ = globalPool_->node_; - pushSize_ += globalPool_->nodeCount_; - globalNodeCount_ -= globalPool_->nodeCount_; - globalPool_ = globalPool_->next_; - delete poolEntry; - } - void* result = head_; - if (result) { - head_ = head_->next; - pushSize_--; - } - return result; - } - - bool push(void* p) { - // Once thread specific entries reaches 10% of max size, push them to GlobalPool - if 
(pushSize_ >= MaxSize * 0.1) { - bool deleteList = true; - { - // Move the entries to global pool - std::lock_guard lock(mutex_); - - // If total node count reached max allowed cache limit, - // skip adding to global pool. - if ((globalNodeCount_ + pushSize_) <= MaxSize) { - deleteList = false; - - globalPool_ = new GlobalPool(globalPool_); - globalPool_->node_ = head_; - globalPool_->nodeCount_ = pushSize_; - globalNodeCount_ += pushSize_; - } - } - if (deleteList) { - pushSize_ = 0; - deleteLinkedList(head_); - } - head_ = new (p) Node(0); - pushSize_ = 1; - return true; - } - - head_ = new (p) Node(head_); - pushSize_++; - return true; - } - - static void deleteLinkedList(Node* head) { - Node* n = head; - while (n) { - void* p = n; - n = n->next; - ::operator delete(p); - } - } - - public: - Impl() { - pushSize_ = 0; - head_ = 0; - } - - ~Impl() { - // No need for mutex for pop - deleteLinkedList(head_); - } - - void* allocate() { - void* result = pop(); - if (!result) { - result = ::operator new(std::max(sizeof(T), sizeof(Node))); - } - return result; - } - - void deallocate(void* p) { - if (!push(p)) { - ::operator delete(p); - } - } - }; - - static thread_local std::unique_ptr implPtr_; - typedef T value_type; - typedef size_t size_type; - typedef T* pointer; - typedef const void* const_pointer; - - Allocator() {} - - Allocator(const Allocator& /*other*/) {} - - template - Allocator(const Allocator& /*other*/) {} - - pointer allocate(size_type n, const void* /*hint*/ = 0) { - Impl* impl = implPtr_.get(); - if (!impl) { - implPtr_.reset(new Impl); - impl = implPtr_.get(); - } - void* p = (n == 1) ? 
impl->allocate() : operator new(n * sizeof(T)); - return static_cast(p); - } - - void deallocate(pointer ptr, size_type n) { - Impl* impl = implPtr_.get(); - if (!impl) { - implPtr_.reset(new Impl); - impl = implPtr_.get(); - } - if (n == 1) - impl->deallocate(ptr); - else - ::operator delete(ptr); - } - - template - struct rebind { - typedef Allocator other; - }; -}; - -// typename Allocator::Impl is important else the compiler -// doesn't understand that it is a type -template -thread_local std::unique_ptr::Impl> Allocator::implPtr_; - -template -std::mutex Allocator::Impl::mutex_; - -template -typename Allocator::Impl::GlobalPool* Allocator::Impl::globalPool_; - -template -int Allocator::Impl::globalNodeCount_; - -template -class ObjectPool { - typedef std::shared_ptr TypeSharedPtr; - - Allocator allocator_; - - public: - ObjectPool() {} - - TypeSharedPtr create() { return std::allocate_shared(allocator_); } - - ~ObjectPool() { - struct Allocator::Impl::GlobalPool* poolEntry = - Allocator::Impl::globalPool_; - while (poolEntry) { - Allocator::Impl::deleteLinkedList(poolEntry->node_); - struct Allocator::Impl::GlobalPool* delEntry = poolEntry; - poolEntry = poolEntry->next_; - ::operator delete(delEntry); - } - } - - private: - ObjectPool(const ObjectPool&); - ObjectPool& operator=(const ObjectPool&); -}; -} // namespace pulsar -#endif /* LIB_OBJECTPOOL_H_ */ diff --git a/pulsar-client-cpp/lib/OpSendMsg.h b/pulsar-client-cpp/lib/OpSendMsg.h deleted file mode 100644 index 365301be4ea95..0000000000000 --- a/pulsar-client-cpp/lib/OpSendMsg.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_OPSENDMSG_H_ -#define LIB_OPSENDMSG_H_ - -#include -#include -#include - -#include "TimeUtils.h" -#include "MessageImpl.h" - -namespace pulsar { - -struct OpSendMsg { - proto::MessageMetadata metadata_; - SharedBuffer payload_; - SendCallback sendCallback_; - uint64_t producerId_; - uint64_t sequenceId_; - boost::posix_time::ptime timeout_; - uint32_t messagesCount_; - uint64_t messagesSize_; - - OpSendMsg() = default; - - OpSendMsg(const proto::MessageMetadata& metadata, const SharedBuffer& payload, - const SendCallback& sendCallback, uint64_t producerId, uint64_t sequenceId, int sendTimeoutMs, - uint32_t messagesCount, uint64_t messagesSize) - : metadata_(metadata), // the copy happens here because OpSendMsg of chunks are constructed with the - // a shared metadata object - payload_(payload), - sendCallback_(sendCallback), - producerId_(producerId), - sequenceId_(sequenceId), - timeout_(TimeUtils::now() + milliseconds(sendTimeoutMs)), - messagesCount_(messagesCount), - messagesSize_(messagesSize) {} - - void complete(Result result, const MessageId& messageId) const { - if (sendCallback_) { - sendCallback_(result, messageId); - } - } -}; - -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/lib/PartitionedProducerImpl.cc b/pulsar-client-cpp/lib/PartitionedProducerImpl.cc deleted file mode 100644 index 469ecc9e793b6..0000000000000 --- a/pulsar-client-cpp/lib/PartitionedProducerImpl.cc +++ /dev/null @@ -1,449 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "PartitionedProducerImpl.h" -#include "LogUtils.h" -#include -#include -#include "RoundRobinMessageRouter.h" -#include "SinglePartitionMessageRouter.h" -#include "TopicMetadataImpl.h" - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -const std::string PartitionedProducerImpl::PARTITION_NAME_SUFFIX = "-partition-"; - -PartitionedProducerImpl::PartitionedProducerImpl(ClientImplPtr client, const TopicNamePtr topicName, - const unsigned int numPartitions, - const ProducerConfiguration& config) - : client_(client), - topicName_(topicName), - topic_(topicName_->toString()), - conf_(config), - topicMetadata_(new TopicMetadataImpl(numPartitions)), - flushedPartitions_(0) { - routerPolicy_ = getMessageRouter(); - - int maxPendingMessagesPerPartition = - std::min(config.getMaxPendingMessages(), - (int)(config.getMaxPendingMessagesAcrossPartitions() / numPartitions)); - conf_.setMaxPendingMessages(maxPendingMessagesPerPartition); - - auto partitionsUpdateInterval = static_cast(client_->conf().getPartitionsUpdateInterval()); - if (partitionsUpdateInterval > 0) { - listenerExecutor_ = client_->getListenerExecutorProvider()->get(); - partitionsUpdateTimer_ = listenerExecutor_->createDeadlineTimer(); - partitionsUpdateInterval_ = 
boost::posix_time::seconds(partitionsUpdateInterval); - lookupServicePtr_ = client_->getLookup(); - } -} - -MessageRoutingPolicyPtr PartitionedProducerImpl::getMessageRouter() { - switch (conf_.getPartitionsRoutingMode()) { - case ProducerConfiguration::RoundRobinDistribution: - return std::make_shared( - conf_.getHashingScheme(), conf_.getBatchingEnabled(), conf_.getBatchingMaxMessages(), - conf_.getBatchingMaxAllowedSizeInBytes(), - boost::posix_time::milliseconds(conf_.getBatchingMaxPublishDelayMs())); - case ProducerConfiguration::CustomPartition: - return conf_.getMessageRouterPtr(); - case ProducerConfiguration::UseSinglePartition: - default: - return std::make_shared(getNumPartitions(), - conf_.getHashingScheme()); - } -} - -PartitionedProducerImpl::~PartitionedProducerImpl() {} -// override -const std::string& PartitionedProducerImpl::getTopic() const { return topic_; } - -unsigned int PartitionedProducerImpl::getNumPartitions() const { - return static_cast(topicMetadata_->getNumPartitions()); -} - -unsigned int PartitionedProducerImpl::getNumPartitionsWithLock() const { - Lock lock(producersMutex_); - return getNumPartitions(); -} - -ProducerImplPtr PartitionedProducerImpl::newInternalProducer(unsigned int partition, bool lazy) { - using namespace std::placeholders; - auto producer = std::make_shared(client_, *topicName_, conf_, partition); - - if (lazy) { - createLazyPartitionProducer(partition); - } else { - producer->getProducerCreatedFuture().addListener( - std::bind(&PartitionedProducerImpl::handleSinglePartitionProducerCreated, - const_cast(this)->shared_from_this(), _1, _2, partition)); - } - - LOG_DEBUG("Creating Producer for single Partition - " << topicName_ << "-partition-" << partition); - return producer; -} - -// override -void PartitionedProducerImpl::start() { - // create producer per partition - // Here we don't need `producersMutex` to protect `producers_`, because `producers_` can only be increased - // when `state_` is Ready - - if 
(conf_.getLazyStartPartitionedProducers() && conf_.getAccessMode() == ProducerConfiguration::Shared) { - // start one producer now, to ensure authz errors occur now - // if the SinglePartition router is used, then this producer will serve - // all non-keyed messages in the future - Message msg = MessageBuilder().setContent("x").build(); - short partition = (short)(routerPolicy_->getPartition(msg, *topicMetadata_)); - - for (unsigned int i = 0; i < getNumPartitions(); i++) { - bool lazy = (short)i != partition; - producers_.push_back(newInternalProducer(i, lazy)); - } - - producers_[partition]->start(); - } else { - for (unsigned int i = 0; i < getNumPartitions(); i++) { - producers_.push_back(newInternalProducer(i, false)); - } - - for (ProducerList::const_iterator prod = producers_.begin(); prod != producers_.end(); prod++) { - (*prod)->start(); - } - } -} - -void PartitionedProducerImpl::handleSinglePartitionProducerCreated(Result result, - ProducerImplBaseWeakPtr producerWeakPtr, - unsigned int partitionIndex) { - // to indicate, we are doing cleanup using closeAsync after producer create - // has failed and the invocation of closeAsync is not from client - const auto numPartitions = getNumPartitionsWithLock(); - assert(numProducersCreated_ <= numPartitions && partitionIndex <= numPartitions); - - if (state_ == Failed) { - // We have already informed client that producer creation failed - if (++numProducersCreated_ == numPartitions) { - closeAsync(nullptr); - } - return; - } - - if (result != ResultOk) { - LOG_ERROR("Unable to create Producer for partition - " << partitionIndex << " Error - " << result); - partitionedProducerCreatedPromise_.setFailed(result); - state_ = Failed; - if (++numProducersCreated_ == numPartitions) { - closeAsync(nullptr); - } - return; - } - - if (++numProducersCreated_ == numPartitions) { - state_ = Ready; - if (partitionsUpdateTimer_) { - runPartitionUpdateTask(); - } - partitionedProducerCreatedPromise_.setValue(shared_from_this()); 
- } -} - -void PartitionedProducerImpl::createLazyPartitionProducer(unsigned int partitionIndex) { - const auto numPartitions = getNumPartitions(); - assert(numProducersCreated_ <= numPartitions); - assert(partitionIndex <= numPartitions); - numProducersCreated_++; - if (numProducersCreated_ == numPartitions) { - state_ = Ready; - if (partitionsUpdateTimer_) { - runPartitionUpdateTask(); - } - partitionedProducerCreatedPromise_.setValue(shared_from_this()); - } -} - -// override -void PartitionedProducerImpl::sendAsync(const Message& msg, SendCallback callback) { - if (state_ != Ready) { - callback(ResultAlreadyClosed, msg.getMessageId()); - return; - } - - // get partition for this message from router policy - Lock producersLock(producersMutex_); - short partition = (short)(routerPolicy_->getPartition(msg, *topicMetadata_)); - if (partition >= getNumPartitions() || partition >= producers_.size()) { - LOG_ERROR("Got Invalid Partition for message from Router Policy, Partition - " << partition); - // change me: abort or notify failure in callback? 
- // change to appropriate error if callback - callback(ResultUnknownError, msg.getMessageId()); - return; - } - // find a producer for that partition, index should start from 0 - ProducerImplPtr producer = producers_[partition]; - - // if the producer is not started (lazy producer), then kick-off the start process - if (!producer->isStarted()) { - producer->start(); - } - - producersLock.unlock(); - - // send message on that partition - producer->sendAsync(msg, callback); -} - -// override -void PartitionedProducerImpl::shutdown() { state_ = Closed; } - -const std::string& PartitionedProducerImpl::getProducerName() const { - Lock producersLock(producersMutex_); - return producers_[0]->getProducerName(); -} - -const std::string& PartitionedProducerImpl::getSchemaVersion() const { - Lock producersLock(producersMutex_); - // Since the schema is atomically assigned on the partitioned-topic, - // it's guaranteed that all the partitions will have the same schema version. - return producers_[0]->getSchemaVersion(); -} - -int64_t PartitionedProducerImpl::getLastSequenceId() const { - int64_t currentMax = -1L; - Lock producersLock(producersMutex_); - for (int i = 0; i < producers_.size(); i++) { - currentMax = std::max(currentMax, producers_[i]->getLastSequenceId()); - } - - return currentMax; -} - -/* - * if createProducerCallback is set, it means the closeAsync is called from CreateProducer API which failed to - * create one or many producers for partitions. 
So, we have to notify with ERROR on createProducerFailure - */ -void PartitionedProducerImpl::closeAsync(CloseCallback closeCallback) { - if (state_ == Closing || state_ == Closed) { - return; - } - state_ = Closing; - - unsigned int producerAlreadyClosed = 0; - - // Here we don't need `producersMutex` to protect `producers_`, because `producers_` can only be increased - // when `state_` is Ready - for (auto& producer : producers_) { - if (!producer->isClosed()) { - auto self = shared_from_this(); - const auto partition = static_cast(producer->partition()); - producer->closeAsync([this, self, partition, closeCallback](Result result) { - handleSinglePartitionProducerClose(result, partition, closeCallback); - }); - } else { - producerAlreadyClosed++; - } - } - const auto numProducers = producers_.size(); - - /* - * No need to set state since:- - * a. If closeAsync before creation then state == Closed, since producers_.size() = producerAlreadyClosed - * = 0 - * b. If closeAsync called after all sub partitioned producer connected - - * handleSinglePartitionProducerClose handles the closing - * c. 
If closeAsync called due to failure in creating just one sub producer then state is set by - * handleSinglePartitionProducerCreated - */ - if (producerAlreadyClosed == numProducers && closeCallback) { - state_ = Closed; - closeCallback(ResultOk); - } -} - -void PartitionedProducerImpl::handleSinglePartitionProducerClose(Result result, - const unsigned int partitionIndex, - CloseCallback callback) { - if (state_ == Failed) { - // we should have already notified the client by callback - return; - } - if (result != ResultOk) { - state_ = Failed; - LOG_ERROR("Closing the producer failed for partition - " << partitionIndex); - if (callback) { - callback(result); - } - return; - } - assert(partitionIndex < getNumPartitionsWithLock()); - if (numProducersCreated_ > 0) { - numProducersCreated_--; - } - // closed all successfully - if (!numProducersCreated_) { - state_ = Closed; - // set the producerCreatedPromise to failure, if client called - // closeAsync and it's not failure to create producer, the promise - // is set second time here, first time it was successful. So check - // if there's any adverse effect of setting it again. It should not - // be but must check. 
MUSTCHECK changeme - partitionedProducerCreatedPromise_.setFailed(ResultUnknownError); - if (callback) { - callback(result); - } - return; - } -} - -// override -Future PartitionedProducerImpl::getProducerCreatedFuture() { - return partitionedProducerCreatedPromise_.getFuture(); -} - -// override -bool PartitionedProducerImpl::isClosed() { return state_ == Closed; } - -void PartitionedProducerImpl::triggerFlush() { - Lock producersLock(producersMutex_); - for (ProducerList::const_iterator prod = producers_.begin(); prod != producers_.end(); prod++) { - if ((*prod)->isStarted()) { - (*prod)->triggerFlush(); - } - } -} - -void PartitionedProducerImpl::flushAsync(FlushCallback callback) { - if (!flushPromise_ || flushPromise_->isComplete()) { - flushPromise_ = std::make_shared>(); - } else { - // already in flushing, register a listener callback - auto listenerCallback = [callback](Result result, bool_type v) { - if (v) { - callback(ResultOk); - } else { - callback(ResultUnknownError); - } - return; - }; - - flushPromise_->getFuture().addListener(listenerCallback); - return; - } - - Lock producersLock(producersMutex_); - const int numProducers = static_cast(producers_.size()); - FlushCallback subFlushCallback = [this, callback, numProducers](Result result) { - // We shouldn't lock `producersMutex_` here because `subFlushCallback` may be called in - // `ProducerImpl::flushAsync`, and then deadlock occurs. 
- int previous = flushedPartitions_.fetch_add(1); - if (previous == numProducers - 1) { - flushedPartitions_.store(0); - flushPromise_->setValue(true); - callback(result); - } - return; - }; - - for (ProducerList::const_iterator prod = producers_.begin(); prod != producers_.end(); prod++) { - if ((*prod)->isStarted()) { - (*prod)->flushAsync(subFlushCallback); - } else { - subFlushCallback(ResultOk); - } - } -} - -void PartitionedProducerImpl::runPartitionUpdateTask() { - partitionsUpdateTimer_->expires_from_now(partitionsUpdateInterval_); - partitionsUpdateTimer_->async_wait( - std::bind(&PartitionedProducerImpl::getPartitionMetadata, shared_from_this())); -} - -void PartitionedProducerImpl::getPartitionMetadata() { - using namespace std::placeholders; - lookupServicePtr_->getPartitionMetadataAsync(topicName_) - .addListener(std::bind(&PartitionedProducerImpl::handleGetPartitions, shared_from_this(), _1, _2)); -} - -void PartitionedProducerImpl::handleGetPartitions(Result result, - const LookupDataResultPtr& lookupDataResult) { - if (state_ != Ready) { - return; - } - - if (!result) { - const auto newNumPartitions = static_cast(lookupDataResult->getPartitions()); - Lock producersLock(producersMutex_); - const auto currentNumPartitions = getNumPartitions(); - assert(currentNumPartitions == producers_.size()); - if (newNumPartitions > currentNumPartitions) { - LOG_INFO("new partition count: " << newNumPartitions); - topicMetadata_.reset(new TopicMetadataImpl(newNumPartitions)); - - for (unsigned int i = currentNumPartitions; i < newNumPartitions; i++) { - auto lazy = conf_.getLazyStartPartitionedProducers() && - conf_.getAccessMode() == ProducerConfiguration::Shared; - auto producer = newInternalProducer(i, lazy); - - if (!lazy) { - producer->start(); - } - producers_.push_back(producer); - } - // `runPartitionUpdateTask()` will be called in `handleSinglePartitionProducerCreated()` - return; - } - } else { - LOG_WARN("Failed to getPartitionMetadata: " << 
strResult(result)); - } - - runPartitionUpdateTask(); -} - -bool PartitionedProducerImpl::isConnected() const { - if (state_ != Ready) { - return false; - } - - Lock producersLock(producersMutex_); - const auto producers = producers_; - producersLock.unlock(); - for (const auto& producer : producers) { - if (producer->isStarted() && !producer->isConnected()) { - return false; - } - } - return true; -} - -uint64_t PartitionedProducerImpl::getNumberOfConnectedProducer() { - uint64_t numberOfConnectedProducer = 0; - Lock producersLock(producersMutex_); - const auto producers = producers_; - producersLock.unlock(); - for (const auto& producer : producers) { - if (producer->isConnected()) { - numberOfConnectedProducer++; - } - } - return numberOfConnectedProducer; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/PartitionedProducerImpl.h b/pulsar-client-cpp/lib/PartitionedProducerImpl.h deleted file mode 100644 index 0a8c10e221303..0000000000000 --- a/pulsar-client-cpp/lib/PartitionedProducerImpl.h +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "ProducerImpl.h" -#include "ClientImpl.h" -#include - -#include -#include -#include -#include - -namespace pulsar { - -class PartitionedProducerImpl : public ProducerImplBase, - public std::enable_shared_from_this { - public: - enum State - { - Pending, - Ready, - Closing, - Closed, - Failed - }; - const static std::string PARTITION_NAME_SUFFIX; - - typedef std::unique_lock Lock; - - PartitionedProducerImpl(ClientImplPtr ptr, const TopicNamePtr topicName, const unsigned int numPartitions, - const ProducerConfiguration& config); - virtual ~PartitionedProducerImpl(); - - // overrided methods from ProducerImplBase - const std::string& getProducerName() const override; - int64_t getLastSequenceId() const override; - const std::string& getSchemaVersion() const override; - void sendAsync(const Message& msg, SendCallback callback) override; - /* - * closes all active producers, it can be called explicitly from client as well as createProducer - * when it fails to create one of the producers and we want to fail createProducer - */ - void closeAsync(CloseCallback callback) override; - void start() override; - void shutdown() override; - bool isClosed() override; - const std::string& getTopic() const override; - Future getProducerCreatedFuture() override; - void triggerFlush() override; - void flushAsync(FlushCallback callback) override; - bool isConnected() const override; - uint64_t getNumberOfConnectedProducer() override; - void handleSinglePartitionProducerCreated(Result result, ProducerImplBaseWeakPtr producerBaseWeakPtr, - const unsigned int partitionIndex); - void createLazyPartitionProducer(const unsigned int partitionIndex); - void handleSinglePartitionProducerClose(Result result, const unsigned int partitionIndex, - CloseCallback callback); - - void notifyResult(CloseCallback closeCallback); - - friend class PulsarFriend; - - private: - const ClientImplPtr client_; - - const TopicNamePtr topicName_; - const std::string topic_; - - std::atomic_uint 
numProducersCreated_{0}; - - /* - * set when one or more Single Partition Creation fails, close will cleanup and fail the create callbackxo - */ - bool cleanup_ = false; - - ProducerConfiguration conf_; - - typedef std::vector ProducerList; - ProducerList producers_; - - // producersMutex_ is used to share producers_ and topicMetadata_ - mutable std::mutex producersMutex_; - MessageRoutingPolicyPtr routerPolicy_; - - std::atomic state_{Pending}; - - // only set this promise to value, when producers on all partitions are created. - Promise partitionedProducerCreatedPromise_; - - std::unique_ptr topicMetadata_; - - std::atomic flushedPartitions_; - std::shared_ptr> flushPromise_; - - ExecutorServicePtr listenerExecutor_; - DeadlineTimerPtr partitionsUpdateTimer_; - boost::posix_time::time_duration partitionsUpdateInterval_; - LookupServicePtr lookupServicePtr_; - - unsigned int getNumPartitions() const; - unsigned int getNumPartitionsWithLock() const; - ProducerImplPtr newInternalProducer(unsigned int partition, bool lazy); - MessageRoutingPolicyPtr getMessageRouter(); - void runPartitionUpdateTask(); - void getPartitionMetadata(); - void handleGetPartitions(const Result result, const LookupDataResultPtr& partitionMetadata); -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc b/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc deleted file mode 100644 index 79ed1969d7870..0000000000000 --- a/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "PatternMultiTopicsConsumerImpl.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -PatternMultiTopicsConsumerImpl::PatternMultiTopicsConsumerImpl(ClientImplPtr client, - const std::string pattern, - const std::vector& topics, - const std::string& subscriptionName, - const ConsumerConfiguration& conf, - const LookupServicePtr lookupServicePtr_) - : MultiTopicsConsumerImpl(client, topics, subscriptionName, TopicName::get(pattern), conf, - lookupServicePtr_), - patternString_(pattern), - pattern_(PULSAR_REGEX_NAMESPACE::regex(pattern)), - autoDiscoveryTimer_(), - autoDiscoveryRunning_(false) { - namespaceName_ = TopicName::get(pattern)->getNamespaceName(); -} - -const PULSAR_REGEX_NAMESPACE::regex PatternMultiTopicsConsumerImpl::getPattern() { return pattern_; } - -void PatternMultiTopicsConsumerImpl::resetAutoDiscoveryTimer() { - autoDiscoveryRunning_ = false; - autoDiscoveryTimer_->expires_from_now(seconds(conf_.getPatternAutoDiscoveryPeriod())); - autoDiscoveryTimer_->async_wait( - std::bind(&PatternMultiTopicsConsumerImpl::autoDiscoveryTimerTask, this, std::placeholders::_1)); -} - -void PatternMultiTopicsConsumerImpl::autoDiscoveryTimerTask(const boost::system::error_code& err) { - if (err == boost::asio::error::operation_aborted) { - LOG_DEBUG(getName() << "Timer cancelled: " << err.message()); - return; - } else if (err) { - LOG_ERROR(getName() << "Timer error: " << err.message()); - return; - } - - const auto state = state_.load(); - if (state != Ready) { - LOG_ERROR("Error in autoDiscoveryTimerTask consumer state not 
ready: " << state); - resetAutoDiscoveryTimer(); - return; - } - - if (autoDiscoveryRunning_) { - LOG_DEBUG("autoDiscoveryTimerTask still running, cancel this running. "); - return; - } - - autoDiscoveryRunning_ = true; - - // already get namespace from pattern. - assert(namespaceName_); - - lookupServicePtr_->getTopicsOfNamespaceAsync(namespaceName_) - .addListener(std::bind(&PatternMultiTopicsConsumerImpl::timerGetTopicsOfNamespace, this, - std::placeholders::_1, std::placeholders::_2)); -} - -void PatternMultiTopicsConsumerImpl::timerGetTopicsOfNamespace(const Result result, - const NamespaceTopicsPtr topics) { - if (result != ResultOk) { - LOG_ERROR("Error in Getting topicsOfNameSpace. result: " << result); - resetAutoDiscoveryTimer(); - return; - } - - NamespaceTopicsPtr newTopics = PatternMultiTopicsConsumerImpl::topicsPatternFilter(*topics, pattern_); - // get old topics in consumer: - NamespaceTopicsPtr oldTopics = std::make_shared>(); - for (std::map::iterator it = topicsPartitions_.begin(); it != topicsPartitions_.end(); - it++) { - oldTopics->push_back(it->first); - } - NamespaceTopicsPtr topicsAdded = topicsListsMinus(*newTopics, *oldTopics); - NamespaceTopicsPtr topicsRemoved = topicsListsMinus(*oldTopics, *newTopics); - - // callback method when removed topics all un-subscribed. - ResultCallback topicsRemovedCallback = [this](Result result) { - if (result != ResultOk) { - LOG_ERROR("Failed to unsubscribe topics: " << result); - } - resetAutoDiscoveryTimer(); - }; - - // callback method when added topics all subscribed. - ResultCallback topicsAddedCallback = [this, topicsRemoved, topicsRemovedCallback](Result result) { - if (result == ResultOk) { - // call to unsubscribe all removed topics. 
- onTopicsRemoved(topicsRemoved, topicsRemovedCallback); - } else { - resetAutoDiscoveryTimer(); - } - }; - - // call to subscribe new added topics, then in its callback do unsubscribe - onTopicsAdded(topicsAdded, topicsAddedCallback); -} - -void PatternMultiTopicsConsumerImpl::onTopicsAdded(NamespaceTopicsPtr addedTopics, ResultCallback callback) { - // start call subscribeOneTopicAsync for each single topic - - if (addedTopics->empty()) { - LOG_DEBUG("no topics need subscribe"); - callback(ResultOk); - return; - } - int topicsNumber = addedTopics->size(); - - std::shared_ptr> topicsNeedCreate = std::make_shared>(topicsNumber); - // subscribe for each passed in topic - for (std::vector::const_iterator itr = addedTopics->begin(); itr != addedTopics->end(); - itr++) { - MultiTopicsConsumerImpl::subscribeOneTopicAsync(*itr).addListener( - std::bind(&PatternMultiTopicsConsumerImpl::handleOneTopicAdded, this, std::placeholders::_1, *itr, - topicsNeedCreate, callback)); - } -} - -void PatternMultiTopicsConsumerImpl::handleOneTopicAdded(const Result result, const std::string& topic, - std::shared_ptr> topicsNeedCreate, - ResultCallback callback) { - (*topicsNeedCreate)--; - - if (result != ResultOk) { - LOG_ERROR("Failed when subscribed to topic " << topic << " Error - " << result); - callback(result); - return; - } - - if (topicsNeedCreate->load() == 0) { - LOG_DEBUG("Subscribed all new added topics"); - callback(result); - } -} - -void PatternMultiTopicsConsumerImpl::onTopicsRemoved(NamespaceTopicsPtr removedTopics, - ResultCallback callback) { - // start call subscribeOneTopicAsync for each single topic - if (removedTopics->empty()) { - LOG_DEBUG("no topics need unsubscribe"); - callback(ResultOk); - return; - } - - auto topicsNeedUnsub = std::make_shared>(removedTopics->size()); - - ResultCallback oneTopicUnsubscribedCallback = [topicsNeedUnsub, callback](Result result) { - (*topicsNeedUnsub)--; - - if (result != ResultOk) { - LOG_ERROR("Failed when unsubscribe to 
one topic. Error - " << result); - callback(result); - return; - } - - if (topicsNeedUnsub->load() == 0) { - LOG_DEBUG("unSubscribed all needed topics"); - callback(result); - } - }; - - // unsubscribe for each passed in topic - for (std::vector::const_iterator itr = removedTopics->begin(); itr != removedTopics->end(); - itr++) { - MultiTopicsConsumerImpl::unsubscribeOneTopicAsync(*itr, oneTopicUnsubscribedCallback); - } -} - -NamespaceTopicsPtr PatternMultiTopicsConsumerImpl::topicsPatternFilter( - const std::vector& topics, const PULSAR_REGEX_NAMESPACE::regex& pattern) { - NamespaceTopicsPtr topicsResultPtr = std::make_shared>(); - - for (std::vector::const_iterator itr = topics.begin(); itr != topics.end(); itr++) { - if (PULSAR_REGEX_NAMESPACE::regex_match(*itr, pattern)) { - topicsResultPtr->push_back(*itr); - } - } - return topicsResultPtr; -} - -NamespaceTopicsPtr PatternMultiTopicsConsumerImpl::topicsListsMinus(std::vector& list1, - std::vector& list2) { - NamespaceTopicsPtr topicsResultPtr = std::make_shared>(); - std::remove_copy_if(list1.begin(), list1.end(), std::back_inserter(*topicsResultPtr), - [&list2](const std::string& arg) { - return (std::find(list2.begin(), list2.end(), arg) != list2.end()); - }); - - return topicsResultPtr; -} - -void PatternMultiTopicsConsumerImpl::start() { - MultiTopicsConsumerImpl::start(); - - LOG_DEBUG("PatternMultiTopicsConsumerImpl start autoDiscoveryTimer_."); - - // Init autoDiscoveryTimer task only once, wait for the timeout to happen - if (!autoDiscoveryTimer_ && conf_.getPatternAutoDiscoveryPeriod() > 0) { - autoDiscoveryTimer_ = client_->getIOExecutorProvider()->get()->createDeadlineTimer(); - autoDiscoveryTimer_->expires_from_now(seconds(conf_.getPatternAutoDiscoveryPeriod())); - autoDiscoveryTimer_->async_wait( - std::bind(&PatternMultiTopicsConsumerImpl::autoDiscoveryTimerTask, this, std::placeholders::_1)); - } -} - -void PatternMultiTopicsConsumerImpl::shutdown() { - Lock lock(mutex_); - state_ = Closed; - 
autoDiscoveryTimer_->cancel(); - multiTopicsConsumerCreatedPromise_.setFailed(ResultAlreadyClosed); -} - -void PatternMultiTopicsConsumerImpl::closeAsync(ResultCallback callback) { - MultiTopicsConsumerImpl::closeAsync(callback); - autoDiscoveryTimer_->cancel(); -} diff --git a/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.h b/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.h deleted file mode 100644 index 408d68e3a20a8..0000000000000 --- a/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.h +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_PATTERN_MULTI_TOPICS_CONSUMER_HEADER -#define PULSAR_PATTERN_MULTI_TOPICS_CONSUMER_HEADER -#include "ConsumerImpl.h" -#include "ClientImpl.h" -#include -#include -#include "MultiTopicsConsumerImpl.h" -#include - -#ifdef PULSAR_USE_BOOST_REGEX -#include -#define PULSAR_REGEX_NAMESPACE boost -#else -#include -#define PULSAR_REGEX_NAMESPACE std -#endif - -namespace pulsar { - -class PatternMultiTopicsConsumerImpl; - -class PatternMultiTopicsConsumerImpl : public MultiTopicsConsumerImpl { - public: - // currently we support topics under same namespace, so `patternString` is a regex, - // which only contains after namespace part. - // when subscribe, client will first get all topics that match given pattern. - // `topics` contains the topics that match `patternString`. - PatternMultiTopicsConsumerImpl(ClientImplPtr client, const std::string patternString, - const std::vector& topics, - const std::string& subscriptionName, const ConsumerConfiguration& conf, - const LookupServicePtr lookupServicePtr_); - - const PULSAR_REGEX_NAMESPACE::regex getPattern(); - - void autoDiscoveryTimerTask(const boost::system::error_code& err); - - // filter input `topics` with given `pattern`, return matched topics - static NamespaceTopicsPtr topicsPatternFilter(const std::vector& topics, - const PULSAR_REGEX_NAMESPACE::regex& pattern); - - // Find out topics, which are in `list1` but not in `list2`. 
- static NamespaceTopicsPtr topicsListsMinus(std::vector& list1, - std::vector& list2); - - virtual void closeAsync(ResultCallback callback); - virtual void start(); - virtual void shutdown(); - - private: - const std::string patternString_; - const PULSAR_REGEX_NAMESPACE::regex pattern_; - typedef std::shared_ptr TimerPtr; - TimerPtr autoDiscoveryTimer_; - bool autoDiscoveryRunning_; - NamespaceNamePtr namespaceName_; - - void resetAutoDiscoveryTimer(); - void timerGetTopicsOfNamespace(const Result result, const NamespaceTopicsPtr topics); - void onTopicsAdded(NamespaceTopicsPtr addedTopics, ResultCallback callback); - void onTopicsRemoved(NamespaceTopicsPtr removedTopics, ResultCallback callback); - void handleOneTopicAdded(const Result result, const std::string& topic, - std::shared_ptr> topicsNeedCreate, ResultCallback callback); -}; - -} // namespace pulsar -#endif // PULSAR_PATTERN_MULTI_TOPICS_CONSUMER_HEADER diff --git a/pulsar-client-cpp/lib/PendingFailures.h b/pulsar-client-cpp/lib/PendingFailures.h deleted file mode 100644 index 060394736c31f..0000000000000 --- a/pulsar-client-cpp/lib/PendingFailures.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_PENDINGFAILURES_H_ -#define LIB_PENDINGFAILURES_H_ - -#include -#include - -namespace pulsar { - -class PendingFailures { - public: - void add(const std::function& failure) { failures.emplace_back(failure); } - - bool empty() const noexcept { return failures.empty(); } - - void complete() { - for (auto& failure : failures) { - failure(); - } - } - - private: - std::vector> failures; -}; - -} // namespace pulsar - -#endif diff --git a/pulsar-client-cpp/lib/PeriodicTask.cc b/pulsar-client-cpp/lib/PeriodicTask.cc deleted file mode 100644 index 4e91ef5f7e150..0000000000000 --- a/pulsar-client-cpp/lib/PeriodicTask.cc +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "lib/PeriodicTask.h" -#include - -namespace pulsar { - -void PeriodicTask::start() { - if (state_ != Pending) { - return; - } - state_ = Ready; - if (periodMs_ >= 0) { - std::weak_ptr weakSelf{shared_from_this()}; - timer_.expires_from_now(boost::posix_time::millisec(periodMs_)); - timer_.async_wait([weakSelf](const ErrorCode& ec) { - auto self = weakSelf.lock(); - if (self) { - self->handleTimeout(ec); - } - }); - } -} - -void PeriodicTask::stop() { - State state = Ready; - if (!state_.compare_exchange_strong(state, Closing)) { - return; - } - timer_.cancel(); - state_ = Pending; -} - -void PeriodicTask::handleTimeout(const ErrorCode& ec) { - if (state_ != Ready || ec.value() == boost::system::errc::operation_canceled) { - return; - } - - callback_(ec); - - // state_ may be changed in handleTimeout, so we check state_ again - if (state_ == Ready) { - auto self = shared_from_this(); - timer_.expires_from_now(boost::posix_time::millisec(periodMs_)); - timer_.async_wait([this, self](const ErrorCode& ec) { handleTimeout(ec); }); - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/PeriodicTask.h b/pulsar-client-cpp/lib/PeriodicTask.h deleted file mode 100644 index 57d0734859fd1..0000000000000 --- a/pulsar-client-cpp/lib/PeriodicTask.h +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include -#include -#include - -#include - -namespace pulsar { - -/** - * A task that is executed periodically. - * - * After the `start()` method is called, it will trigger `callback_` method periodically whose interval is - * `periodMs` in the constructor. After the `stop()` method is called, the timer will be cancelled and - * `callback()` will never be called again unless `start()` was called again. - * - * If you don't want to execute the task infinitely, you can call `stop()` in the implementation of - * `callback()` method. - * - * NOTE: If the `periodMs` is negative, the `callback()` will never be called. 
- */ -class PeriodicTask : public std::enable_shared_from_this { - public: - using ErrorCode = boost::system::error_code; - using CallbackType = std::function; - - enum State : std::uint8_t - { - Pending, - Ready, - Closing - }; - - PeriodicTask(boost::asio::io_service& ioService, int periodMs) : timer_(ioService), periodMs_(periodMs) {} - - void start(); - - void stop(); - - void setCallback(CallbackType callback) noexcept { callback_ = callback; } - - State getState() const noexcept { return state_; } - int getPeriodMs() const noexcept { return periodMs_; } - - private: - std::atomic state_{Pending}; - boost::asio::deadline_timer timer_; - const int periodMs_; - CallbackType callback_{trivialCallback}; - - void handleTimeout(const ErrorCode& ec); - - static void trivialCallback(const ErrorCode&) {} -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Producer.cc b/pulsar-client-cpp/lib/Producer.cc deleted file mode 100644 index ad60828aba803..0000000000000 --- a/pulsar-client-cpp/lib/Producer.cc +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include "SharedBuffer.h" -#include - -#include "Utils.h" -#include "ProducerImpl.h" - -namespace pulsar { - -static const std::string EMPTY_STRING; - -Producer::Producer() : impl_() {} - -Producer::Producer(ProducerImplBasePtr impl) : impl_(impl) {} - -const std::string& Producer::getTopic() const { return impl_ != NULL ? impl_->getTopic() : EMPTY_STRING; } - -Result Producer::send(const Message& msg) { - Promise promise; - sendAsync(msg, WaitForCallbackValue(promise)); - - if (!promise.isComplete()) { - impl_->triggerFlush(); - } - - MessageId mi; - Result result = promise.getFuture().get(mi); - msg.setMessageId(mi); - - return result; -} - -Result Producer::send(const Message& msg, MessageId& messageId) { - Promise promise; - sendAsync(msg, WaitForCallbackValue(promise)); - - if (!promise.isComplete()) { - impl_->triggerFlush(); - } - - return promise.getFuture().get(messageId); -} - -void Producer::sendAsync(const Message& msg, SendCallback callback) { - if (!impl_) { - callback(ResultProducerNotInitialized, msg.getMessageId()); - return; - } - - impl_->sendAsync(msg, callback); -} - -const std::string& Producer::getProducerName() const { return impl_->getProducerName(); } - -int64_t Producer::getLastSequenceId() const { return impl_->getLastSequenceId(); } - -const std::string& Producer::getSchemaVersion() const { return impl_->getSchemaVersion(); } - -Result Producer::close() { - Promise promise; - closeAsync(WaitForCallback(promise)); - - Result result; - promise.getFuture().get(result); - return result; -} - -void Producer::closeAsync(CloseCallback callback) { - if (!impl_) { - callback(ResultProducerNotInitialized); - return; - } - - impl_->closeAsync(callback); -} - -Result Producer::flush() { - Promise promise; - flushAsync(WaitForCallback(promise)); - - Result result; - promise.getFuture().get(result); - return result; -} - -void Producer::flushAsync(FlushCallback callback) { - if (!impl_) { - callback(ResultProducerNotInitialized); - 
return; - } - - impl_->flushAsync(callback); -} - -void Producer::producerFailMessages(Result result) { - if (impl_) { - ProducerImpl* producerImpl = static_cast(impl_.get()); - producerImpl->failPendingMessages(result, true); - } -} - -bool Producer::isConnected() const { return impl_ && impl_->isConnected(); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ProducerConfiguration.cc b/pulsar-client-cpp/lib/ProducerConfiguration.cc deleted file mode 100644 index 4f64870c06a6e..0000000000000 --- a/pulsar-client-cpp/lib/ProducerConfiguration.cc +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include - -namespace pulsar { - -const static std::string emptyString; - -ProducerConfiguration::ProducerConfiguration() : impl_(std::make_shared()) {} - -ProducerConfiguration::~ProducerConfiguration() {} - -ProducerConfiguration::ProducerConfiguration(const ProducerConfiguration& x) : impl_(x.impl_) {} - -ProducerConfiguration& ProducerConfiguration::operator=(const ProducerConfiguration& x) { - impl_ = x.impl_; - return *this; -} - -ProducerConfiguration& ProducerConfiguration::setProducerName(const std::string& producerName) { - impl_->producerName = Optional::of(producerName); - return *this; -} - -const std::string& ProducerConfiguration::getProducerName() const { - return impl_->producerName.is_present() ? impl_->producerName.value() : emptyString; -} - -ProducerConfiguration& ProducerConfiguration::setInitialSequenceId(int64_t initialSequenceId) { - impl_->initialSequenceId = Optional::of(initialSequenceId); - return *this; -} - -int64_t ProducerConfiguration::getInitialSequenceId() const { - return impl_->initialSequenceId.is_present() ? 
impl_->initialSequenceId.value() : -1ll; -} - -ProducerConfiguration& ProducerConfiguration::setSendTimeout(int sendTimeoutMs) { - impl_->sendTimeoutMs = sendTimeoutMs; - return *this; -} - -int ProducerConfiguration::getSendTimeout() const { return impl_->sendTimeoutMs; } - -ProducerConfiguration& ProducerConfiguration::setCompressionType(CompressionType compressionType) { - impl_->compressionType = compressionType; - return *this; -} - -CompressionType ProducerConfiguration::getCompressionType() const { return impl_->compressionType; } - -ProducerConfiguration& ProducerConfiguration::setMaxPendingMessages(int maxPendingMessages) { - if (maxPendingMessages < 0) { - throw std::invalid_argument("maxPendingMessages needs to be >= 0"); - } - impl_->maxPendingMessages = maxPendingMessages; - return *this; -} - -int ProducerConfiguration::getMaxPendingMessages() const { return impl_->maxPendingMessages; } - -ProducerConfiguration& ProducerConfiguration::setMaxPendingMessagesAcrossPartitions(int maxPendingMessages) { - if (maxPendingMessages < 0) { - throw std::invalid_argument("maxPendingMessages needs to be >=0"); - } - impl_->maxPendingMessagesAcrossPartitions = maxPendingMessages; - return *this; -} - -int ProducerConfiguration::getMaxPendingMessagesAcrossPartitions() const { - return impl_->maxPendingMessagesAcrossPartitions; -} - -ProducerConfiguration& ProducerConfiguration::setPartitionsRoutingMode(const PartitionsRoutingMode& mode) { - impl_->routingMode = mode; - return *this; -} - -ProducerConfiguration::PartitionsRoutingMode ProducerConfiguration::getPartitionsRoutingMode() const { - return impl_->routingMode; -} - -ProducerConfiguration& ProducerConfiguration::setMessageRouter(const MessageRoutingPolicyPtr& router) { - impl_->routingMode = ProducerConfiguration::CustomPartition; - impl_->messageRouter = router; - return *this; -} - -const MessageRoutingPolicyPtr& ProducerConfiguration::getMessageRouterPtr() const { - return impl_->messageRouter; -} - 
-ProducerConfiguration& ProducerConfiguration::setHashingScheme(const HashingScheme& scheme) { - impl_->hashingScheme = scheme; - return *this; -} - -ProducerConfiguration::HashingScheme ProducerConfiguration::getHashingScheme() const { - return impl_->hashingScheme; -} - -ProducerConfiguration& ProducerConfiguration::setBlockIfQueueFull(bool flag) { - impl_->blockIfQueueFull = flag; - return *this; -} - -bool ProducerConfiguration::getBlockIfQueueFull() const { return impl_->blockIfQueueFull; } - -ProducerConfiguration& ProducerConfiguration::setBatchingEnabled(const bool& batchingEnabled) { - impl_->batchingEnabled = batchingEnabled; - return *this; -} -const bool& ProducerConfiguration::getBatchingEnabled() const { return impl_->batchingEnabled; } - -ProducerConfiguration& ProducerConfiguration::setBatchingMaxMessages( - const unsigned int& batchingMaxMessages) { - if (batchingMaxMessages <= 1) { - throw std::invalid_argument("batchingMaxMessages needs to be greater than 1"); - } - impl_->batchingMaxMessages = batchingMaxMessages; - return *this; -} - -const unsigned int& ProducerConfiguration::getBatchingMaxMessages() const { - return impl_->batchingMaxMessages; -} - -ProducerConfiguration& ProducerConfiguration::setBatchingMaxAllowedSizeInBytes( - const unsigned long& batchingMaxAllowedSizeInBytes) { - impl_->batchingMaxAllowedSizeInBytes = batchingMaxAllowedSizeInBytes; - return *this; -} -const unsigned long& ProducerConfiguration::getBatchingMaxAllowedSizeInBytes() const { - return impl_->batchingMaxAllowedSizeInBytes; -} - -ProducerConfiguration& ProducerConfiguration::setBatchingMaxPublishDelayMs( - const unsigned long& batchingMaxPublishDelayMs) { - impl_->batchingMaxPublishDelayMs = batchingMaxPublishDelayMs; - return *this; -} - -const unsigned long& ProducerConfiguration::getBatchingMaxPublishDelayMs() const { - return impl_->batchingMaxPublishDelayMs; -} - -ProducerConfiguration& ProducerConfiguration::setBatchingType(BatchingType batchingType) { - 
if (batchingType < ProducerConfiguration::DefaultBatching || - batchingType > ProducerConfiguration::KeyBasedBatching) { - throw std::invalid_argument("Unsupported batching type: " + std::to_string(batchingType)); - } - impl_->batchingType = batchingType; - return *this; -} - -ProducerConfiguration::BatchingType ProducerConfiguration::getBatchingType() const { - return impl_->batchingType; -} - -const CryptoKeyReaderPtr ProducerConfiguration::getCryptoKeyReader() const { return impl_->cryptoKeyReader; } - -ProducerConfiguration& ProducerConfiguration::setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader) { - impl_->cryptoKeyReader = cryptoKeyReader; - return *this; -} - -ProducerCryptoFailureAction ProducerConfiguration::getCryptoFailureAction() const { - return impl_->cryptoFailureAction; -} - -ProducerConfiguration& ProducerConfiguration::setCryptoFailureAction(ProducerCryptoFailureAction action) { - impl_->cryptoFailureAction = action; - return *this; -} - -const std::set& ProducerConfiguration::getEncryptionKeys() const { - return impl_->encryptionKeys; -} - -bool ProducerConfiguration::isEncryptionEnabled() const { - return (!impl_->encryptionKeys.empty() && (impl_->cryptoKeyReader != NULL)); -} - -ProducerConfiguration& ProducerConfiguration::addEncryptionKey(std::string key) { - impl_->encryptionKeys.insert(key); - return *this; -} - -ProducerConfiguration& ProducerConfiguration::setLazyStartPartitionedProducers( - bool useLazyStartPartitionedProducers) { - impl_->useLazyStartPartitionedProducers = useLazyStartPartitionedProducers; - return *this; -} - -bool ProducerConfiguration::getLazyStartPartitionedProducers() const { - return impl_->useLazyStartPartitionedProducers; -} - -ProducerConfiguration& ProducerConfiguration::setSchema(const SchemaInfo& schemaInfo) { - impl_->schemaInfo = schemaInfo; - return *this; -} - -const SchemaInfo& ProducerConfiguration::getSchema() const { return impl_->schemaInfo; } - -bool ProducerConfiguration::hasProperty(const 
std::string& name) const { - const std::map& m = impl_->properties; - return m.find(name) != m.end(); -} - -const std::string& ProducerConfiguration::getProperty(const std::string& name) const { - if (hasProperty(name)) { - const std::map& m = impl_->properties; - return m.at(name); - } else { - return emptyString; - } -} - -std::map& ProducerConfiguration::getProperties() const { return impl_->properties; } - -ProducerConfiguration& ProducerConfiguration::setProperty(const std::string& name, const std::string& value) { - impl_->properties.insert(std::make_pair(name, value)); - return *this; -} - -ProducerConfiguration& ProducerConfiguration::setProperties( - const std::map& properties) { - for (std::map::const_iterator it = properties.begin(); it != properties.end(); - it++) { - setProperty(it->first, it->second); - } - return *this; -} - -ProducerConfiguration& ProducerConfiguration::setChunkingEnabled(bool chunkingEnabled) { - impl_->chunkingEnabled = chunkingEnabled; - return *this; -} - -bool ProducerConfiguration::isChunkingEnabled() const { return impl_->chunkingEnabled; } - -ProducerConfiguration& ProducerConfiguration::setAccessMode(const ProducerAccessMode& accessMode) { - impl_->accessMode = accessMode; - return *this; -} -ProducerConfiguration::ProducerAccessMode ProducerConfiguration::getAccessMode() const { - return impl_->accessMode; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ProducerConfigurationImpl.h b/pulsar-client-cpp/lib/ProducerConfigurationImpl.h deleted file mode 100644 index 80c6432cfce69..0000000000000 --- a/pulsar-client-cpp/lib/ProducerConfigurationImpl.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_PRODUCERCONFIGURATIONIMPL_H_ -#define LIB_PRODUCERCONFIGURATIONIMPL_H_ - -#include -#include - -#include "Utils.h" - -namespace pulsar { - -struct ProducerConfigurationImpl { - SchemaInfo schemaInfo; - Optional producerName; - Optional initialSequenceId; - int sendTimeoutMs{30000}; - CompressionType compressionType{CompressionNone}; - int maxPendingMessages{1000}; - int maxPendingMessagesAcrossPartitions{50000}; - ProducerConfiguration::PartitionsRoutingMode routingMode{ProducerConfiguration::UseSinglePartition}; - MessageRoutingPolicyPtr messageRouter; - ProducerConfiguration::HashingScheme hashingScheme{ProducerConfiguration::BoostHash}; - bool useLazyStartPartitionedProducers{false}; - bool blockIfQueueFull{false}; - bool batchingEnabled{true}; - unsigned int batchingMaxMessages{1000}; - unsigned long batchingMaxAllowedSizeInBytes{128 * 1024}; // 128 KB - unsigned long batchingMaxPublishDelayMs{10}; // 10 milli seconds - ProducerConfiguration::BatchingType batchingType{ProducerConfiguration::DefaultBatching}; - CryptoKeyReaderPtr cryptoKeyReader; - std::set encryptionKeys; - ProducerCryptoFailureAction cryptoFailureAction{ProducerCryptoFailureAction::FAIL}; - std::map properties; - bool chunkingEnabled{false}; - ProducerConfiguration::ProducerAccessMode accessMode{ProducerConfiguration::Shared}; -}; -} // namespace pulsar - -#endif /* 
LIB_PRODUCERCONFIGURATIONIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ProducerImpl.cc b/pulsar-client-cpp/lib/ProducerImpl.cc deleted file mode 100644 index 20133c50fc932..0000000000000 --- a/pulsar-client-cpp/lib/ProducerImpl.cc +++ /dev/null @@ -1,932 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "ProducerImpl.h" -#include "LogUtils.h" -#include "MessageImpl.h" -#include "TimeUtils.h" -#include "PulsarApi.pb.h" -#include "Commands.h" -#include "BatchMessageContainerBase.h" -#include "BatchMessageContainer.h" -#include "BatchMessageKeyBasedContainer.h" -#include -#include -#include "MessageAndCallbackBatch.h" - -namespace pulsar { -DECLARE_LOG_OBJECT() - -struct ProducerImpl::PendingCallbacks { - std::vector opSendMsgs; - - void complete(Result result) { - for (const auto& opSendMsg : opSendMsgs) { - opSendMsg.complete(result, {}); - } - } -}; - -ProducerImpl::ProducerImpl(ClientImplPtr client, const TopicName& topicName, - const ProducerConfiguration& conf, int32_t partition) - : HandlerBase( - client, (partition < 0) ? 
topicName.toString() : topicName.getTopicPartitionName(partition), - Backoff(milliseconds(100), seconds(60), milliseconds(std::max(100, conf.getSendTimeout() - 100)))), - conf_(conf), - semaphore_(), - pendingMessagesQueue_(), - partition_(partition), - producerName_(conf_.getProducerName()), - userProvidedProducerName_(false), - producerStr_("[" + topic_ + ", " + producerName_ + "] "), - producerId_(client->newProducerId()), - msgSequenceGenerator_(0), - batchTimer_(executor_->getIOService()), - sendTimer_(executor_->getIOService()), - dataKeyRefreshTask_(executor_->getIOService(), 4 * 60 * 60 * 1000), - memoryLimitController_(client->getMemoryLimitController()), - chunkingEnabled_(conf_.isChunkingEnabled() && topicName.isPersistent() && !conf_.getBatchingEnabled()) { - LOG_DEBUG("ProducerName - " << producerName_ << " Created producer on topic " << topic_ - << " id: " << producerId_); - - int64_t initialSequenceId = conf.getInitialSequenceId(); - lastSequenceIdPublished_ = initialSequenceId; - msgSequenceGenerator_ = initialSequenceId + 1; - - if (!producerName_.empty()) { - userProvidedProducerName_ = true; - } - - if (conf.getMaxPendingMessages() > 0) { - semaphore_ = std::unique_ptr(new Semaphore(conf_.getMaxPendingMessages())); - } - - unsigned int statsIntervalInSeconds = client->getClientConfig().getStatsIntervalInSeconds(); - if (statsIntervalInSeconds) { - producerStatsBasePtr_ = - std::make_shared(producerStr_, executor_, statsIntervalInSeconds); - } else { - producerStatsBasePtr_ = std::make_shared(); - } - - if (conf_.isEncryptionEnabled()) { - std::ostringstream logCtxStream; - logCtxStream << "[" << topic_ << ", " << producerName_ << ", " << producerId_ << "]"; - std::string logCtx = logCtxStream.str(); - msgCrypto_ = std::make_shared(logCtx, true); - msgCrypto_->addPublicKeyCipher(conf_.getEncryptionKeys(), conf_.getCryptoKeyReader()); - } - - if (conf_.getBatchingEnabled()) { - switch (conf_.getBatchingType()) { - case 
ProducerConfiguration::DefaultBatching: - batchMessageContainer_.reset(new BatchMessageContainer(*this)); - break; - case ProducerConfiguration::KeyBasedBatching: - batchMessageContainer_.reset(new BatchMessageKeyBasedContainer(*this)); - break; - default: // never reached here - LOG_ERROR("Unknown batching type: " << conf_.getBatchingType()); - return; - } - } -} - -ProducerImpl::~ProducerImpl() { - LOG_DEBUG(getName() << "~ProducerImpl"); - cancelTimers(); - printStats(); - if (state_ == Ready || state_ == Pending) { - LOG_WARN(getName() << "Destroyed producer which was not properly closed"); - } -} - -const std::string& ProducerImpl::getTopic() const { return topic_; } - -const std::string& ProducerImpl::getProducerName() const { return producerName_; } - -int64_t ProducerImpl::getLastSequenceId() const { return lastSequenceIdPublished_; } - -const std::string& ProducerImpl::getSchemaVersion() const { return schemaVersion_; } - -void ProducerImpl::connectionOpened(const ClientConnectionPtr& cnx) { - if (state_ == Closed) { - LOG_DEBUG(getName() << "connectionOpened : Producer is already closed"); - return; - } - - ClientImplPtr client = client_.lock(); - int requestId = client->newRequestId(); - - SharedBuffer cmd = Commands::newProducer( - topic_, producerId_, producerName_, requestId, conf_.getProperties(), conf_.getSchema(), epoch_, - userProvidedProducerName_, conf_.isEncryptionEnabled(), - static_cast(conf_.getAccessMode()), topicEpoch); - cnx->sendRequestWithId(cmd, requestId) - .addListener(std::bind(&ProducerImpl::handleCreateProducer, shared_from_this(), cnx, - std::placeholders::_1, std::placeholders::_2)); -} - -void ProducerImpl::connectionFailed(Result result) { - // Keep a reference to ensure object is kept alive - ProducerImplPtr ptr = shared_from_this(); - - if (conf_.getLazyStartPartitionedProducers() && conf_.getAccessMode() == ProducerConfiguration::Shared) { - // if producers are lazy, then they should always try to restart - // so don't 
change the state and allow reconnections - return; - } else if (producerCreatedPromise_.setFailed(result)) { - state_ = Failed; - } -} - -void ProducerImpl::handleCreateProducer(const ClientConnectionPtr& cnx, Result result, - const ResponseData& responseData) { - LOG_DEBUG(getName() << "ProducerImpl::handleCreateProducer res: " << strResult(result)); - - // make sure we're still in the Pending/Ready state, closeAsync could have been invoked - // while waiting for this response if using lazy producers - const auto state = state_.load(); - if (state != Ready && state != Pending) { - LOG_DEBUG("Producer created response received but producer already closed"); - failPendingMessages(ResultAlreadyClosed, false); - return; - } - - if (result == ResultOk) { - Lock lock(mutex_); - // We are now reconnected to broker and clear to send messages. Re-send all pending messages and - // set the cnx pointer so that new messages will be sent immediately - LOG_INFO(getName() << "Created producer on broker " << cnx->cnxString()); - - cnx->registerProducer(producerId_, shared_from_this()); - producerName_ = responseData.producerName; - schemaVersion_ = responseData.schemaVersion; - producerStr_ = "[" + topic_ + ", " + producerName_ + "] "; - topicEpoch = responseData.topicEpoch; - - if (lastSequenceIdPublished_ == -1 && conf_.getInitialSequenceId() == -1) { - lastSequenceIdPublished_ = responseData.lastSequenceId; - msgSequenceGenerator_ = lastSequenceIdPublished_ + 1; - } - resendMessages(cnx); - connection_ = cnx; - state_ = Ready; - backoff_.reset(); - lock.unlock(); - - if (conf_.isEncryptionEnabled()) { - auto weakSelf = weak_from_this(); - dataKeyRefreshTask_.setCallback([this, weakSelf](const PeriodicTask::ErrorCode& ec) { - auto self = weakSelf.lock(); - if (!self) { - return; - } - if (ec) { - LOG_ERROR("DataKeyRefresh timer failed: " << ec.message()); - return; - } - msgCrypto_->addPublicKeyCipher(conf_.getEncryptionKeys(), conf_.getCryptoKeyReader()); - }); - } - - // if 
the producer is lazy the send timeout timer is already running - if (!(conf_.getLazyStartPartitionedProducers() && - conf_.getAccessMode() == ProducerConfiguration::Shared)) { - startSendTimeoutTimer(); - } - - producerCreatedPromise_.setValue(shared_from_this()); - - } else { - // Producer creation failed - if (result == ResultTimeout) { - // Creating the producer has timed out. We need to ensure the broker closes the producer - // in case it was indeed created, otherwise it might prevent new create producer operation, - // since we are not closing the connection - int requestId = client_.lock()->newRequestId(); - cnx->sendRequestWithId(Commands::newCloseProducer(producerId_, requestId), requestId); - } - - if (producerCreatedPromise_.isComplete()) { - if (result == ResultProducerBlockedQuotaExceededException) { - LOG_WARN(getName() << "Backlog is exceeded on topic. Sending exception to producer"); - failPendingMessages(ResultProducerBlockedQuotaExceededException, true); - } else if (result == ResultProducerBlockedQuotaExceededError) { - LOG_WARN(getName() << "Producer is blocked on creation because backlog is exceeded on topic"); - } - - // Producer had already been initially created, we need to retry connecting in any case - LOG_WARN(getName() << "Failed to reconnect producer: " << strResult(result)); - scheduleReconnection(shared_from_this()); - } else { - // Producer was not yet created, retry to connect to broker if it's possible - if (isRetriableError(result) && (creationTimestamp_ + operationTimeut_ < TimeUtils::now())) { - LOG_WARN(getName() << "Temporary error in creating producer: " << strResult(result)); - scheduleReconnection(shared_from_this()); - } else { - LOG_ERROR(getName() << "Failed to create producer: " << strResult(result)); - failPendingMessages(result, true); - producerCreatedPromise_.setFailed(result); - state_ = Failed; - } - } - } -} - -std::shared_ptr ProducerImpl::getPendingCallbacksWhenFailed() { - auto callbacks = std::make_shared(); 
- callbacks->opSendMsgs.reserve(pendingMessagesQueue_.size()); - LOG_DEBUG(getName() << "# messages in pending queue : " << pendingMessagesQueue_.size()); - - // Iterate over a copy of the pending messages queue, to trigger the future completion - // without holding producer mutex. - for (auto& op : pendingMessagesQueue_) { - callbacks->opSendMsgs.push_back(op); - releaseSemaphoreForSendOp(op); - } - - if (batchMessageContainer_) { - batchMessageContainer_->processAndClear( - [this, &callbacks](Result result, const OpSendMsg& opSendMsg) { - if (result == ResultOk) { - callbacks->opSendMsgs.emplace_back(opSendMsg); - } - releaseSemaphoreForSendOp(opSendMsg); - }, - nullptr); - } - pendingMessagesQueue_.clear(); - - return callbacks; -} - -std::shared_ptr ProducerImpl::getPendingCallbacksWhenFailedWithLock() { - Lock lock(mutex_); - return getPendingCallbacksWhenFailed(); -} - -void ProducerImpl::failPendingMessages(Result result, bool withLock) { - if (withLock) { - getPendingCallbacksWhenFailedWithLock()->complete(result); - } else { - getPendingCallbacksWhenFailed()->complete(result); - } -} - -void ProducerImpl::resendMessages(ClientConnectionPtr cnx) { - if (pendingMessagesQueue_.empty()) { - return; - } - - LOG_DEBUG(getName() << "Re-Sending " << pendingMessagesQueue_.size() << " messages to server"); - - for (const auto& op : pendingMessagesQueue_) { - LOG_DEBUG(getName() << "Re-Sending " << op.sequenceId_); - cnx->sendMessage(op); - } -} - -void ProducerImpl::setMessageMetadata(const Message& msg, const uint64_t& sequenceId, - const uint32_t& uncompressedSize) { - // Call this function after acquiring the mutex_ - proto::MessageMetadata& msgMetadata = msg.impl_->metadata; - msgMetadata.set_producer_name(producerName_); - msgMetadata.set_publish_time(TimeUtils::currentTimeMillis()); - msgMetadata.set_sequence_id(sequenceId); - if (conf_.getCompressionType() != CompressionNone) { - 
msgMetadata.set_compression(CompressionCodecProvider::convertType(conf_.getCompressionType())); - msgMetadata.set_uncompressed_size(uncompressedSize); - } - if (!this->getSchemaVersion().empty()) { - msgMetadata.set_schema_version(this->getSchemaVersion()); - } -} - -void ProducerImpl::flushAsync(FlushCallback callback) { - if (batchMessageContainer_) { - if (state_ == Ready) { - Lock lock(mutex_); - auto failures = batchMessageAndSend(callback); - lock.unlock(); - failures.complete(); - } else { - callback(ResultAlreadyClosed); - } - } else { - callback(ResultOk); - } -} - -void ProducerImpl::triggerFlush() { - if (batchMessageContainer_) { - if (state_ == Ready) { - Lock lock(mutex_); - auto failures = batchMessageAndSend(); - lock.unlock(); - failures.complete(); - } - } -} - -bool ProducerImpl::isValidProducerState(const SendCallback& callback) const { - const auto state = state_.load(); - switch (state) { - case HandlerBase::Ready: - // OK - case HandlerBase::Pending: - // We are OK to queue the messages on the client, it will be sent to the broker once we get the - // connection - return true; - case HandlerBase::Closing: - case HandlerBase::Closed: - callback(ResultAlreadyClosed, {}); - return false; - case HandlerBase::NotStarted: - case HandlerBase::Failed: - default: - callback(ResultNotConnected, {}); - return false; - } -} - -bool ProducerImpl::canAddToBatch(const Message& msg) const { - // If a message has a delayed delivery time, we'll always send it individually - return batchMessageContainer_.get() && !msg.impl_->metadata.has_deliver_at_time(); -} - -static SharedBuffer applyCompression(const SharedBuffer& uncompressedPayload, - CompressionType compressionType) { - return CompressionCodecProvider::getCodec(compressionType).encode(uncompressedPayload); -} - -void ProducerImpl::sendAsync(const Message& msg, SendCallback callback) { - producerStatsBasePtr_->messageSent(msg); - - const auto now = boost::posix_time::microsec_clock::universal_time(); - 
auto self = shared_from_this(); - sendAsyncWithStatsUpdate(msg, [this, self, now, callback](Result result, const MessageId& messageId) { - producerStatsBasePtr_->messageReceived(result, now); - if (callback) { - callback(result, messageId); - } - }); -} - -void ProducerImpl::sendAsyncWithStatsUpdate(const Message& msg, const SendCallback& callback) { - if (!isValidProducerState(callback)) { - return; - } - - const auto& uncompressedPayload = msg.impl_->payload; - const uint32_t uncompressedSize = uncompressedPayload.readableBytes(); - const auto result = canEnqueueRequest(uncompressedSize); - if (result != ResultOk) { - // If queue is full sending the batch immediately, no point waiting till batchMessagetimeout - if (batchMessageContainer_) { - LOG_DEBUG(getName() << " - sending batch message immediately"); - Lock lock(mutex_); - auto failures = batchMessageAndSend(); - lock.unlock(); - failures.complete(); - } - - callback(result, {}); - return; - } - - // We have already reserved a spot, so if we need to early return for failed result, we should release the - // semaphore and memory first. - const auto handleFailedResult = [this, uncompressedSize, callback](Result result) { - releaseSemaphore(uncompressedSize); // it releases the memory as well - callback(result, {}); - }; - - auto& msgMetadata = msg.impl_->metadata; - const bool compressed = !canAddToBatch(msg); - const auto payload = - compressed ? 
applyCompression(uncompressedPayload, conf_.getCompressionType()) : uncompressedPayload; - const auto compressedSize = static_cast(payload.readableBytes()); - const auto maxMessageSize = static_cast(ClientConnection::getMaxMessageSize()); - - if (!msgMetadata.has_replicated_from() && msgMetadata.has_producer_name()) { - handleFailedResult(ResultInvalidMessage); - return; - } - - Lock lock(mutex_); - uint64_t sequenceId; - if (!msgMetadata.has_sequence_id()) { - sequenceId = msgSequenceGenerator_++; - } else { - sequenceId = msgMetadata.sequence_id(); - } - setMessageMetadata(msg, sequenceId, uncompressedSize); - - auto payloadChunkSize = maxMessageSize; - int totalChunks; - if (!compressed || !chunkingEnabled_) { - totalChunks = 1; - } else { - const auto metadataSize = static_cast(msgMetadata.ByteSizeLong()); - if (metadataSize >= maxMessageSize) { - LOG_WARN(getName() << " - metadata size " << metadataSize << " cannot exceed " << maxMessageSize - << " bytes"); - handleFailedResult(ResultMessageTooBig); - return; - } - payloadChunkSize = maxMessageSize - metadataSize; - totalChunks = getNumOfChunks(compressedSize, payloadChunkSize); - } - - // Each chunk should be sent individually, so try to acquire extra permits for chunks. 
- for (int i = 0; i < (totalChunks - 1); i++) { - const auto result = canEnqueueRequest(0); // size is 0 because the memory has already reserved - if (result != ResultOk) { - handleFailedResult(result); - return; - } - } - - if (canAddToBatch(msg)) { - // Batching is enabled and the message is not delayed - if (!batchMessageContainer_->hasEnoughSpace(msg)) { - batchMessageAndSend().complete(); - } - bool isFirstMessage = batchMessageContainer_->isFirstMessageToAdd(msg); - bool isFull = batchMessageContainer_->add(msg, callback); - if (isFirstMessage) { - batchTimer_.expires_from_now( - boost::posix_time::milliseconds(conf_.getBatchingMaxPublishDelayMs())); - auto weakSelf = weak_from_this(); - batchTimer_.async_wait([this, weakSelf](const boost::system::error_code& ec) { - auto self = weakSelf.lock(); - if (!self) { - return; - } - if (ec) { - LOG_DEBUG(getName() << " Ignoring timer cancelled event, code[" << ec << "]"); - return; - } - LOG_DEBUG(getName() << " - Batch Message Timer expired"); - - // ignore if the producer is already closing/closed - const auto state = state_.load(); - if (state == Pending || state == Ready) { - Lock lock(mutex_); - auto failures = batchMessageAndSend(); - lock.unlock(); - failures.complete(); - } - }); - } - - if (isFull) { - auto failures = batchMessageAndSend(); - lock.unlock(); - failures.complete(); - } - } else { - const bool sendChunks = (totalChunks > 1); - if (sendChunks) { - msgMetadata.set_uuid(producerName_ + "-" + std::to_string(sequenceId)); - msgMetadata.set_num_chunks_from_msg(totalChunks); - msgMetadata.set_total_chunk_msg_size(compressedSize); - } - - int beginIndex = 0; - for (int chunkId = 0; chunkId < totalChunks; chunkId++) { - if (sendChunks) { - msgMetadata.set_chunk_id(chunkId); - } - const uint32_t endIndex = std::min(compressedSize, beginIndex + payloadChunkSize); - auto chunkedPayload = payload.slice(beginIndex, endIndex - beginIndex); - beginIndex = endIndex; - - SharedBuffer encryptedPayload; - if 
(!encryptMessage(msgMetadata, chunkedPayload, encryptedPayload)) { - handleFailedResult(ResultCryptoError); - return; - } - OpSendMsg op{msgMetadata, encryptedPayload, (chunkId == totalChunks - 1) ? callback : nullptr, - producerId_, sequenceId, conf_.getSendTimeout(), - 1, uncompressedSize}; - - if (!chunkingEnabled_) { - const uint32_t msgMetadataSize = op.metadata_.ByteSize(); - const uint32_t payloadSize = op.payload_.readableBytes(); - const uint32_t msgHeadersAndPayloadSize = msgMetadataSize + payloadSize; - if (msgHeadersAndPayloadSize > maxMessageSize) { - lock.unlock(); - releaseSemaphoreForSendOp(op); - LOG_WARN(getName() - << " - compressed Message size " << msgHeadersAndPayloadSize << " cannot exceed " - << maxMessageSize << " bytes unless chunking is enabled"); - handleFailedResult(ResultMessageTooBig); - return; - } - } - - sendMessage(op); - } - } -} - -int ProducerImpl::getNumOfChunks(uint32_t size, uint32_t maxMessageSize) { - if (size >= maxMessageSize && maxMessageSize != 0) { - return size / maxMessageSize + ((size % maxMessageSize == 0) ? 
0 : 1); - } - return 1; -} - -Result ProducerImpl::canEnqueueRequest(uint32_t payloadSize) { - if (conf_.getBlockIfQueueFull()) { - if (semaphore_ && !semaphore_->acquire()) { - return ResultInterrupted; - } - if (!memoryLimitController_.reserveMemory(payloadSize)) { - return ResultInterrupted; - } - return ResultOk; - } else { - if (semaphore_ && !semaphore_->tryAcquire()) { - return ResultProducerQueueIsFull; - } - if (!memoryLimitController_.tryReserveMemory(payloadSize)) { - if (semaphore_) { - semaphore_->release(1); - } - - return ResultMemoryBufferIsFull; - } - - return ResultOk; - } -} - -void ProducerImpl::releaseSemaphore(uint32_t payloadSize) { - if (semaphore_) { - semaphore_->release(); - } - - memoryLimitController_.releaseMemory(payloadSize); -} - -void ProducerImpl::releaseSemaphoreForSendOp(const OpSendMsg& op) { - if (semaphore_) { - semaphore_->release(op.messagesCount_); - } - - memoryLimitController_.releaseMemory(op.messagesSize_); -} - -// It must be called while `mutex_` is acquired -PendingFailures ProducerImpl::batchMessageAndSend(const FlushCallback& flushCallback) { - PendingFailures failures; - LOG_DEBUG("batchMessageAndSend " << *batchMessageContainer_); - batchTimer_.cancel(); - - batchMessageContainer_->processAndClear( - [this, &failures](Result result, const OpSendMsg& opSendMsg) { - if (result == ResultOk) { - sendMessage(opSendMsg); - } else { - // A spot has been reserved for this batch, but the batch failed to be pushed to the queue, so - // we need to release the spot manually - LOG_ERROR("batchMessageAndSend | Failed to createOpSendMsg: " << result); - releaseSemaphoreForSendOp(opSendMsg); - failures.add([opSendMsg, result] { opSendMsg.complete(result, {}); }); - } - }, - flushCallback); - return failures; -} - -// Precondition - -// a. we have a reserved spot on the queue -// b. 
call this function after acquiring the ProducerImpl mutex_ -void ProducerImpl::sendMessage(const OpSendMsg& op) { - const auto sequenceId = op.metadata_.sequence_id(); - LOG_DEBUG("Inserting data to pendingMessagesQueue_"); - pendingMessagesQueue_.push_back(op); - - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - // If we do have a connection, the message is sent immediately, otherwise - // we'll try again once a new connection is established - LOG_DEBUG(getName() << "Sending msg immediately - seq: " << sequenceId); - cnx->sendMessage(op); - } else { - LOG_DEBUG(getName() << "Connection is not ready - seq: " << sequenceId); - } -} - -void ProducerImpl::printStats() { - if (batchMessageContainer_) { - LOG_INFO("Producer - " << producerStr_ << ", [batchMessageContainer = " << *batchMessageContainer_ - << "]"); - } else { - LOG_INFO("Producer - " << producerStr_ << ", [batching = off]"); - } -} - -void ProducerImpl::closeAsync(CloseCallback callback) { - // if the producer was never started then there is nothing to clean up - State expectedState = NotStarted; - if (state_.compare_exchange_strong(expectedState, Closed)) { - callback(ResultOk); - return; - } - - // Keep a reference to ensure object is kept alive - ProducerImplPtr ptr = shared_from_this(); - - cancelTimers(); - - if (semaphore_) { - semaphore_->close(); - } - - // ensure any remaining send callbacks are called before calling the close callback - failPendingMessages(ResultAlreadyClosed, false); - - // TODO maybe we need a loop here to implement CAS for a condition, - // just like Java's `getAndUpdate` method on an atomic variable - const auto state = state_.load(); - if (state != Ready && state != Pending) { - state_ = Closed; - if (callback) { - callback(ResultAlreadyClosed); - } - - return; - } - LOG_INFO(getName() << "Closing producer for topic " << topic_); - state_ = Closing; - - ClientConnectionPtr cnx = getCnx().lock(); - if (!cnx) { - state_ = Closed; - - if (callback) { - 
callback(ResultOk); - } - return; - } - - // Detach the producer from the connection to avoid sending any other - // message from the producer - connection_.reset(); - - ClientImplPtr client = client_.lock(); - if (!client) { - state_ = Closed; - // Client was already destroyed - if (callback) { - callback(ResultOk); - } - return; - } - - int requestId = client->newRequestId(); - Future future = - cnx->sendRequestWithId(Commands::newCloseProducer(producerId_, requestId), requestId); - if (callback) { - // Pass the shared pointer "ptr" to the handler to prevent the object from being destroyed - future.addListener( - std::bind(&ProducerImpl::handleClose, shared_from_this(), std::placeholders::_1, callback, ptr)); - } -} - -void ProducerImpl::handleClose(Result result, ResultCallback callback, ProducerImplPtr producer) { - if (result == ResultOk) { - state_ = Closed; - LOG_INFO(getName() << "Closed producer " << producerId_); - ClientConnectionPtr cnx = getCnx().lock(); - if (cnx) { - cnx->removeProducer(producerId_); - } - } else { - LOG_ERROR(getName() << "Failed to close producer: " << strResult(result)); - } - - if (callback) { - callback(result); - } -} - -Future ProducerImpl::getProducerCreatedFuture() { - return producerCreatedPromise_.getFuture(); -} - -uint64_t ProducerImpl::getProducerId() const { return producerId_; } - -void ProducerImpl::handleSendTimeout(const boost::system::error_code& err) { - const auto state = state_.load(); - if (state != Pending && state != Ready) { - return; - } - Lock lock(mutex_); - - if (err == boost::asio::error::operation_aborted) { - LOG_DEBUG(getName() << "Timer cancelled: " << err.message()); - return; - } else if (err) { - LOG_ERROR(getName() << "Timer error: " << err.message()); - return; - } - - std::shared_ptr pendingCallbacks; - if (pendingMessagesQueue_.empty()) { - // If there are no pending messages, reset the timeout to the configured value. 
- LOG_DEBUG(getName() << "Producer timeout triggered on empty pending message queue"); - asyncWaitSendTimeout(milliseconds(conf_.getSendTimeout())); - } else { - // If there is at least one message, calculate the diff between the message timeout and - // the current time. - time_duration diff = pendingMessagesQueue_.front().timeout_ - TimeUtils::now(); - if (diff.total_milliseconds() <= 0) { - // The diff is less than or equal to zero, meaning that the message has been expired. - LOG_DEBUG(getName() << "Timer expired. Calling timeout callbacks."); - pendingCallbacks = getPendingCallbacksWhenFailed(); - // Since the pending queue is cleared now, set timer to expire after configured value. - asyncWaitSendTimeout(milliseconds(conf_.getSendTimeout())); - } else { - // The diff is greater than zero, set the timeout to the diff value - LOG_DEBUG(getName() << "Timer hasn't expired yet, setting new timeout " << diff); - asyncWaitSendTimeout(diff); - } - } - - lock.unlock(); - if (pendingCallbacks) { - pendingCallbacks->complete(ResultTimeout); - } -} - -bool ProducerImpl::removeCorruptMessage(uint64_t sequenceId) { - Lock lock(mutex_); - if (pendingMessagesQueue_.empty()) { - LOG_DEBUG(getName() << " -- SequenceId - " << sequenceId << "]" // - << "Got send failure for expired message, ignoring it."); - return true; - } - - OpSendMsg op = pendingMessagesQueue_.front(); - uint64_t expectedSequenceId = op.sequenceId_; - if (sequenceId > expectedSequenceId) { - LOG_WARN(getName() << "Got ack failure for msg " << sequenceId // - << " expecting: " << expectedSequenceId << " queue size=" // - << pendingMessagesQueue_.size() << " producer: " << producerId_); - return false; - } else if (sequenceId < expectedSequenceId) { - LOG_DEBUG(getName() << "Corrupt message is already timed out. 
Ignoring msg " << sequenceId); - return true; - } else { - LOG_DEBUG(getName() << "Remove corrupt message from queue " << sequenceId); - pendingMessagesQueue_.pop_front(); - lock.unlock(); - try { - // to protect from client callback exception - op.complete(ResultChecksumError, {}); - } catch (const std::exception& e) { - LOG_ERROR(getName() << "Exception thrown from callback " << e.what()); - } - releaseSemaphoreForSendOp(op); - return true; - } -} - -bool ProducerImpl::ackReceived(uint64_t sequenceId, MessageId& rawMessageId) { - MessageId messageId(partition_, rawMessageId.ledgerId(), rawMessageId.entryId(), - rawMessageId.batchIndex()); - Lock lock(mutex_); - - if (pendingMessagesQueue_.empty()) { - LOG_DEBUG(getName() << " -- SequenceId - " << sequenceId << "]" // - << " -- MessageId - " << messageId << "]" - << "Got an SEND_ACK for expired message, ignoring it."); - return true; - } - - OpSendMsg op = pendingMessagesQueue_.front(); - uint64_t expectedSequenceId = op.sequenceId_; - if (sequenceId > expectedSequenceId) { - LOG_WARN(getName() << "Got ack for msg " << sequenceId // - << " expecting: " << expectedSequenceId << " queue size=" // - << pendingMessagesQueue_.size() << " producer: " << producerId_); - return false; - } else if (sequenceId < expectedSequenceId) { - // Ignoring the ack since it's referring to a message that has already timed out. 
- LOG_DEBUG(getName() << "Got ack for timed out msg " << sequenceId // - << " -- MessageId - " << messageId << " last-seq: " << expectedSequenceId - << " producer: " << producerId_); - return true; - } else { - // Message was persisted correctly - LOG_DEBUG(getName() << "Received ack for msg " << sequenceId); - releaseSemaphoreForSendOp(op); - lastSequenceIdPublished_ = sequenceId + op.messagesCount_ - 1; - - pendingMessagesQueue_.pop_front(); - - lock.unlock(); - try { - op.complete(ResultOk, messageId); - } catch (const std::exception& e) { - LOG_ERROR(getName() << "Exception thrown from callback " << e.what()); - } - return true; - } -} - -bool ProducerImpl::encryptMessage(proto::MessageMetadata& metadata, SharedBuffer& payload, - SharedBuffer& encryptedPayload) { - if (!conf_.isEncryptionEnabled() || msgCrypto_ == NULL) { - encryptedPayload = payload; - return true; - } - - return msgCrypto_->encrypt(conf_.getEncryptionKeys(), conf_.getCryptoKeyReader(), metadata, payload, - encryptedPayload); -} - -void ProducerImpl::disconnectProducer() { - LOG_DEBUG("Broker notification of Closed producer: " << producerId_); - Lock lock(mutex_); - connection_.reset(); - lock.unlock(); - scheduleReconnection(shared_from_this()); -} - -void ProducerImpl::start() { - HandlerBase::start(); - - if (conf_.getLazyStartPartitionedProducers() && conf_.getAccessMode() == ProducerConfiguration::Shared) { - // we need to kick it off now as it is possible that the connection may take - // longer than sendTimeout to connect - startSendTimeoutTimer(); - } -} - -void ProducerImpl::shutdown() { - Lock lock(mutex_); - state_ = Closed; - cancelTimers(); - producerCreatedPromise_.setFailed(ResultAlreadyClosed); -} - -void ProducerImpl::cancelTimers() { - dataKeyRefreshTask_.stop(); - batchTimer_.cancel(); - sendTimer_.cancel(); -} - -bool ProducerImplCmp::operator()(const ProducerImplPtr& a, const ProducerImplPtr& b) const { - return a->getProducerId() < b->getProducerId(); -} - -bool 
ProducerImpl::isClosed() { return state_ == Closed; } - -bool ProducerImpl::isConnected() const { return !getCnx().expired() && state_ == Ready; } - -uint64_t ProducerImpl::getNumberOfConnectedProducer() { return isConnected() ? 1 : 0; } - -bool ProducerImpl::isStarted() const { return state_ != NotStarted; } -void ProducerImpl::startSendTimeoutTimer() { - if (conf_.getSendTimeout() > 0) { - asyncWaitSendTimeout(milliseconds(conf_.getSendTimeout())); - } -} - -void ProducerImpl::asyncWaitSendTimeout(DurationType expiryTime) { - sendTimer_.expires_from_now(expiryTime); - - auto weakSelf = weak_from_this(); - sendTimer_.async_wait([weakSelf](const boost::system::error_code& err) { - auto self = weakSelf.lock(); - if (self) { - std::static_pointer_cast(self)->handleSendTimeout(err); - } - }); -} - -ProducerImplWeakPtr ProducerImpl::weak_from_this() noexcept { return shared_from_this(); } - -} // namespace pulsar -/* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/ProducerImpl.h b/pulsar-client-cpp/lib/ProducerImpl.h deleted file mode 100644 index 74eee61066e5f..0000000000000 --- a/pulsar-client-cpp/lib/ProducerImpl.h +++ /dev/null @@ -1,200 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_PRODUCERIMPL_H_ -#define LIB_PRODUCERIMPL_H_ - -#include -#include - -#include "ClientImpl.h" -#include "BlockingQueue.h" -#include "HandlerBase.h" -#include "SharedBuffer.h" -#include "CompressionCodec.h" -#include "MessageCrypto.h" -#include "stats/ProducerStatsDisabled.h" -#include "stats/ProducerStatsImpl.h" -#include "PulsarApi.pb.h" -#include "OpSendMsg.h" -#include "BatchMessageContainerBase.h" -#include "PendingFailures.h" -#include "Semaphore.h" -#include "PeriodicTask.h" - -using namespace pulsar; - -namespace pulsar { -typedef bool bool_type; - -typedef std::shared_ptr MessageCryptoPtr; - -class PulsarFriend; - -class Producer; -class MemoryLimitController; -class TopicName; - -class ProducerImpl : public HandlerBase, - public std::enable_shared_from_this, - public ProducerImplBase { - public: - ProducerImpl(ClientImplPtr client, const TopicName& topic, - const ProducerConfiguration& producerConfiguration, int32_t partition = -1); - ~ProducerImpl(); - - // overrided methods from ProducerImplBase - const std::string& getProducerName() const override; - int64_t getLastSequenceId() const override; - const std::string& getSchemaVersion() const override; - void sendAsync(const Message& msg, SendCallback callback) override; - void closeAsync(CloseCallback callback) override; - void start() override; - void shutdown() override; - bool isClosed() override; - const std::string& getTopic() const override; - Future getProducerCreatedFuture() override; - void triggerFlush() override; - void flushAsync(FlushCallback callback) override; - bool isConnected() const override; - uint64_t getNumberOfConnectedProducer() override; - bool isStarted() const; - - bool removeCorruptMessage(uint64_t sequenceId); - - bool ackReceived(uint64_t sequenceId, MessageId& messageId); - - virtual void disconnectProducer(); - - uint64_t getProducerId() const; - - int32_t partition() const noexcept { return partition_; } - - static int getNumOfChunks(uint32_t size, 
uint32_t maxMessageSize); - - // NOTE: this method is introduced into `enable_shared_from_this` since C++17 - ProducerImplWeakPtr weak_from_this() noexcept; - - protected: - ProducerStatsBasePtr producerStatsBasePtr_; - - typedef std::deque MessageQueue; - - void setMessageMetadata(const Message& msg, const uint64_t& sequenceId, const uint32_t& uncompressedSize); - - void sendMessage(const OpSendMsg& opSendMsg); - - void startSendTimeoutTimer(); - - friend class PulsarFriend; - - friend class Producer; - - friend class BatchMessageContainerBase; - friend class BatchMessageContainer; - - // overrided methods from HandlerBase - void connectionOpened(const ClientConnectionPtr& connection) override; - void connectionFailed(Result result) override; - HandlerBaseWeakPtr get_weak_from_this() override { return shared_from_this(); } - const std::string& getName() const override { return producerStr_; } - - private: - void printStats(); - - void handleCreateProducer(const ClientConnectionPtr& cnx, Result result, - const ResponseData& responseData); - - void handleClose(Result result, ResultCallback callback, ProducerImplPtr producer); - - void resendMessages(ClientConnectionPtr cnx); - - void refreshEncryptionKey(const boost::system::error_code& ec); - bool encryptMessage(proto::MessageMetadata& metadata, SharedBuffer& payload, - SharedBuffer& encryptedPayload); - - void sendAsyncWithStatsUpdate(const Message& msg, const SendCallback& callback); - - /** - * Reserve a spot in the messages queue before acquiring the ProducerImpl mutex. When the queue is full, - * this call will block until a spot is available if blockIfQueueIsFull is true. Otherwise, it will return - * ResultProducerQueueIsFull immediately. - * - * It also checks whether the memory could reach the limit after `payloadSize` is added. If so, this call - * will block until enough memory could be retained. 
- */ - Result canEnqueueRequest(uint32_t payloadSize); - - void releaseSemaphore(uint32_t payloadSize); - void releaseSemaphoreForSendOp(const OpSendMsg& op); - - void cancelTimers(); - - bool isValidProducerState(const SendCallback& callback) const; - bool canAddToBatch(const Message& msg) const; - - typedef std::unique_lock Lock; - - ProducerConfiguration conf_; - - std::unique_ptr semaphore_; - MessageQueue pendingMessagesQueue_; - - const int32_t partition_; // -1 if topic is non-partitioned - std::string producerName_; - bool userProvidedProducerName_; - std::string producerStr_; - uint64_t producerId_; - int64_t msgSequenceGenerator_; - proto::BaseCommand cmd_; - - std::unique_ptr batchMessageContainer_; - boost::asio::deadline_timer batchTimer_; - PendingFailures batchMessageAndSend(const FlushCallback& flushCallback = nullptr); - - volatile int64_t lastSequenceIdPublished_; - std::string schemaVersion_; - - boost::asio::deadline_timer sendTimer_; - void handleSendTimeout(const boost::system::error_code& err); - using DurationType = typename boost::asio::deadline_timer::duration_type; - void asyncWaitSendTimeout(DurationType expiryTime); - - Promise producerCreatedPromise_; - - struct PendingCallbacks; - std::shared_ptr getPendingCallbacksWhenFailed(); - std::shared_ptr getPendingCallbacksWhenFailedWithLock(); - - void failPendingMessages(Result result, bool withLock); - - MessageCryptoPtr msgCrypto_; - PeriodicTask dataKeyRefreshTask_; - - MemoryLimitController& memoryLimitController_; - const bool chunkingEnabled_; - Optional topicEpoch{Optional::empty()}; -}; - -struct ProducerImplCmp { - bool operator()(const ProducerImplPtr& a, const ProducerImplPtr& b) const; -}; - -} /* namespace pulsar */ - -#endif /* LIB_PRODUCERIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ProducerImplBase.h b/pulsar-client-cpp/lib/ProducerImplBase.h deleted file mode 100644 index 15a6e1d5a3f80..0000000000000 --- a/pulsar-client-cpp/lib/ProducerImplBase.h +++ /dev/null @@ -1,51 
+0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_PRODUCER_IMPL_BASE_HEADER -#define PULSAR_PRODUCER_IMPL_BASE_HEADER -#include -#include - -namespace pulsar { -class ProducerImplBase; - -typedef std::weak_ptr ProducerImplBaseWeakPtr; - -class ProducerImplBase { - public: - virtual ~ProducerImplBase() {} - - virtual const std::string& getProducerName() const = 0; - - virtual int64_t getLastSequenceId() const = 0; - virtual const std::string& getSchemaVersion() const = 0; - - virtual void sendAsync(const Message& msg, SendCallback callback) = 0; - virtual void closeAsync(CloseCallback callback) = 0; - virtual void start() = 0; - virtual void shutdown() = 0; - virtual bool isClosed() = 0; - virtual const std::string& getTopic() const = 0; - virtual Future getProducerCreatedFuture() = 0; - virtual void triggerFlush() = 0; - virtual void flushAsync(FlushCallback callback) = 0; - virtual bool isConnected() const = 0; - virtual uint64_t getNumberOfConnectedProducer() = 0; -}; -} // namespace pulsar -#endif // PULSAR_PRODUCER_IMPL_BASE_HEADER diff --git a/pulsar-client-cpp/lib/ProtobufNativeSchema.cc b/pulsar-client-cpp/lib/ProtobufNativeSchema.cc deleted file mode 100644 index 
3b8a404bb65d1..0000000000000 --- a/pulsar-client-cpp/lib/ProtobufNativeSchema.cc +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "pulsar/ProtobufNativeSchema.h" - -#include -#include - -#include -#include -#include - -using google::protobuf::FileDescriptor; -using google::protobuf::FileDescriptorSet; - -namespace pulsar { - -void internalCollectFileDescriptors(const FileDescriptor* fileDescriptor, - FileDescriptorSet& fileDescriptorSet); - -SchemaInfo createProtobufNativeSchema(const google::protobuf::Descriptor* descriptor) { - if (!descriptor) { - throw std::invalid_argument("descriptor is null"); - } - - const auto fileDescriptor = descriptor->file(); - const std::string rootMessageTypeName = descriptor->full_name(); - const std::string rootFileDescriptorName = fileDescriptor->name(); - - FileDescriptorSet fileDescriptorSet; - internalCollectFileDescriptors(fileDescriptor, fileDescriptorSet); - - using namespace boost::archive::iterators; - using base64 = base64_from_binary>; - - std::vector bytes(fileDescriptorSet.ByteSizeLong()); - fileDescriptorSet.SerializeToArray(bytes.data(), bytes.size()); - - std::string base64String{base64(bytes.data()), 
base64(bytes.data() + bytes.size())}; - // Pulsar broker only supports decoding Base64 with padding so we need to add padding '=' here - const size_t numPadding = 4 - base64String.size() % 4; - if (numPadding <= 2) { - for (size_t i = 0; i < numPadding; i++) { - base64String.push_back('='); - } - } else if (numPadding == 3) { - // The length of encoded Base64 string (without padding) should not be 4N+1 - throw std::runtime_error("Unexpected padding number (3), the encoded Base64 string is:\n" + - base64String); - } // else numPadding == 4, which means no padding characters need to be added - - const std::string schemaJson = R"({"fileDescriptorSet":")" + base64String + - R"(","rootMessageTypeName":")" + rootMessageTypeName + - R"(","rootFileDescriptorName":")" + rootFileDescriptorName + R"("})"; - - return SchemaInfo(SchemaType::PROTOBUF_NATIVE, "", schemaJson); -} - -void internalCollectFileDescriptors(const FileDescriptor* fileDescriptor, - FileDescriptorSet& fileDescriptorSet) { - fileDescriptor->CopyTo(fileDescriptorSet.add_file()); - for (int i = 0; i < fileDescriptor->dependency_count(); i++) { - // collect the file descriptors recursively - internalCollectFileDescriptors(fileDescriptor->dependency(i), fileDescriptorSet); - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/PulsarScheme.h b/pulsar-client-cpp/lib/PulsarScheme.h deleted file mode 100644 index e292687275fbb..0000000000000 --- a/pulsar-client-cpp/lib/PulsarScheme.h +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include - -namespace pulsar { - -enum PulsarScheme -{ - PULSAR, - PULSAR_SSL, - HTTP, - HTTPS -}; - -namespace scheme { - -inline PulsarScheme toScheme(const std::string& scheme) { - if (scheme == "pulsar") { - return PulsarScheme::PULSAR; - } else if (scheme == "pulsar+ssl") { - return PulsarScheme::PULSAR_SSL; - } else if (scheme == "http") { - return PulsarScheme::HTTP; - } else if (scheme == "https") { - return PulsarScheme::HTTPS; - } else { - throw std::invalid_argument("Invalid scheme: " + scheme); - } -} - -inline const char* getSchemeString(PulsarScheme scheme) { - switch (scheme) { - case PulsarScheme::PULSAR: - return "pulsar://"; - case PulsarScheme::PULSAR_SSL: - return "pulsar+ssl://"; - case PulsarScheme::HTTP: - return "http://"; - case PulsarScheme::HTTPS: - return "https://"; - default: - return "unknown://"; - } -} - -inline short getDefaultPort(PulsarScheme scheme) { - switch (scheme) { - case PulsarScheme::PULSAR: - return 6650; - case PulsarScheme::PULSAR_SSL: - return 6651; - case PulsarScheme::HTTP: - return 8080; - case PulsarScheme::HTTPS: - return 8081; - default: - throw std::invalid_argument("Unexpected scheme: " + std::to_string(scheme)); - } -} - -} // namespace scheme - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Reader.cc b/pulsar-client-cpp/lib/Reader.cc deleted file mode 100644 index fa485362095d6..0000000000000 --- a/pulsar-client-cpp/lib/Reader.cc +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include "Future.h" -#include "Utils.h" -#include "ReaderImpl.h" - -namespace pulsar { - -static const std::string EMPTY_STRING; - -Reader::Reader() : impl_() {} - -Reader::Reader(ReaderImplPtr impl) : impl_(impl) {} - -const std::string& Reader::getTopic() const { return impl_ != NULL ? 
impl_->getTopic() : EMPTY_STRING; } - -Result Reader::readNext(Message& msg) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - return impl_->readNext(msg); -} - -Result Reader::readNext(Message& msg, int timeoutMs) { - if (!impl_) { - return ResultConsumerNotInitialized; - } - - return impl_->readNext(msg, timeoutMs); -} - -Result Reader::close() { - Promise promise; - closeAsync(WaitForCallback(promise)); - - Result result; - promise.getFuture().get(result); - return result; -} - -void Reader::closeAsync(ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - - impl_->closeAsync(callback); -} - -void Reader::hasMessageAvailableAsync(HasMessageAvailableCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized, false); - return; - } - - impl_->hasMessageAvailableAsync(callback); -} - -Result Reader::hasMessageAvailable(bool& hasMessageAvailable) { - Promise promise; - - hasMessageAvailableAsync(WaitForCallbackValue(promise)); - return promise.getFuture().get(hasMessageAvailable); -} - -void Reader::seekAsync(const MessageId& msgId, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - impl_->seekAsync(msgId, callback); -} - -void Reader::seekAsync(uint64_t timestamp, ResultCallback callback) { - if (!impl_) { - callback(ResultConsumerNotInitialized); - return; - } - impl_->seekAsync(timestamp, callback); -} - -Result Reader::seek(const MessageId& msgId) { - Promise promise; - impl_->seekAsync(msgId, WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -Result Reader::seek(uint64_t timestamp) { - Promise promise; - impl_->seekAsync(timestamp, WaitForCallback(promise)); - Result result; - promise.getFuture().get(result); - return result; -} - -bool Reader::isConnected() const { return impl_ && impl_->isConnected(); } - -void Reader::getLastMessageIdAsync(GetLastMessageIdCallback callback) { - if 
(!impl_) { - callback(ResultConsumerNotInitialized, MessageId()); - return; - } - impl_->getLastMessageIdAsync(callback); -} - -Result Reader::getLastMessageId(MessageId& messageId) { - Promise promise; - - getLastMessageIdAsync(WaitForCallbackValue(promise)); - return promise.getFuture().get(messageId); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ReaderConfiguration.cc b/pulsar-client-cpp/lib/ReaderConfiguration.cc deleted file mode 100644 index 0dfdbedc32234..0000000000000 --- a/pulsar-client-cpp/lib/ReaderConfiguration.cc +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -namespace pulsar { - -const static std::string emptyString; - -ReaderConfiguration::ReaderConfiguration() : impl_(std::make_shared()) {} - -ReaderConfiguration::~ReaderConfiguration() {} - -ReaderConfiguration::ReaderConfiguration(const ReaderConfiguration& x) : impl_(x.impl_) {} - -ReaderConfiguration& ReaderConfiguration::operator=(const ReaderConfiguration& x) { - impl_ = x.impl_; - return *this; -} - -ReaderConfiguration& ReaderConfiguration::setSchema(const SchemaInfo& schemaInfo) { - impl_->schemaInfo = schemaInfo; - return *this; -} - -const SchemaInfo& ReaderConfiguration::getSchema() const { return impl_->schemaInfo; } - -ReaderConfiguration& ReaderConfiguration::setReaderListener(ReaderListener readerListener) { - impl_->readerListener = readerListener; - impl_->hasReaderListener = true; - return *this; -} - -ReaderListener ReaderConfiguration::getReaderListener() const { return impl_->readerListener; } - -bool ReaderConfiguration::hasReaderListener() const { return impl_->hasReaderListener; } - -void ReaderConfiguration::setReceiverQueueSize(int size) { impl_->receiverQueueSize = size; } - -int ReaderConfiguration::getReceiverQueueSize() const { return impl_->receiverQueueSize; } - -const std::string& ReaderConfiguration::getReaderName() const { return impl_->readerName; } - -void ReaderConfiguration::setReaderName(const std::string& readerName) { impl_->readerName = readerName; } - -const std::string& ReaderConfiguration::getSubscriptionRolePrefix() const { - return impl_->subscriptionRolePrefix; -} - -void ReaderConfiguration::setSubscriptionRolePrefix(const std::string& subscriptionRolePrefix) { - impl_->subscriptionRolePrefix = subscriptionRolePrefix; -} - -bool ReaderConfiguration::isReadCompacted() const { return impl_->readCompacted; } - -void ReaderConfiguration::setReadCompacted(bool compacted) { impl_->readCompacted = compacted; } - -void ReaderConfiguration::setInternalSubscriptionName(std::string internalSubscriptionName) { 
- impl_->internalSubscriptionName = internalSubscriptionName; -} - -const std::string& ReaderConfiguration::getInternalSubscriptionName() const { - return impl_->internalSubscriptionName; -} - -void ReaderConfiguration::setUnAckedMessagesTimeoutMs(const uint64_t milliSeconds) { - impl_->unAckedMessagesTimeoutMs = milliSeconds; -} - -long ReaderConfiguration::getUnAckedMessagesTimeoutMs() const { return impl_->unAckedMessagesTimeoutMs; } - -void ReaderConfiguration::setTickDurationInMs(const uint64_t milliSeconds) { - impl_->tickDurationInMs = milliSeconds; -} - -long ReaderConfiguration::getTickDurationInMs() const { return impl_->tickDurationInMs; } - -void ReaderConfiguration::setAckGroupingTimeMs(long ackGroupingMillis) { - impl_->ackGroupingTimeMs = ackGroupingMillis; -} - -long ReaderConfiguration::getAckGroupingTimeMs() const { return impl_->ackGroupingTimeMs; } - -void ReaderConfiguration::setAckGroupingMaxSize(long maxGroupingSize) { - impl_->ackGroupingMaxSize = maxGroupingSize; -} - -long ReaderConfiguration::getAckGroupingMaxSize() const { return impl_->ackGroupingMaxSize; } - -bool ReaderConfiguration::isEncryptionEnabled() const { return impl_->cryptoKeyReader != nullptr; } - -const CryptoKeyReaderPtr ReaderConfiguration::getCryptoKeyReader() const { return impl_->cryptoKeyReader; } - -ReaderConfiguration& ReaderConfiguration::setCryptoKeyReader(CryptoKeyReaderPtr cryptoKeyReader) { - impl_->cryptoKeyReader = cryptoKeyReader; - return *this; -} - -ConsumerCryptoFailureAction ReaderConfiguration::getCryptoFailureAction() const { - return impl_->cryptoFailureAction; -} - -ReaderConfiguration& ReaderConfiguration::setCryptoFailureAction(ConsumerCryptoFailureAction action) { - impl_->cryptoFailureAction = action; - return *this; -} - -bool ReaderConfiguration::hasProperty(const std::string& name) const { - const auto& properties = impl_->properties; - return properties.find(name) != properties.cend(); -} - -const std::string& 
ReaderConfiguration::getProperty(const std::string& name) const { - const auto& properties = impl_->properties; - const auto it = properties.find(name); - return (it != properties.cend()) ? (it->second) : emptyString; -} - -std::map& ReaderConfiguration::getProperties() const { return impl_->properties; } - -ReaderConfiguration& ReaderConfiguration::setProperty(const std::string& name, const std::string& value) { - auto& properties = impl_->properties; - auto it = properties.find(name); - if (it != properties.end()) { - it->second = value; - } else { - properties.emplace(name, value); - } - return *this; -} - -ReaderConfiguration& ReaderConfiguration::setProperties( - const std::map& properties) { - for (const auto& kv : properties) { - setProperty(kv.first, kv.second); - } - return *this; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ReaderConfigurationImpl.h b/pulsar-client-cpp/lib/ReaderConfigurationImpl.h deleted file mode 100644 index 6f38c29bedec7..0000000000000 --- a/pulsar-client-cpp/lib/ReaderConfigurationImpl.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_READERCONFIGURATIONIMPL_H_ -#define LIB_READERCONFIGURATIONIMPL_H_ - -#include - -namespace pulsar { -struct ReaderConfigurationImpl { - SchemaInfo schemaInfo; - ReaderListener readerListener; - bool hasReaderListener{false}; - int receiverQueueSize{1000}; - std::string readerName; - std::string subscriptionRolePrefix; - bool readCompacted{false}; - std::string internalSubscriptionName; - long unAckedMessagesTimeoutMs{0}; - long tickDurationInMs{1000}; - long ackGroupingTimeMs{100}; - long ackGroupingMaxSize{1000}; - CryptoKeyReaderPtr cryptoKeyReader; - ConsumerCryptoFailureAction cryptoFailureAction; - std::map properties; -}; -} // namespace pulsar -#endif /* LIB_READERCONFIGURATIONIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/ReaderImpl.cc b/pulsar-client-cpp/lib/ReaderImpl.cc deleted file mode 100644 index 83fa6a57009d7..0000000000000 --- a/pulsar-client-cpp/lib/ReaderImpl.cc +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "ClientImpl.h" -#include "ReaderImpl.h" -#include "TopicName.h" - -namespace pulsar { - -namespace test { -std::mutex readerConfigTestMutex; -std::atomic_bool readerConfigTestEnabled{false}; -ConsumerConfiguration consumerConfigOfReader; -} // namespace test - -static ResultCallback emptyCallback; - -ReaderImpl::ReaderImpl(const ClientImplPtr client, const std::string& topic, const ReaderConfiguration& conf, - const ExecutorServicePtr listenerExecutor, ReaderCallback readerCreatedCallback) - : topic_(topic), client_(client), readerConf_(conf), readerCreatedCallback_(readerCreatedCallback) {} - -void ReaderImpl::start(const MessageId& startMessageId, - std::function callback) { - ConsumerConfiguration consumerConf; - consumerConf.setConsumerType(ConsumerExclusive); - consumerConf.setReceiverQueueSize(readerConf_.getReceiverQueueSize()); - consumerConf.setReadCompacted(readerConf_.isReadCompacted()); - consumerConf.setSchema(readerConf_.getSchema()); - consumerConf.setUnAckedMessagesTimeoutMs(readerConf_.getUnAckedMessagesTimeoutMs()); - consumerConf.setTickDurationInMs(readerConf_.getTickDurationInMs()); - consumerConf.setAckGroupingTimeMs(readerConf_.getAckGroupingTimeMs()); - consumerConf.setAckGroupingMaxSize(readerConf_.getAckGroupingMaxSize()); - consumerConf.setCryptoKeyReader(readerConf_.getCryptoKeyReader()); - consumerConf.setCryptoFailureAction(readerConf_.getCryptoFailureAction()); - consumerConf.setProperties(readerConf_.getProperties()); - - if (readerConf_.getReaderName().length() > 0) { - consumerConf.setConsumerName(readerConf_.getReaderName()); - } - - if (readerConf_.hasReaderListener()) { - // Adapt the message listener to be a reader-listener - readerListener_ = readerConf_.getReaderListener(); - consumerConf.setMessageListener(std::bind(&ReaderImpl::messageListener, shared_from_this(), - std::placeholders::_1, std::placeholders::_2)); - } - - std::string subscription; - if (!readerConf_.getInternalSubscriptionName().empty()) { - 
subscription = readerConf_.getInternalSubscriptionName(); - } else { - subscription = "reader-" + generateRandomName(); - if (!readerConf_.getSubscriptionRolePrefix().empty()) { - subscription = readerConf_.getSubscriptionRolePrefix() + "-" + subscription; - } - } - - // get the consumer's configuration before created - if (test::readerConfigTestEnabled) { - test::consumerConfigOfReader = consumerConf.clone(); - } - - consumer_ = std::make_shared(client_.lock(), topic_, subscription, consumerConf, - TopicName::get(topic_)->isPersistent(), ExecutorServicePtr(), - false, NonPartitioned, Commands::SubscriptionModeNonDurable, - Optional::of(startMessageId)); - consumer_->setPartitionIndex(TopicName::getPartitionIndex(topic_)); - auto self = shared_from_this(); - consumer_->getConsumerCreatedFuture().addListener( - [this, self, callback](Result result, const ConsumerImplBaseWeakPtr& weakConsumerPtr) { - if (result == ResultOk) { - callback(weakConsumerPtr); - readerCreatedCallback_(result, Reader(self)); - } else { - readerCreatedCallback_(result, {}); - } - }); - consumer_->start(); -} - -const std::string& ReaderImpl::getTopic() const { return consumer_->getTopic(); } - -Result ReaderImpl::readNext(Message& msg) { - Result res = consumer_->receive(msg); - acknowledgeIfNecessary(res, msg); - return res; -} - -Result ReaderImpl::readNext(Message& msg, int timeoutMs) { - Result res = consumer_->receive(msg, timeoutMs); - acknowledgeIfNecessary(res, msg); - return res; -} - -void ReaderImpl::messageListener(Consumer consumer, const Message& msg) { - readerListener_(Reader(shared_from_this()), msg); - acknowledgeIfNecessary(ResultOk, msg); -} - -void ReaderImpl::acknowledgeIfNecessary(Result result, const Message& msg) { - if (result != ResultOk) { - return; - } - - // Only acknowledge on the first message in the batch - if (msg.getMessageId().batchIndex() <= 0) { - // Acknowledge message immediately because the reader is based on non-durable - // subscription. 
When it reconnects, it will specify the subscription position anyway - consumer_->acknowledgeCumulativeAsync(msg.getMessageId(), emptyCallback); - } -} - -void ReaderImpl::closeAsync(ResultCallback callback) { consumer_->closeAsync(callback); } - -void ReaderImpl::hasMessageAvailableAsync(HasMessageAvailableCallback callback) { - consumer_->hasMessageAvailableAsync(callback); -} - -void ReaderImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { - consumer_->seekAsync(msgId, callback); -} -void ReaderImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { - consumer_->seekAsync(timestamp, callback); -} - -void ReaderImpl::getLastMessageIdAsync(GetLastMessageIdCallback callback) { - consumer_->getLastMessageIdAsync([callback](Result result, const GetLastMessageIdResponse& response) { - callback(result, response.getLastMessageId()); - }); -} - -bool ReaderImpl::isConnected() const { return consumer_->isConnected(); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ReaderImpl.h b/pulsar-client-cpp/lib/ReaderImpl.h deleted file mode 100644 index b0d8a6bc40a21..0000000000000 --- a/pulsar-client-cpp/lib/ReaderImpl.h +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef LIB_READERIMPL_H_ -#define LIB_READERIMPL_H_ - -#include "ConsumerImpl.h" - -namespace pulsar { - -class ReaderImpl; - -typedef std::shared_ptr ReaderImplPtr; -typedef std::weak_ptr ReaderImplWeakPtr; - -namespace test { - -extern PULSAR_PUBLIC std::mutex readerConfigTestMutex; -extern PULSAR_PUBLIC std::atomic_bool readerConfigTestEnabled; -extern PULSAR_PUBLIC ConsumerConfiguration consumerConfigOfReader; - -} // namespace test - -class PULSAR_PUBLIC ReaderImpl : public std::enable_shared_from_this { - public: - ReaderImpl(const ClientImplPtr client, const std::string& topic, const ReaderConfiguration& conf, - const ExecutorServicePtr listenerExecutor, ReaderCallback readerCreatedCallback); - - void start(const MessageId& startMessageId, std::function callback); - - const std::string& getTopic() const; - - Result readNext(Message& msg); - Result readNext(Message& msg, int timeoutMs); - - void closeAsync(ResultCallback callback); - - Future getReaderCreatedFuture(); - - ConsumerImplBaseWeakPtr getConsumer() const noexcept { return consumer_; } - - void hasMessageAvailableAsync(HasMessageAvailableCallback callback); - - void seekAsync(const MessageId& msgId, ResultCallback callback); - void seekAsync(uint64_t timestamp, ResultCallback callback); - - void getLastMessageIdAsync(GetLastMessageIdCallback callback); - - bool isConnected() const; - - private: - void messageListener(Consumer consumer, const Message& msg); - - void acknowledgeIfNecessary(Result result, const Message& msg); - - std::string topic_; - ClientImplWeakPtr client_; - ReaderConfiguration readerConf_; - ConsumerImplPtr consumer_; - ReaderCallback readerCreatedCallback_; - ReaderListener readerListener_; -}; -} // namespace pulsar - -#endif /* LIB_READERIMPL_H_ */ diff --git a/pulsar-client-cpp/lib/Result.cc b/pulsar-client-cpp/lib/Result.cc deleted file mode 100644 index 
6682341b2a396..0000000000000 --- a/pulsar-client-cpp/lib/Result.cc +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include - -#include - -namespace pulsar { - -const char* strResult(Result result) { - switch (result) { - case ResultRetryable: - return "Retryable"; - - case ResultOk: - return "Ok"; - - case ResultUnknownError: - return "UnknownError"; - - case ResultInvalidConfiguration: - return "InvalidConfiguration"; - - case ResultTimeout: - return "TimeOut"; - - case ResultLookupError: - return "LookupError"; - - case ResultConnectError: - return "ConnectError"; - - case ResultAuthenticationError: - return "AuthenticationError"; - - case ResultAuthorizationError: - return "AuthorizationError"; - - case ResultErrorGettingAuthenticationData: - return "ErrorGettingAuthenticationData"; - - case ResultBrokerMetadataError: - return "BrokerMetadataError"; - - case ResultBrokerPersistenceError: - return "BrokerPersistenceError"; - - case ResultConsumerBusy: - return "ConsumerBusy"; - - case ResultNotConnected: - return "NotConnected"; - - case ResultReadError: - return "ReadError"; - - case ResultAlreadyClosed: - return "AlreadyClosed"; - - case ResultInvalidMessage: 
- return "InvalidMessage"; - - case ResultConsumerNotInitialized: - return "ConsumerNotInitialized"; - - case ResultProducerNotInitialized: - return "ProducerNotInitialized"; - - case ResultInvalidTopicName: - return "InvalidTopicName"; - - case ResultServiceUnitNotReady: - return "ServiceUnitNotReady"; - - case ResultInvalidUrl: - return "InvalidUrl"; - - case ResultChecksumError: - return "ChecksumError"; - - case ResultTooManyLookupRequestException: - return "TooManyLookupRequestException"; - - case ResultOperationNotSupported: - return "OperationNotSupported"; - - case ResultProducerBlockedQuotaExceededError: - return "ProducerBlockedQuotaExceededError"; - - case ResultProducerBlockedQuotaExceededException: - return "ProducerBlockedQuotaExceededException"; - - case ResultProducerQueueIsFull: - return "ProducerQueueIsFull"; - - case ResultMessageTooBig: - return "MessageTooBig"; - - case ResultTopicNotFound: - return "TopicNotFound"; - - case ResultSubscriptionNotFound: - return "SubscriptionNotFound"; - - case ResultConsumerNotFound: - return "ConsumerNotFound"; - - case ResultUnsupportedVersionError: - return "UnsupportedVersionError"; - - case ResultTopicTerminated: - return "TopicTerminated"; - - case ResultCryptoError: - return "CryptoError"; - - case ResultProducerBusy: - return "ProducerBusy"; - - case ResultIncompatibleSchema: - return "IncompatibleSchema"; - - case ResultConsumerAssignError: - return "ResultConsumerAssignError"; - - case ResultCumulativeAcknowledgementNotAllowedError: - return "ResultCumulativeAcknowledgementNotAllowedError"; - - case ResultTransactionCoordinatorNotFoundError: - return "ResultTransactionCoordinatorNotFoundError"; - - case ResultInvalidTxnStatusError: - return "ResultInvalidTxnStatusError"; - - case ResultNotAllowedError: - return "ResultNotAllowedError"; - - case ResultTransactionConflict: - return "ResultTransactionConflict"; - - case ResultTransactionNotFound: - return "ResultTransactionNotFound"; - - case 
ResultProducerFenced: - return "ResultProducerFenced"; - - case ResultMemoryBufferIsFull: - return "ResultMemoryBufferIsFull"; - - case ResultInterrupted: - return "ResultInterrupted"; - }; - // NOTE : Do not add default case in the switch above. In future if we get new cases for - // ServerError and miss them in the switch above we would like to get notified. Adding - // return here to make the compiler happy. - return "UnknownErrorCode"; -} - -PULSAR_PUBLIC std::ostream& operator<<(std::ostream& s, Result result) { return s << strResult(result); } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/RetryableLookupService.h b/pulsar-client-cpp/lib/RetryableLookupService.h deleted file mode 100644 index a8f7bfcec3b3c..0000000000000 --- a/pulsar-client-cpp/lib/RetryableLookupService.h +++ /dev/null @@ -1,151 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include "lib/Backoff.h" -#include "lib/ExecutorService.h" -#include "lib/LookupService.h" -#include "lib/SynchronizedHashMap.h" -#include "lib/LogUtils.h" - -namespace pulsar { - -class RetryableLookupService : public LookupService, - public std::enable_shared_from_this { - private: - friend class PulsarFriend; - struct PassKey { - explicit PassKey() {} - }; - - public: - template - explicit RetryableLookupService(PassKey, Args&&... args) - : RetryableLookupService(std::forward(args)...) {} - - template - static std::shared_ptr create(Args&&... args) { - return std::make_shared(PassKey{}, std::forward(args)...); - } - - LookupResultFuture getBroker(const TopicName& topicName) override { - return executeAsync("get-broker-" + topicName.toString(), - [this, topicName] { return lookupService_->getBroker(topicName); }); - } - - Future getPartitionMetadataAsync(const TopicNamePtr& topicName) override { - return executeAsync( - "get-partition-metadata-" + topicName->toString(), - [this, topicName] { return lookupService_->getPartitionMetadataAsync(topicName); }); - } - - Future getTopicsOfNamespaceAsync(const NamespaceNamePtr& nsName) override { - return executeAsync( - "get-topics-of-namespace-" + nsName->toString(), - [this, nsName] { return lookupService_->getTopicsOfNamespaceAsync(nsName); }); - } - - template - Future executeAsync(const std::string& key, std::function()> f) { - Promise promise; - executeAsyncImpl(key, f, promise, timeout_); - return promise.getFuture(); - } - - private: - const std::shared_ptr lookupService_; - const TimeDuration timeout_; - Backoff backoff_; - const ExecutorServiceProviderPtr executorProvider_; - - using Timer = boost::asio::deadline_timer; - using TimerPtr = std::unique_ptr; - SynchronizedHashMap backoffTimers_; - - RetryableLookupService(std::shared_ptr lookupService, int timeoutSeconds, - ExecutorServiceProviderPtr executorProvider) - : lookupService_(lookupService), - 
timeout_(boost::posix_time::seconds(timeoutSeconds)), - backoff_(boost::posix_time::milliseconds(100), timeout_ + timeout_, - boost::posix_time::milliseconds(0)), - executorProvider_(executorProvider) {} - - std::weak_ptr weak_from_this() noexcept { return shared_from_this(); } - - // NOTE: Set the visibility to fix compilation error in GCC 6 - template -#ifndef _WIN32 - __attribute__((visibility("hidden"))) -#endif - void - executeAsyncImpl(const std::string& key, std::function()> f, Promise promise, - TimeDuration remainingTime) { - auto weakSelf = weak_from_this(); - f().addListener([this, weakSelf, key, f, promise, remainingTime](Result result, const T& value) { - auto self = weakSelf.lock(); - if (!self) { - return; - } - - if (result == ResultOk) { - backoffTimers_.remove(key); - promise.setValue(value); - } else if (result == ResultRetryable) { - if (remainingTime.total_milliseconds() <= 0) { - backoffTimers_.remove(key); - promise.setFailed(ResultTimeout); - return; - } - - auto it = backoffTimers_.emplace( - key, TimerPtr{new Timer(executorProvider_->get()->getIOService())}); - auto& timer = *(it.first->second); - auto delay = std::min(backoff_.next(), remainingTime); - timer.expires_from_now(delay); - - auto nextRemainingTime = remainingTime - delay; - LOG_INFO("Reschedule " << key << " for " << delay.total_milliseconds() - << " ms, remaining time: " << nextRemainingTime.total_milliseconds() - << " ms"); - timer.async_wait([this, weakSelf, key, f, promise, - nextRemainingTime](const boost::system::error_code& ec) { - auto self = weakSelf.lock(); - if (!self || ec) { - if (self && ec != boost::asio::error::operation_aborted) { - LOG_ERROR("The timer for " << key << " failed: " << ec.message()); - } - // The lookup service has been destructed or the timer has been cancelled - promise.setFailed(ResultTimeout); - return; - } - executeAsyncImpl(key, f, promise, nextRemainingTime); - }); - } else { - backoffTimers_.remove(key); - promise.setFailed(result); - } 
- }); - } - - DECLARE_LOG_OBJECT() -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/RoundRobinMessageRouter.cc b/pulsar-client-cpp/lib/RoundRobinMessageRouter.cc deleted file mode 100644 index 51d10e2c8c1bd..0000000000000 --- a/pulsar-client-cpp/lib/RoundRobinMessageRouter.cc +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "RoundRobinMessageRouter.h" - -#include "TimeUtils.h" - -#include -#include - -namespace pulsar { -RoundRobinMessageRouter::RoundRobinMessageRouter(ProducerConfiguration::HashingScheme hashingScheme, - bool batchingEnabled, uint32_t maxBatchingMessages, - uint32_t maxBatchingSize, - boost::posix_time::time_duration maxBatchingDelay) - : MessageRouterBase(hashingScheme), - batchingEnabled_(batchingEnabled), - maxBatchingMessages_(maxBatchingMessages), - maxBatchingSize_(maxBatchingSize), - maxBatchingDelay_(maxBatchingDelay), - lastPartitionChange_(TimeUtils::currentTimeMillis()), - msgCounter_(0), - cumulativeBatchSize_(0) { - boost::random::mt19937 rng(time(nullptr)); - boost::random::uniform_int_distribution dist; - currentPartitionCursor_ = dist(rng); -} - -RoundRobinMessageRouter::~RoundRobinMessageRouter() {} - -// override -int RoundRobinMessageRouter::getPartition(const Message& msg, const TopicMetadata& topicMetadata) { - if (topicMetadata.getNumPartitions() == 1) { - // When there are no partitions, don't even bother - return 0; - } - - // if message has a key, hash the key and return the partition - if (msg.hasPartitionKey()) { - return hash->makeHash(msg.getPartitionKey()) % topicMetadata.getNumPartitions(); - } - - if (!batchingEnabled_) { - // If there's no batching, do the round-robin at the message scope - // as there is no gain otherwise. - return currentPartitionCursor_++ % topicMetadata.getNumPartitions(); - } - - // If there's no key, we do round-robin across partition, sticking with a given - // partition for a certain amount of messages or volume buffered or the max delay to batch is reached so - // that we ensure having a decent amount of batching of the messages. Note that it is possible that we - // skip more than one partition if multiple goroutines increment currentPartitionCursor at the same time. 
- // If that happens it shouldn't be a problem because we only want to spread the data on different - // partitions but not necessarily in a specific sequence. - uint32_t messageSize = msg.getLength(); - uint32_t messageCount = msgCounter_; - uint32_t batchSize = cumulativeBatchSize_; - int64_t lastPartitionChange = lastPartitionChange_; - int64_t now = TimeUtils::currentTimeMillis(); - - if (messageCount >= maxBatchingMessages_ || (messageSize >= maxBatchingSize_ - batchSize) || - (now - lastPartitionChange >= maxBatchingDelay_.total_milliseconds())) { - uint32_t currentPartitionCursor = ++currentPartitionCursor_; - lastPartitionChange_ = now; - cumulativeBatchSize_ = messageSize; - msgCounter_ = 1; - return currentPartitionCursor % topicMetadata.getNumPartitions(); - } - - ++msgCounter_; - cumulativeBatchSize_ += messageSize; - return currentPartitionCursor_ % topicMetadata.getNumPartitions(); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/RoundRobinMessageRouter.h b/pulsar-client-cpp/lib/RoundRobinMessageRouter.h deleted file mode 100644 index be172a0ef3b6c..0000000000000 --- a/pulsar-client-cpp/lib/RoundRobinMessageRouter.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include "Hash.h" -#include "MessageRouterBase.h" - -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC RoundRobinMessageRouter : public MessageRouterBase { - public: - RoundRobinMessageRouter(ProducerConfiguration::HashingScheme hashingScheme, bool batchingEnabled, - uint32_t maxBatchingMessages, uint32_t maxBatchingSize, - boost::posix_time::time_duration maxBatchingDelay); - virtual ~RoundRobinMessageRouter(); - virtual int getPartition(const Message& msg, const TopicMetadata& topicMetadata); - - private: - const bool batchingEnabled_; - const uint32_t maxBatchingMessages_; - const uint32_t maxBatchingSize_; - const boost::posix_time::time_duration maxBatchingDelay_; - - std::atomic currentPartitionCursor_; - std::atomic lastPartitionChange_; - std::atomic msgCounter_; - std::atomic cumulativeBatchSize_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Schema.cc b/pulsar-client-cpp/lib/Schema.cc deleted file mode 100644 index af452f4c6ec5e..0000000000000 --- a/pulsar-client-cpp/lib/Schema.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include - -#include -#include -#include - -PULSAR_PUBLIC std::ostream &operator<<(std::ostream &s, pulsar::SchemaType schemaType) { - return s << strSchemaType(schemaType); -} - -namespace pulsar { - -PULSAR_PUBLIC const char *strSchemaType(SchemaType schemaType) { - switch (schemaType) { - case NONE: - return "NONE"; - case STRING: - return "STRING"; - case INT8: - return "INT8"; - case INT16: - return "INT16"; - case INT32: - return "INT32"; - case INT64: - return "INT64"; - case FLOAT: - return "FLOAT"; - case DOUBLE: - return "DOUBLE"; - case BYTES: - return "BYTES"; - case JSON: - return "JSON"; - case PROTOBUF: - return "PROTOBUF"; - case AVRO: - return "AVRO"; - case AUTO_CONSUME: - return "AUTO_CONSUME"; - case AUTO_PUBLISH: - return "AUTO_PUBLISH"; - case KEY_VALUE: - return "KEY_VALUE"; - case PROTOBUF_NATIVE: - return "PROTOBUF_NATIVE"; - }; - // NOTE : Do not add default case in the switch above. In future if we get new cases for - // Schema and miss them in the switch above we would like to get notified. Adding - // return here to make the compiler happy. 
- return "UnknownSchemaType"; -} - -class PULSAR_PUBLIC SchemaInfoImpl { - public: - const std::string name_; - const std::string schema_; - const SchemaType type_ = BYTES; - const std::map properties_; - - SchemaInfoImpl() : name_("BYTES") {} - - SchemaInfoImpl(SchemaType schemaType, const std::string &name, const std::string &schema, - const StringMap &properties) - : name_(name), schema_(schema), type_(schemaType), properties_(properties) {} -}; - -SchemaInfo::SchemaInfo() : impl_(std::make_shared()) {} - -SchemaInfo::SchemaInfo(SchemaType schemaType, const std::string &name, const std::string &schema, - const StringMap &properties) - : impl_(std::make_shared(schemaType, name, schema, properties)) {} - -SchemaType SchemaInfo::getSchemaType() const { return impl_->type_; } - -const std::string &SchemaInfo::getName() const { return impl_->name_; } - -const std::string &SchemaInfo::getSchema() const { return impl_->schema_; } - -const std::map &SchemaInfo::getProperties() const { return impl_->properties_; } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Semaphore.cc b/pulsar-client-cpp/lib/Semaphore.cc deleted file mode 100644 index 40d4e46cd1ad6..0000000000000 --- a/pulsar-client-cpp/lib/Semaphore.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include "Semaphore.h" - -namespace pulsar { - -Semaphore::Semaphore(uint32_t limit) : limit_(limit), currentUsage_(0), mutex_(), condition_() {} - -bool Semaphore::tryAcquire(int n) { - std::lock_guard lock(mutex_); - - if (currentUsage_ + n > limit_) { - return false; - } else { - currentUsage_ += n; - return true; - } -} - -bool Semaphore::acquire(int n) { - std::unique_lock lock(mutex_); - - while (currentUsage_ + n > limit_) { - if (isClosed_) { - return false; - } - condition_.wait(lock); - } - - currentUsage_ += n; - return true; -} - -void Semaphore::release(int n) { - std::unique_lock lock(mutex_); - currentUsage_ -= n; - lock.unlock(); - - if (n == 1) { - condition_.notify_one(); - } else { - condition_.notify_all(); - } -} - -uint32_t Semaphore::currentUsage() const { - std::lock_guard lock(mutex_); - return currentUsage_; -} - -void Semaphore::close() { - std::unique_lock lock(mutex_); - isClosed_ = true; - condition_.notify_all(); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Semaphore.h b/pulsar-client-cpp/lib/Semaphore.h deleted file mode 100644 index dcef2ad5b741c..0000000000000 --- a/pulsar-client-cpp/lib/Semaphore.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include -#include -#include - -namespace pulsar { - -class Semaphore { - public: - explicit Semaphore(uint32_t limit); - - bool tryAcquire(int n = 1); - bool acquire(int n = 1); - void release(int n = 1); - uint32_t currentUsage() const; - - void close(); - - private: - const uint32_t limit_; - uint32_t currentUsage_; - mutable std::mutex mutex_; - std::condition_variable condition_; - bool isClosed_ = false; -}; - -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/ServiceNameResolver.h b/pulsar-client-cpp/lib/ServiceNameResolver.h deleted file mode 100644 index cf7a5832697cb..0000000000000 --- a/pulsar-client-cpp/lib/ServiceNameResolver.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include "ServiceURI.h" - -namespace pulsar { - -class ServiceNameResolver { - public: - ServiceNameResolver(const std::string& uriString) - : serviceUri_(uriString), numAddresses_(serviceUri_.getServiceHosts().size()) { - assert(numAddresses_ > 0); // the validation has been done in ServiceURI - } - - ServiceNameResolver(const ServiceNameResolver&) = delete; - ServiceNameResolver& operator=(const ServiceNameResolver&) = delete; - - bool useTls() const noexcept { - return serviceUri_.getScheme() == PulsarScheme::PULSAR_SSL || - serviceUri_.getScheme() == PulsarScheme::HTTPS; - } - - bool useHttp() const noexcept { - return serviceUri_.getScheme() == PulsarScheme::HTTP || - serviceUri_.getScheme() == PulsarScheme::HTTPS; - } - - const std::string& resolveHost() { - return serviceUri_.getServiceHosts()[(numAddresses_ == 1) ? 0 : (index_++ % numAddresses_)]; - } - - private: - const ServiceURI serviceUri_; - const size_t numAddresses_; - std::atomic_size_t index_{0}; - - friend class PulsarFriend; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ServiceURI.cc b/pulsar-client-cpp/lib/ServiceURI.cc deleted file mode 100644 index ec515b2444a39..0000000000000 --- a/pulsar-client-cpp/lib/ServiceURI.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "ServiceURI.h" -#include - -namespace pulsar { - -static void processAddress(std::string& address, PulsarScheme scheme) { - const auto posOfSlash = address.find('/'); - if (posOfSlash != std::string::npos) { - // ignore the path part - address.erase(posOfSlash); - } - auto fail = [&address] { throw std::invalid_argument("invalid hostname: " + address); }; - - const auto posOfColon = address.find(':'); - if (posOfColon != std::string::npos) { - if (address.find(':', posOfColon + 1) != std::string::npos) { - fail(); - } - try { - const auto port = std::stoi(address.substr(posOfColon + 1)); - if (port < 0 || port > 65535) { - throw std::invalid_argument(""); - } - } catch (const std::invalid_argument& ignored) { - fail(); - } - } else { - address = address + ":" + std::to_string(scheme::getDefaultPort(scheme)); - } - if (!address.empty()) { - address = scheme::getSchemeString(scheme) + address; - } -} - -auto ServiceURI::parse(const std::string& uriString) -> DataType { - size_t pos = uriString.find("://"); - if (pos == std::string::npos) { - throw std::invalid_argument("The scheme part is missing: " + uriString); - } - if (pos == 0) { - throw std::invalid_argument("Expected scheme name at index 0: " + uriString); - } - const auto scheme = scheme::toScheme(uriString.substr(0, pos)); - - pos += 3; // now it points to the end of "://" - if (pos < uriString.size() && uriString[pos] == '/') { - throw std::invalid_argument("authority component is missing in service uri: " + uriString); - } - - std::vector addresses; - while (pos < 
uriString.size()) { - const size_t endPos = uriString.find(',', pos); - if (endPos == std::string::npos) { - addresses.emplace_back(uriString.substr(pos, endPos - pos)); - break; - } - addresses.emplace_back(uriString.substr(pos, endPos - pos)); - pos = endPos + 1; - } - - bool hasEmptyAddress = false; - for (auto& address : addresses) { - processAddress(address, scheme); - if (address.empty()) { - hasEmptyAddress = true; - } - } - if (hasEmptyAddress) { - auto originalAddresses = addresses; - addresses.clear(); - for (const auto& address : originalAddresses) { - if (!address.empty()) { - addresses.emplace_back(address); - } - } - } - if (addresses.empty()) { - throw std::invalid_argument("No service url is provided yet"); - } - return std::make_pair(scheme, addresses); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ServiceURI.h b/pulsar-client-cpp/lib/ServiceURI.h deleted file mode 100644 index 4f459d987f600..0000000000000 --- a/pulsar-client-cpp/lib/ServiceURI.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include -#include "PulsarScheme.h" - -namespace pulsar { - -class ServiceURI { - public: - /** - * @param uriString the URL string that is used to create a pulsar::Client object - * @throws std::invalid_argument if `uriString` is invalid - */ - ServiceURI(const std::string& uriString) : data_(parse(uriString)) {} - - PulsarScheme getScheme() const noexcept { return data_.first; } - - const std::vector& getServiceHosts() const noexcept { return data_.second; } - - private: - // The 2 elements of the pair are: - // 1. The Scheme of the lookup protocol - // 2. The available addresses, each item is like "pulsar://localhost:6650" - using DataType = std::pair>; - const DataType data_; - - static DataType parse(const std::string& uriString); -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/ServiceUnitId.h b/pulsar-client-cpp/lib/ServiceUnitId.h deleted file mode 100644 index d3ea00d90b008..0000000000000 --- a/pulsar-client-cpp/lib/ServiceUnitId.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef _PULSAR_SERVICE_UNIT_ID_HEADER_ -#define _PULSAR_SERVICE_UNIT_ID_HEADER_ - -class ServiceUnitId { - public: - virtual ~ServiceUnitId() {} -}; - -#endif diff --git a/pulsar-client-cpp/lib/SharedBuffer.h b/pulsar-client-cpp/lib/SharedBuffer.h deleted file mode 100644 index be889a7ee97ad..0000000000000 --- a/pulsar-client-cpp/lib/SharedBuffer.h +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_SHARED_BUFFER_H_ -#define LIB_SHARED_BUFFER_H_ - -#include - -#include -#include -#include -#include - -namespace pulsar { - -class SharedBuffer { - public: - explicit SharedBuffer() : data_(), ptr_(nullptr), readIdx_(0), writeIdx_(0), capacity_(0) {} - - // SHALLOW copy constructor. - SharedBuffer(const SharedBuffer&) = default; - SharedBuffer& operator=(const SharedBuffer&) = default; - - // Move constructor. 
- SharedBuffer(SharedBuffer&& right) { *this = std::move(right); } - SharedBuffer& operator=(SharedBuffer&& right) { - this->data_ = std::move(right.data_); - - this->ptr_ = right.ptr_; - right.ptr_ = nullptr; - - this->readIdx_ = right.readIdx_; - right.readIdx_ = 0; - - this->writeIdx_ = right.writeIdx_; - right.writeIdx_ = 0; - - this->capacity_ = right.capacity_; - right.capacity_ = 0; - - return *this; - } - - /** - * Allocate a buffer of given size - */ - static SharedBuffer allocate(const uint32_t size) { return SharedBuffer(size); } - - /** - * Create a buffer with a copy of memory pointed by ptr - */ - static SharedBuffer copy(const char* ptr, uint32_t size) { - SharedBuffer buf = allocate(size); - buf.write(ptr, size); - return buf; - } - - /** - * Create a buffer by taking ownership of given data. - */ - static SharedBuffer take(std::string&& data) { return SharedBuffer(std::move(data)); } - - static SharedBuffer copyFrom(const SharedBuffer& other, uint32_t capacity) { - assert(other.readableBytes() <= capacity); - SharedBuffer buf = allocate(capacity); - buf.write(other.data(), other.readableBytes()); - return buf; - } - - /** - * Create a buffer that wraps the passed pointer, without copying the memory - */ - static SharedBuffer wrap(char* ptr, size_t size) { return SharedBuffer(ptr, size); } - - inline const char* data() const { return ptr_ + readIdx_; } - - inline char* mutableData() { return ptr_ + writeIdx_; } - - /** - * Return a shared buffer that include a portion of current buffer. 
No memory is copied - */ - SharedBuffer slice(uint32_t offset) const { - SharedBuffer buf(*this); - buf.consume(offset); - return buf; - } - - SharedBuffer slice(uint32_t offset, uint32_t length) const { - SharedBuffer buf(*this); - buf.consume(offset); - assert(buf.readableBytes() >= length); - buf.writeIdx_ = buf.readIdx_ + length; - return buf; - } - - uint32_t readUnsignedInt() { - assert(readableBytes() >= sizeof(uint32_t)); - uint32_t value = ntohl(*(uint32_t*)data()); - consume(sizeof(uint32_t)); - return value; - } - - uint16_t readUnsignedShort() { - assert(readableBytes() >= sizeof(uint16_t)); - uint16_t value = ntohs(*(uint16_t*)data()); - consume(sizeof(uint16_t)); - return value; - } - - void writeUnsignedInt(uint32_t value) { - assert(writableBytes() >= sizeof(uint32_t)); - *(uint32_t*)(mutableData()) = htonl(value); - bytesWritten(sizeof(value)); - } - - void writeUnsignedShort(uint16_t value) { - assert(writableBytes() >= sizeof(uint16_t)); - *(uint16_t*)(mutableData()) = htons(value); - bytesWritten(sizeof(value)); - } - - inline uint32_t readableBytes() const { return writeIdx_ - readIdx_; } - - inline uint32_t writableBytes() const { return capacity_ - writeIdx_; } - - inline bool readable() const { return readableBytes() > 0; } - - inline bool writable() const { return writableBytes() > 0; } - - boost::asio::const_buffers_1 const_asio_buffer() const { - return boost::asio::const_buffers_1(ptr_ + readIdx_, readableBytes()); - } - - boost::asio::mutable_buffers_1 asio_buffer() { - assert(data_); - return boost::asio::buffer(ptr_ + writeIdx_, writableBytes()); - } - - void write(const char* data, uint32_t size) { - assert(size <= writableBytes()); - - std::copy(data, data + size, mutableData()); - bytesWritten(size); - } - - // Mark that some bytes were written into the buffer - inline void bytesWritten(uint32_t size) { - assert(size <= writableBytes()); - writeIdx_ += size; - } - - // Return current writer index - uint32_t writerIndex() const 
noexcept { return writeIdx_; } - - // skip writerIndex - void skipBytes(uint32_t size) { - assert(writeIdx_ + size <= capacity_); - writeIdx_ += size; - } - - // set writerIndex - void setWriterIndex(uint32_t index) { - assert(index <= capacity_); - writeIdx_ = index; - } - - // Return current reader index - uint32_t readerIndex() const noexcept { return readIdx_; } - - // set readerIndex - void setReaderIndex(uint32_t index) { - assert(index <= capacity_); - readIdx_ = index; - } - - inline void consume(uint32_t size) { - assert(size <= readableBytes()); - readIdx_ += size; - } - - inline void rollback(uint32_t size) { - assert(size <= readIdx_); - readIdx_ -= size; - } - - inline void reset() { - readIdx_ = 0; - writeIdx_ = 0; - } - - private: - std::shared_ptr data_; - char* ptr_; - uint32_t readIdx_; - uint32_t writeIdx_; - uint32_t capacity_; - - SharedBuffer(char* ptr, size_t size) - : data_(), ptr_(ptr), readIdx_(0), writeIdx_(size), capacity_(size) {} - - explicit SharedBuffer(size_t size) - : data_(std::make_shared(size, '\0')), - ptr_(size ? &(*data_)[0] : nullptr), - readIdx_(0), - writeIdx_(0), - capacity_(size) {} - - explicit SharedBuffer(std::string&& data) - : data_(std::make_shared(std::move(data))), - ptr_(data_->empty() ? nullptr : &(*data_)[0]), - readIdx_(0), - writeIdx_(data_->length()), - capacity_(data_->length()) {} -}; // class SharedBuffer - -template -class CompositeSharedBuffer { - public: - void set(int idx, const SharedBuffer& buffer) { - sharedBuffers_[idx] = buffer; - asioBuffers_[idx] = buffer.const_asio_buffer(); - } - - // Implement the ConstBufferSequence requirements. 
- typedef boost::asio::const_buffer value_type; - typedef boost::asio::const_buffer* iterator; - typedef const boost::asio::const_buffer* const_iterator; - - const boost::asio::const_buffer* begin() const { return &(asioBuffers_.at(0)); } - - const boost::asio::const_buffer* end() const { return begin() + Size; } - - private: - std::array sharedBuffers_; - std::array asioBuffers_; -}; - -typedef CompositeSharedBuffer<2> PairSharedBuffer; -} // namespace pulsar - -#endif /* LIB_SHARED_BUFFER_H_ */ diff --git a/pulsar-client-cpp/lib/SimpleLogger.h b/pulsar-client-cpp/lib/SimpleLogger.h deleted file mode 100644 index b750336a37f68..0000000000000 --- a/pulsar-client-cpp/lib/SimpleLogger.h +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include -#include -#include -#include -#include - -namespace pulsar { - -inline std::ostream &operator<<(std::ostream &s, Logger::Level level) { - switch (level) { - case Logger::LEVEL_DEBUG: - s << "DEBUG"; - break; - case Logger::LEVEL_INFO: - s << "INFO "; - break; - case Logger::LEVEL_WARN: - s << "WARN "; - break; - case Logger::LEVEL_ERROR: - s << "ERROR"; - break; - } - - return s; -} - -class SimpleLogger : public Logger { - public: - SimpleLogger(std::ostream &os, const std::string &filename, Level level) - : os_(os), filename_(filename), level_(level) {} - - bool isEnabled(Level level) { return level >= level_; } - - void log(Level level, int line, const std::string &message) { - std::stringstream ss; - - printTimestamp(ss); - ss << " " << level << " [" << std::this_thread::get_id() << "] " << filename_ << ":" << line << " | " - << message << "\n"; - - os_ << ss.str(); - os_.flush(); - } - - private: - std::ostream &os_; - const std::string filename_; - const Level level_; - - static std::ostream &printTimestamp(std::ostream &s) { - boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time(); - - const boost::format f = - boost::format("%04d-%02d-%02d %02d:%02d:%02d.%03d") % now.date().year_month_day().year % - now.date().year_month_day().month.as_number() % now.date().year_month_day().day.as_number() % - now.time_of_day().hours() % now.time_of_day().minutes() % now.time_of_day().seconds() % - (now.time_of_day().fractional_seconds() / 1000); - - s << f.str(); - return s; - } -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/SinglePartitionMessageRouter.cc b/pulsar-client-cpp/lib/SinglePartitionMessageRouter.cc deleted file mode 100644 index 5ebe4c876d97c..0000000000000 --- a/pulsar-client-cpp/lib/SinglePartitionMessageRouter.cc +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "SinglePartitionMessageRouter.h" - -#include -#include - -namespace pulsar { -SinglePartitionMessageRouter::~SinglePartitionMessageRouter() {} - -SinglePartitionMessageRouter::SinglePartitionMessageRouter(const int numberOfPartitions, - ProducerConfiguration::HashingScheme hashingScheme) - : MessageRouterBase(hashingScheme) { - std::default_random_engine generator( - std::chrono::high_resolution_clock::now().time_since_epoch().count()); - selectedSinglePartition_ = generator() % numberOfPartitions; -} - -SinglePartitionMessageRouter::SinglePartitionMessageRouter(const int partitionIndex, - const int numberOfPartitions, - ProducerConfiguration::HashingScheme hashingScheme) - : MessageRouterBase(hashingScheme) { - selectedSinglePartition_ = partitionIndex; -} - -// override -int SinglePartitionMessageRouter::getPartition(const Message& msg, const TopicMetadata& topicMetadata) { - // if message has a key, hash the key and return the partition - if (msg.hasPartitionKey()) { - return hash->makeHash(msg.getPartitionKey()) % topicMetadata.getNumPartitions(); - } else { - // else pick the next partition - return selectedSinglePartition_; - } -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/SinglePartitionMessageRouter.h 
b/pulsar-client-cpp/lib/SinglePartitionMessageRouter.h deleted file mode 100644 index 4407bd426ffb4..0000000000000 --- a/pulsar-client-cpp/lib/SinglePartitionMessageRouter.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_SINGLE_PARTITION_MESSAGE_ROUTER_HEADER_ -#define PULSAR_SINGLE_PARTITION_MESSAGE_ROUTER_HEADER_ - -#include -#include -#include -#include "Hash.h" -#include -#include "MessageRouterBase.h" - -namespace pulsar { - -class PULSAR_PUBLIC SinglePartitionMessageRouter : public MessageRouterBase { - public: - SinglePartitionMessageRouter(const int partitionIndex, const int numberOfPartitions, - ProducerConfiguration::HashingScheme hashingScheme); - SinglePartitionMessageRouter(const int numberOfPartitions, - ProducerConfiguration::HashingScheme hashingScheme); - virtual ~SinglePartitionMessageRouter(); - virtual int getPartition(const Message& msg, const TopicMetadata& topicMetadata); - - private: - int selectedSinglePartition_; -}; - -} // namespace pulsar -#endif // PULSAR_SINGLE_PARTITION_MESSAGE_ROUTER_HEADER_ diff --git a/pulsar-client-cpp/lib/Synchronized.h b/pulsar-client-cpp/lib/Synchronized.h deleted file mode 100644 index a98c08daeee3a..0000000000000 --- a/pulsar-client-cpp/lib/Synchronized.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include - -template -class Synchronized { - public: - explicit Synchronized(const T& value) : value_(value) {} - - T get() const { - std::lock_guard lock(mutex_); - return value_; - } - - Synchronized& operator=(const T& value) { - std::lock_guard lock(mutex_); - value_ = value; - return *this; - } - - private: - T value_; - mutable std::mutex mutex_; -}; diff --git a/pulsar-client-cpp/lib/SynchronizedHashMap.h b/pulsar-client-cpp/lib/SynchronizedHashMap.h deleted file mode 100644 index 831d1e83bbd6e..0000000000000 --- a/pulsar-client-cpp/lib/SynchronizedHashMap.h +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include -#include -#include -#include "Utils.h" - -namespace pulsar { - -// V must be default constructible and copyable -template -class SynchronizedHashMap { - using MutexType = std::recursive_mutex; - using Lock = std::lock_guard; - - public: - using OptValue = Optional; - using PairVector = std::vector>; - using MapType = std::unordered_map; - using Iterator = typename MapType::iterator; - - SynchronizedHashMap() = default; - - SynchronizedHashMap(const PairVector& pairs) { - for (auto&& kv : pairs) { - data_.emplace(kv.first, kv.second); - } - } - - template - std::pair emplace(Args&&... args) { - Lock lock(mutex_); - return data_.emplace(std::forward(args)...); - } - - void forEach(std::function f) const { - Lock lock(mutex_); - for (const auto& kv : data_) { - f(kv.first, kv.second); - } - } - - void forEachValue(std::function f) const { - Lock lock(mutex_); - for (const auto& kv : data_) { - f(kv.second); - } - } - - void clear() { - Lock lock(mutex_); - data_.clear(); - } - - // clear the map and apply `f` on each removed value - void clear(std::function f) { - Lock lock(mutex_); - auto it = data_.begin(); - while (it != data_.end()) { - f(it->first, it->second); - auto next = data_.erase(it); - it = next; - } - } - - OptValue find(const K& key) const { - Lock lock(mutex_); - auto it = data_.find(key); - if (it != data_.end()) { - return OptValue::of(it->second); - } else { - return OptValue::empty(); - } - } - - OptValue findFirstValueIf(std::function f) const { - Lock lock(mutex_); - for (const auto& kv : data_) { - if (f(kv.second)) { - return OptValue::of(kv.second); - } - } - return OptValue::empty(); - } - - OptValue remove(const K& key) { - Lock lock(mutex_); - auto it = data_.find(key); - if (it != data_.end()) { - auto result = OptValue::of(std::move(it->second)); - data_.erase(it); - return result; - } else { - return OptValue::empty(); - } - } - - // This method is only used for test - PairVector 
toPairVector() const { - Lock lock(mutex_); - PairVector pairs; - for (auto&& kv : data_) { - pairs.emplace_back(kv); - } - return pairs; - } - - // This method is only used for test - size_t size() const noexcept { - Lock lock(mutex_); - return data_.size(); - } - - private: - std::unordered_map data_; - // Use recursive_mutex to allow methods being called in `forEach` - mutable MutexType mutex_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/TestUtil.h b/pulsar-client-cpp/lib/TestUtil.h deleted file mode 100644 index 1de5ae3a19d1e..0000000000000 --- a/pulsar-client-cpp/lib/TestUtil.h +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef PULSAR_CPP_TESTUTIL_H -#define PULSAR_CPP_TESTUTIL_H - -#define FRIEND_TEST(test_case_name, test_name) friend class test_case_name##_##test_name##_Test - -#endif // PULSAR_CPP_TESTUTIL_H diff --git a/pulsar-client-cpp/lib/TimeUtils.cc b/pulsar-client-cpp/lib/TimeUtils.cc deleted file mode 100644 index 7eecb86bf7108..0000000000000 --- a/pulsar-client-cpp/lib/TimeUtils.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include "TimeUtils.h" - -namespace pulsar { - -ptime TimeUtils::now() { return microsec_clock::universal_time(); } - -int64_t TimeUtils::currentTimeMillis() { - static ptime time_t_epoch(boost::gregorian::date(1970, 1, 1)); - - time_duration diff = now() - time_t_epoch; - return diff.total_milliseconds(); -} -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/TimeUtils.h b/pulsar-client-cpp/lib/TimeUtils.h deleted file mode 100644 index 45157ae855b98..0000000000000 --- a/pulsar-client-cpp/lib/TimeUtils.h +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include -#include - -#include - -namespace pulsar { - -using namespace boost::posix_time; -using boost::posix_time::milliseconds; -using boost::posix_time::seconds; - -class PULSAR_PUBLIC TimeUtils { - public: - static ptime now(); - static int64_t currentTimeMillis(); -}; - -// This class processes a timeout with the following semantics: -// > 0: wait at most the timeout until a blocking operation completes -// == 0: do not wait the blocking operation -// < 0: wait infinitely until a blocking operation completes. -// -// Here is a simple example usage: -// -// ```c++ -// // Wait at most 300 milliseconds -// TimeoutProcessor timeoutProcessor{300}; -// while (!allOperationsAreDone()) { -// timeoutProcessor.tik(); -// // This method may block for some time -// performBlockingOperation(timeoutProcessor.getLeftTimeout()); -// timeoutProcessor.tok(); -// } -// ``` -// -// The template argument is the same as std::chrono::duration. 
-template -class TimeoutProcessor { - public: - using Clock = std::chrono::high_resolution_clock; - - TimeoutProcessor(long timeout) : leftTimeout_(timeout) {} - - long getLeftTimeout() const noexcept { return leftTimeout_; } - - void tik() { before_ = Clock::now(); } - - void tok() { - if (leftTimeout_ > 0) { - leftTimeout_ -= std::chrono::duration_cast(Clock::now() - before_).count(); - if (leftTimeout_ <= 0) { - // The timeout exceeds, getLeftTimeout() will return 0 to indicate we should not wait more - leftTimeout_ = 0; - } - } - } - - private: - std::atomic_long leftTimeout_; - std::chrono::time_point before_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/TopicMetadataImpl.cc b/pulsar-client-cpp/lib/TopicMetadataImpl.cc deleted file mode 100644 index e29cd9702f6f9..0000000000000 --- a/pulsar-client-cpp/lib/TopicMetadataImpl.cc +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "TopicMetadataImpl.h" - -namespace pulsar { -TopicMetadataImpl::TopicMetadataImpl(const int numPartitions) : numPartitions_(numPartitions) {} - -int TopicMetadataImpl::getNumPartitions() const { return numPartitions_; } -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/TopicMetadataImpl.h b/pulsar-client-cpp/lib/TopicMetadataImpl.h deleted file mode 100644 index 76393c43fdad1..0000000000000 --- a/pulsar-client-cpp/lib/TopicMetadataImpl.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef TOPIC_METADATA_IMPL_HPP_ -#define TOPIC_METADATA_IMPL_HPP_ - -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC TopicMetadataImpl : public TopicMetadata { - public: - TopicMetadataImpl(const int numPartitions); - virtual int getNumPartitions() const; - - private: - int numPartitions_; -}; -} // namespace pulsar - -#endif /* TOPIC_METADATA_IMPL_HPP_ */ diff --git a/pulsar-client-cpp/lib/TopicName.cc b/pulsar-client-cpp/lib/TopicName.cc deleted file mode 100644 index 70b7b7e507242..0000000000000 --- a/pulsar-client-cpp/lib/TopicName.cc +++ /dev/null @@ -1,259 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "NamedEntity.h" -#include "LogUtils.h" -#include "PartitionedProducerImpl.h" -#include "TopicName.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -DECLARE_LOG_OBJECT() -namespace pulsar { - -const std::string TopicDomain::Persistent = "persistent"; -const std::string TopicDomain::NonPersistent = "non-persistent"; - -typedef std::unique_lock Lock; -// static members -CURL* TopicName::curl = NULL; -std::mutex TopicName::curlHandleMutex; - -CURL* TopicName::getCurlHandle() { - if (curl == NULL) { - // this handle can not be shared across threads, so had to get here everytime - curl = curl_easy_init(); - } - return curl; -} -//******************************************************************** -TopicName::TopicName() {} - -bool TopicName::init(const std::string& topicName) { - topicName_ = topicName; - if (topicName.find("://") == std::string::npos) { - std::string topicNameCopy_ = topicName; - std::vector pathTokens; - boost::algorithm::split(pathTokens, topicNameCopy_, boost::algorithm::is_any_of("/")); - if (pathTokens.size() == 3) { - topicName_ = - TopicDomain::Persistent + "://" + pathTokens[0] + "/" + pathTokens[1] + "/" + pathTokens[2]; - } else if (pathTokens.size() == 1) { - topicName_ = TopicDomain::Persistent + "://public/default/" + pathTokens[0]; - } else { - LOG_ERROR( - "Topic name is not valid, short topic name should be in the format of '' or " - "'//' - " - << topicName); - return false; - } - } - isV2Topic_ = parse(topicName_, domain_, property_, cluster_, namespacePortion_, localName_); - if (isV2Topic_ && !cluster_.empty()) { - LOG_ERROR("V2 Topic name is not valid, cluster is not empty - " << topicName_ << " : cluster " - << cluster_); - return false; - } else if (!isV2Topic_ && cluster_.empty()) { - LOG_ERROR("V1 Topic name is not valid, cluster is empty - " << topicName_); - return false; - } - if (localName_.empty()) { - LOG_ERROR("Topic name is not valid, topic name is empty - " 
<< topicName_); - return false; - } - if (isV2Topic_ && cluster_.empty()) { - namespaceName_ = NamespaceName::get(property_, namespacePortion_); - } else { - namespaceName_ = NamespaceName::get(property_, cluster_, namespacePortion_); - } - partition_ = TopicName::getPartitionIndex(localName_); - return true; -} -bool TopicName::parse(const std::string& topicName, std::string& domain, std::string& property, - std::string& cluster, std::string& namespacePortion, std::string& localName) { - std::string topicNameCopy = topicName; - boost::replace_first(topicNameCopy, "://", "/"); - std::vector pathTokens; - boost::algorithm::split(pathTokens, topicNameCopy, boost::algorithm::is_any_of("/")); - if (pathTokens.size() < 4) { - LOG_ERROR("Topic name is not valid, does not have enough parts - " << topicName); - return false; - } - domain = pathTokens[0]; - size_t numSlashIndexes; - bool isV2Topic; - if (pathTokens.size() == 4) { - // New topic name without cluster name - property = pathTokens[1]; - cluster = ""; - namespacePortion = pathTokens[2]; - localName = pathTokens[3]; - numSlashIndexes = 3; - isV2Topic = true; - } else { - // Legacy topic name that includes cluster name - property = pathTokens[1]; - cluster = pathTokens[2]; - namespacePortion = pathTokens[3]; - localName = pathTokens[4]; - numSlashIndexes = 4; - isV2Topic = false; - } - size_t slashIndex = -1; - // find `numSlashIndexes` '/', whatever is left is topic local name - for (int i = 0; i < numSlashIndexes; i++) { - slashIndex = topicNameCopy.find('/', slashIndex + 1); - } - // get index to next char to '/' - slashIndex++; - localName = topicNameCopy.substr(slashIndex, (topicNameCopy.size() - slashIndex)); - return isV2Topic; -} -std::string TopicName::getEncodedName(const std::string& nameBeforeEncoding) { - Lock lock(curlHandleMutex); - std::string nameAfterEncoding; - if (getCurlHandle()) { - char* encodedName = - curl_easy_escape(getCurlHandle(), nameBeforeEncoding.c_str(), nameBeforeEncoding.size()); 
- if (encodedName) { - nameAfterEncoding.assign(encodedName); - curl_free(encodedName); - } else { - LOG_ERROR("Unable to encode the name using curl_easy_escape, name - " << nameBeforeEncoding); - } - } else { - LOG_ERROR("Unable to get CURL handle to encode the name - " << nameBeforeEncoding); - } - return nameAfterEncoding; -} - -bool TopicName::isV2Topic() const { return isV2Topic_; } - -std::string TopicName::getDomain() const { return domain_; } - -std::string TopicName::getProperty() const { return property_; } - -std::string TopicName::getCluster() const { return cluster_; } - -std::string TopicName::getNamespacePortion() const { return namespacePortion_; } - -std::string TopicName::getLocalName() { return localName_; } - -std::string TopicName::getEncodedLocalName() const { return getEncodedName(localName_); } - -bool TopicName::operator==(const TopicName& other) { - return (this->topicName_.compare(other.topicName_) == 0); -} - -bool TopicName::validate() { - // Check if domain matches with TopicDomain::Persistent, in future check "memory" when server is - // ready. 
- if (domain_.compare(TopicDomain::Persistent) != 0 && domain_.compare(TopicDomain::NonPersistent) != 0) { - return false; - } - // cluster_ can be empty - if (!isV2Topic_ && !property_.empty() && !cluster_.empty() && !namespacePortion_.empty() && - !localName_.empty()) { - // v1 topic format - return NamedEntity::checkName(property_) && NamedEntity::checkName(cluster_) && - NamedEntity::checkName(namespacePortion_); - } else if (isV2Topic_ && !property_.empty() && !namespacePortion_.empty() && !localName_.empty()) { - // v2 topic format - return NamedEntity::checkName(property_) && NamedEntity::checkName(namespacePortion_); - } else { - return false; - } -} - -std::shared_ptr TopicName::get(const std::string& topicName) { - std::shared_ptr ptr(new TopicName()); - if (!ptr->init(topicName)) { - LOG_ERROR("Topic name initialization failed"); - return std::shared_ptr(); - } - if (ptr->validate()) { - return ptr; - } else { - LOG_ERROR("Topic name validation Failed - " << topicName); - return std::shared_ptr(); - } -} - -// TODO - for now return empty string if there's any error in format, later think about better error handling -std::string TopicName::getLookupName() { - std::stringstream ss; - std::string seperator("/"); - if (isV2Topic_ && cluster_.empty()) { - ss << domain_ << seperator << property_ << seperator << namespacePortion_ << seperator - << getEncodedLocalName(); - } else { - ss << domain_ << seperator << property_ << seperator << cluster_ << seperator << namespacePortion_ - << seperator << getEncodedLocalName(); - } - return ss.str(); -} - -std::string TopicName::toString() const { - std::stringstream ss; - std::string seperator("/"); - if (isV2Topic_ && cluster_.empty()) { - ss << domain_ << "://" << property_ << seperator << namespacePortion_ << seperator << localName_; - } else { - ss << domain_ << "://" << property_ << seperator << cluster_ << seperator << namespacePortion_ - << seperator << localName_; - } - return ss.str(); -} - -bool 
TopicName::isPersistent() const { return this->domain_ == TopicDomain::Persistent; } - -std::string TopicName::getTopicPartitionName(unsigned int partition) const { - std::stringstream topicPartitionName; - // make this topic name as well - topicPartitionName << toString() << PartitionedProducerImpl::PARTITION_NAME_SUFFIX << partition; - return topicPartitionName.str(); -} - -int TopicName::getPartitionIndex(const std::string& topic) { - const auto& suffix = PartitionedProducerImpl::PARTITION_NAME_SUFFIX; - const size_t pos = topic.rfind(suffix); - if (pos == std::string::npos) { - return -1; - } - - try { - // TODO: When handling topic name like "xxx-partition-00", it should return -1. - // But here it will returns, which is consistent with Java client's behavior - // Another corner case: "xxx-partition--2" => 2 (not -1) - return std::stoi(topic.substr(topic.rfind('-') + 1)); - } catch (const std::exception&) { - return -1; - } -} - -NamespaceNamePtr TopicName::getNamespaceName() { return namespaceName_; } - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/TopicName.h b/pulsar-client-cpp/lib/TopicName.h deleted file mode 100644 index d8620ea1fee76..0000000000000 --- a/pulsar-client-cpp/lib/TopicName.h +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef _PULSAR_TOPIC_NAME_HEADER_ -#define _PULSAR_TOPIC_NAME_HEADER_ - -#include -#include "NamespaceName.h" -#include "ServiceUnitId.h" - -#include -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC TopicDomain { - public: - static const std::string Persistent; - static const std::string NonPersistent; -}; // class TopicDomain - -class PULSAR_PUBLIC TopicName : public ServiceUnitId { - private: - std::string topicName_; - std::string domain_; - std::string property_; - std::string cluster_; - std::string namespacePortion_; - std::string localName_; - bool isV2Topic_; - std::shared_ptr namespaceName_; - int partition_ = -1; - - public: - bool isV2Topic() const; - std::string getLookupName(); - std::string getDomain() const; - std::string getProperty() const; - std::string getCluster() const; - std::string getNamespacePortion() const; - std::string getLocalName(); - std::string getEncodedLocalName() const; - std::string toString() const; - bool isPersistent() const; - NamespaceNamePtr getNamespaceName(); - int getPartitionIndex() const noexcept { return partition_; } - static std::shared_ptr get(const std::string& topicName); - bool operator==(const TopicName& other); - static std::string getEncodedName(const std::string& nameBeforeEncoding); - std::string getTopicPartitionName(unsigned int partition) const; - static int getPartitionIndex(const std::string& topic); - - private: - static CURL* getCurlHandle(); - static CURL* curl; - static std::mutex curlHandleMutex; - static bool parse(const std::string& topicName, std::string& domain, std::string& property, - std::string& cluster, std::string& namespacePortion, std::string& localName); - TopicName(); - bool validate(); - bool init(const std::string& topicName); -}; // class TopicName -typedef std::shared_ptr TopicNamePtr; -} // namespace pulsar -// end of namespace pulsar - -#endif 
//_PULSAR_TOPIC_NAME_HEADER_ diff --git a/pulsar-client-cpp/lib/UnAckedMessageTrackerDisabled.h b/pulsar-client-cpp/lib/UnAckedMessageTrackerDisabled.h deleted file mode 100644 index c25c1a5b9dd85..0000000000000 --- a/pulsar-client-cpp/lib/UnAckedMessageTrackerDisabled.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_UNACKEDMESSAGETRACKERDISABLED_H_ -#define LIB_UNACKEDMESSAGETRACKERDISABLED_H_ -#include "lib/UnAckedMessageTrackerInterface.h" -namespace pulsar { - -class UnAckedMessageTrackerDisabled : public UnAckedMessageTrackerInterface { - public: - bool add(const MessageId& m) { return false; } - bool remove(const MessageId& m) { return false; } - void removeMessagesTill(const MessageId& msgId) {} - void removeTopicMessage(const std::string& topic) {} - - void clear() {} -}; -} // namespace pulsar -#endif /* LIB_UNACKEDMESSAGETRACKERDISABLED_H_ */ diff --git a/pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.cc b/pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.cc deleted file mode 100644 index 9d0160f3bce9e..0000000000000 --- a/pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.cc +++ /dev/null @@ -1,167 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "UnAckedMessageTrackerEnabled.h" - -#include - -DECLARE_LOG_OBJECT(); - -namespace pulsar { - -void UnAckedMessageTrackerEnabled::timeoutHandler() { - timeoutHandlerHelper(); - ExecutorServicePtr executorService = client_->getIOExecutorProvider()->get(); - timer_ = executorService->createDeadlineTimer(); - timer_->expires_from_now(boost::posix_time::milliseconds(tickDurationInMs_)); - timer_->async_wait([&](const boost::system::error_code& ec) { - if (ec) { - LOG_DEBUG("Ignoring timer cancelled event, code[" << ec << "]"); - } else { - timeoutHandler(); - } - }); -} - -void UnAckedMessageTrackerEnabled::timeoutHandlerHelper() { - std::unique_lock acquire(lock_); - LOG_DEBUG("UnAckedMessageTrackerEnabled::timeoutHandlerHelper invoked for consumerPtr_ " - << consumerReference_.getName().c_str()); - - std::set headPartition = timePartitions.front(); - timePartitions.pop_front(); - - std::set msgIdsToRedeliver; - if (!headPartition.empty()) { - LOG_INFO(consumerReference_.getName().c_str() - << ": " << headPartition.size() << " Messages were not acked within " - << timePartitions.size() * tickDurationInMs_ << " time"); - for (auto it = headPartition.begin(); it != headPartition.end(); it++) { - msgIdsToRedeliver.insert(*it); - messageIdPartitionMap.erase(*it); - } - } - headPartition.clear(); - timePartitions.push_back(headPartition); - - if (msgIdsToRedeliver.size() > 0) { - // redeliverUnacknowledgedMessages() may call clear() that acquire the lock again, so we should unlock - // here to avoid deadlock - acquire.unlock(); - consumerReference_.redeliverUnacknowledgedMessages(msgIdsToRedeliver); - } -} - -UnAckedMessageTrackerEnabled::UnAckedMessageTrackerEnabled(long timeoutMs, const ClientImplPtr client, - ConsumerImplBase& consumer) - : UnAckedMessageTrackerEnabled(timeoutMs, timeoutMs, client, consumer) {} - -UnAckedMessageTrackerEnabled::UnAckedMessageTrackerEnabled(long timeoutMs, long tickDurationInMs, - const ClientImplPtr client, - 
ConsumerImplBase& consumer) - : consumerReference_(consumer), - client_(client), - timeoutMs_(timeoutMs), - tickDurationInMs_(timeoutMs >= tickDurationInMs ? tickDurationInMs : timeoutMs) { - const int blankPartitions = - static_cast(std::ceil(static_cast(timeoutMs_) / tickDurationInMs_)) + 1; - - for (int i = 0; i < blankPartitions; i++) { - std::set msgIds; - timePartitions.push_back(msgIds); - } - - timeoutHandler(); -} - -bool UnAckedMessageTrackerEnabled::add(const MessageId& msgId) { - std::lock_guard acquire(lock_); - MessageId id(msgId.partition(), msgId.ledgerId(), msgId.entryId(), -1); - if (messageIdPartitionMap.count(id) == 0) { - std::set& partition = timePartitions.back(); - bool emplace = messageIdPartitionMap.emplace(id, partition).second; - bool insert = partition.insert(id).second; - return emplace && insert; - } - return false; -} - -bool UnAckedMessageTrackerEnabled::isEmpty() { - std::lock_guard acquire(lock_); - return messageIdPartitionMap.empty(); -} - -bool UnAckedMessageTrackerEnabled::remove(const MessageId& msgId) { - std::lock_guard acquire(lock_); - MessageId id(msgId.partition(), msgId.ledgerId(), msgId.entryId(), -1); - bool removed = false; - - std::map&>::iterator exist = messageIdPartitionMap.find(id); - if (exist != messageIdPartitionMap.end()) { - removed = exist->second.erase(id); - messageIdPartitionMap.erase(exist); - } - return removed; -} - -long UnAckedMessageTrackerEnabled::size() { - std::lock_guard acquire(lock_); - return messageIdPartitionMap.size(); -} - -void UnAckedMessageTrackerEnabled::removeMessagesTill(const MessageId& msgId) { - std::lock_guard acquire(lock_); - for (auto it = messageIdPartitionMap.begin(); it != messageIdPartitionMap.end();) { - MessageId msgIdInMap = it->first; - if (msgIdInMap <= msgId) { - it->second.erase(msgIdInMap); - messageIdPartitionMap.erase(it++); - } else { - it++; - } - } -} - -// this is only for MultiTopicsConsumerImpl, when un-subscribe a single topic, should remove all it's 
message. -void UnAckedMessageTrackerEnabled::removeTopicMessage(const std::string& topic) { - std::lock_guard acquire(lock_); - for (auto it = messageIdPartitionMap.begin(); it != messageIdPartitionMap.end();) { - MessageId msgIdInMap = it->first; - if (msgIdInMap.getTopicName().compare(topic) == 0) { - it->second.erase(msgIdInMap); - messageIdPartitionMap.erase(it++); - } else { - it++; - } - } -} - -void UnAckedMessageTrackerEnabled::clear() { - std::lock_guard acquire(lock_); - messageIdPartitionMap.clear(); - for (auto it = timePartitions.begin(); it != timePartitions.end(); it++) { - it->clear(); - } -} - -UnAckedMessageTrackerEnabled::~UnAckedMessageTrackerEnabled() { - if (timer_) { - timer_->cancel(); - } -} -} /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.h b/pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.h deleted file mode 100644 index 7ed7b0385be70..0000000000000 --- a/pulsar-client-cpp/lib/UnAckedMessageTrackerEnabled.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef LIB_UNACKEDMESSAGETRACKERENABLED_H_ -#define LIB_UNACKEDMESSAGETRACKERENABLED_H_ -#include "lib/TestUtil.h" -#include "lib/UnAckedMessageTrackerInterface.h" - -#include - -namespace pulsar { -class UnAckedMessageTrackerEnabled : public UnAckedMessageTrackerInterface { - public: - ~UnAckedMessageTrackerEnabled(); - UnAckedMessageTrackerEnabled(long timeoutMs, const ClientImplPtr, ConsumerImplBase&); - UnAckedMessageTrackerEnabled(long timeoutMs, long tickDuration, const ClientImplPtr, ConsumerImplBase&); - bool add(const MessageId& msgId); - bool remove(const MessageId& msgId); - void removeMessagesTill(const MessageId& msgId); - void removeTopicMessage(const std::string& topic); - void timeoutHandler(); - - void clear(); - - protected: - void timeoutHandlerHelper(); - bool isEmpty(); - long size(); - std::map&> messageIdPartitionMap; - std::deque> timePartitions; - std::mutex lock_; - ConsumerImplBase& consumerReference_; - ClientImplPtr client_; - DeadlineTimerPtr timer_; // DO NOT place this before client_! - long timeoutMs_; - long tickDurationInMs_; - - FRIEND_TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery); - FRIEND_TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery); - FRIEND_TEST(ConsumerTest, testBatchUnAckedMessageTracker); -}; -} // namespace pulsar - -#endif /* LIB_UNACKEDMESSAGETRACKERENABLED_H_ */ diff --git a/pulsar-client-cpp/lib/UnAckedMessageTrackerInterface.h b/pulsar-client-cpp/lib/UnAckedMessageTrackerInterface.h deleted file mode 100644 index 50fa72c5c6f2c..0000000000000 --- a/pulsar-client-cpp/lib/UnAckedMessageTrackerInterface.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_UNACKEDMESSAGETRACKERINTERFACE_H_ -#define LIB_UNACKEDMESSAGETRACKERINTERFACE_H_ -#include -#include -#include -#include -#include -#include "pulsar/MessageId.h" -#include "lib/ClientImpl.h" -#include "lib/ConsumerImplBase.h" -#include -#include -#include "lib/PulsarApi.pb.h" -#include -namespace pulsar { - -class UnAckedMessageTrackerInterface { - public: - virtual ~UnAckedMessageTrackerInterface() {} - UnAckedMessageTrackerInterface() {} - virtual bool add(const MessageId& m) = 0; - virtual bool remove(const MessageId& m) = 0; - virtual void removeMessagesTill(const MessageId& msgId) = 0; - virtual void clear() = 0; - // this is only for MultiTopicsConsumerImpl, when un-subscribe a single topic, should remove all it's - // message. - virtual void removeTopicMessage(const std::string& topic) = 0; -}; - -using UnAckedMessageTrackerPtr = std::shared_ptr; -} // namespace pulsar -#endif /* LIB_UNACKEDMESSAGETRACKERINTERFACE_H_ */ diff --git a/pulsar-client-cpp/lib/UnboundedBlockingQueue.h b/pulsar-client-cpp/lib/UnboundedBlockingQueue.h deleted file mode 100644 index 0f7fc2a33af25..0000000000000 --- a/pulsar-client-cpp/lib/UnboundedBlockingQueue.h +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_UNBOUNDEDBLOCKINGQUEUE_H_ -#define LIB_UNBOUNDEDBLOCKINGQUEUE_H_ - -#include -#include -#include -// For struct QueueNotEmpty -#include "BlockingQueue.h" - -template -class UnboundedBlockingQueue { - public: - typedef typename boost::circular_buffer Container; - typedef typename Container::iterator iterator; - typedef typename Container::const_iterator const_iterator; - - UnboundedBlockingQueue(size_t maxSize) : mutex_(), queue_(maxSize) {} - - ~UnboundedBlockingQueue() { - Lock lock(mutex_); - queue_.clear(); - } - - void push(const T& value) { - Lock lock(mutex_); - // If the queue is full, wait for space to be available - bool wasEmpty = queue_.empty(); - if (queue_.full()) { - queue_.set_capacity(queue_.size() * 2); - } - queue_.push_back(value); - lock.unlock(); - - if (wasEmpty) { - // Notify that an element is pushed - queueEmptyCondition_.notify_one(); - } - } - - bool pop(T& value) { - Lock lock(mutex_); - // If the queue is empty, wait until an element is available to be popped - queueEmptyCondition_.wait(lock, QueueNotEmpty >(*this)); - - if (isEmptyNoMutex() || isClosedNoMutex()) { - return false; - } - - value = queue_.front(); - queue_.pop_front(); - return true; - } - - template - bool pop(T& value, const Duration& timeout) { - Lock 
lock(mutex_); - if (!queueEmptyCondition_.wait_for(lock, timeout, QueueNotEmpty >(*this))) { - return false; - } - - if (isEmptyNoMutex() || isClosedNoMutex()) { - return false; - } - - value = queue_.front(); - queue_.pop_front(); - lock.unlock(); - - return true; - } - - // Check the 1st element of the queue - bool peek(T& value) { - Lock lock(mutex_); - if (queue_.empty()) { - return false; - } - - value = queue_.front(); - return true; - } - - // Remove all elements from the queue - void clear() { - Lock lock(mutex_); - queue_.clear(); - } - - // Check 1st item and clear the queue atomically - bool peekAndClear(T& value) { - Lock lock(mutex_); - if (queue_.empty()) { - return false; - } - - value = queue_.front(); - queue_.clear(); - return true; - } - - size_t size() const { - Lock lock(mutex_); - return queue_.size(); - } - - bool empty() const { - Lock lock(mutex_); - return isEmptyNoMutex(); - } - - const_iterator begin() const { return queue_.begin(); } - - const_iterator end() const { return queue_.end(); } - - iterator begin() { return queue_.begin(); } - - iterator end() { return queue_.end(); } - - void close() { - Lock lock(mutex_); - closed_ = true; - queueEmptyCondition_.notify_all(); - } - - private: - bool isEmptyNoMutex() const { return queue_.empty(); } - bool isClosedNoMutex() const { return closed_; } - - mutable std::mutex mutex_; - std::condition_variable queueEmptyCondition_; - Container queue_; - bool closed_ = false; - - typedef std::unique_lock Lock; - friend struct QueueNotEmpty >; -}; - -#endif /* LIB_BLOCKINGQUEUE_H_ */ diff --git a/pulsar-client-cpp/lib/Url.cc b/pulsar-client-cpp/lib/Url.cc deleted file mode 100644 index f31e1fcc186de..0000000000000 --- a/pulsar-client-cpp/lib/Url.cc +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "Url.h" - -#include - -#include - -#ifdef PULSAR_USE_BOOST_REGEX -#include -#define PULSAR_REGEX_NAMESPACE boost -#else -#include -#define PULSAR_REGEX_NAMESPACE std -#endif - -namespace pulsar { - -static const std::map initDefaultPortsMap() { - std::map defaultPortsMap; - defaultPortsMap["http"] = 80; - defaultPortsMap["https"] = 443; - defaultPortsMap["pulsar"] = 6650; - defaultPortsMap["pulsar+ssl"] = 6651; - return defaultPortsMap; -} - -static const std::map& defaultPortsMap() { - static std::map defaultPortsMap = initDefaultPortsMap(); - return defaultPortsMap; -} - -bool Url::parse(const std::string& urlStr, Url& url) { - std::vector values; - static const PULSAR_REGEX_NAMESPACE::regex expression( - // proto host port - "^(\?:([^:/\?#]+)://)\?(\\w+[^/\?#:]*)(\?::(\\d+))\?" 
- // path file parameters - "(/\?(\?:[^\?#/]*/)*)\?([^\?#]*)\?(\\\?(.*))\?"); - - PULSAR_REGEX_NAMESPACE::cmatch groups; - if (!PULSAR_REGEX_NAMESPACE::regex_match(urlStr.c_str(), groups, expression)) { - // Invalid url - return false; - } - - url.protocol_ = std::string(groups[1].first, groups[1].second); - url.host_ = std::string(groups[2].first, groups[2].second); - std::string portStr(groups[3].first, groups[3].second); - url.pathWithoutFile_ = std::string(groups[4].first, groups[4].second); - url.file_ = std::string(groups[5].first, groups[5].second); - url.parameter_ = std::string(groups[6].first, groups[6].second); - url.path_ = url.pathWithoutFile_ + url.file_; - - if (!portStr.empty()) { - url.port_ = atoi(groups[3].first); - } else { - std::map::const_iterator it = defaultPortsMap().find(url.protocol_); - if (it != defaultPortsMap().end()) { - url.port_ = it->second; - } else { - // Invalid port - return false; - } - } - - return true; -} - -const std::string& Url::protocol() const { return protocol_; } - -const std::string& Url::host() const { return host_; } - -const int Url::port() const { return port_; } - -const std::string& Url::path() const { return path_; } - -const std::string& Url::pathWithoutFile() const { return pathWithoutFile_; } - -const std::string& Url::file() const { return file_; } - -const std::string& Url::parameter() const { return parameter_; } - -std::string Url::hostPort() const { - std::stringstream ss; - ss << host_ << ':' << port_; - return ss.str(); -} - -std::ostream& operator<<(std::ostream& os, const Url& obj) { - os << "Url [Host = " << obj.host() << ", Protocol = " << obj.protocol() << ", Port = " << obj.port() - << "]"; - return os; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/Url.h b/pulsar-client-cpp/lib/Url.h deleted file mode 100644 index f5596c53800e9..0000000000000 --- a/pulsar-client-cpp/lib/Url.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_URL_H_ -#define LIB_URL_H_ - -#include -#include - -namespace pulsar { - -/** - * URL parsing utility - */ -class PULSAR_PUBLIC Url { - public: - static bool parse(const std::string& urlStr, Url& url); - - const std::string& protocol() const; - const std::string& host() const; - const int port() const; - const std::string& path() const; - const std::string& pathWithoutFile() const; - const std::string& file() const; - const std::string& parameter() const; - friend std::ostream& operator<<(std::ostream& os, const Url& obj); - - std::string hostPort() const; - - private: - std::string protocol_; - std::string host_; - int port_; - std::string path_; - std::string pathWithoutFile_; - std::string file_; - std::string parameter_; -}; - -} // namespace pulsar - -#endif /* LIB_URL_H_ */ diff --git a/pulsar-client-cpp/lib/UtilAllocator.h b/pulsar-client-cpp/lib/UtilAllocator.h deleted file mode 100644 index acd1414b9f6f8..0000000000000 --- a/pulsar-client-cpp/lib/UtilAllocator.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_UTILALLOCATOR_H_ -#define LIB_UTILALLOCATOR_H_ - -#include - -class HandlerAllocator : private boost::noncopyable { - public: - HandlerAllocator() : inUse_(false) {} - - void* allocate(std::size_t size) { - if (!inUse_ && size < storage_.size) { - inUse_ = true; - return storage_.address(); - } else { - return ::operator new(size); - } - } - - void deallocate(void* pointer) { - if (pointer == storage_.address()) { - inUse_ = false; - } else { - ::operator delete(pointer); - } - } - - private: - // Storage space used for handler-based custom memory allocation. 
- boost::aligned_storage<1024> storage_; - bool inUse_; -}; - -template -class AllocHandler { - public: - AllocHandler(HandlerAllocator& a, Handler h) : allocator_(a), handler_(h) {} - - template - void operator()(Arg1 arg1) { - handler_(arg1); - } - - template - void operator()(Arg1 arg1, Arg2 arg2) { - handler_(arg1, arg2); - } - - friend void* asio_handler_allocate(std::size_t size, AllocHandler* thisHandler) { - return thisHandler->allocator_.allocate(size); - } - - friend void asio_handler_deallocate(void* ptr, std::size_t, AllocHandler* thisHandler) { - thisHandler->allocator_.deallocate(ptr); - } - - private: - HandlerAllocator& allocator_; - Handler handler_; -}; - -#endif /* LIB_UTILALLOCATOR_H_ */ diff --git a/pulsar-client-cpp/lib/Utils.h b/pulsar-client-cpp/lib/Utils.h deleted file mode 100644 index b0f500ef0acab..0000000000000 --- a/pulsar-client-cpp/lib/Utils.h +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef UTILS_HPP_ -#define UTILS_HPP_ - -#include - -#include "Future.h" - -#include -#include - -namespace pulsar { - -struct WaitForCallback { - Promise m_promise; - - WaitForCallback(Promise promise) : m_promise(promise) {} - - void operator()(Result result) { m_promise.setValue(result); } -}; - -template -struct WaitForCallbackValue { - Promise& m_promise; - - WaitForCallbackValue(Promise& promise) : m_promise(promise) {} - - void operator()(Result result, const T& value) { - if (result == ResultOk) { - m_promise.setValue(value); - } else { - m_promise.setFailed(result); - } - } -}; - -template -struct WaitForCallbackType { - Promise m_promise; - - WaitForCallbackType(Promise promise) : m_promise(promise) {} - - void operator()(T result) { m_promise.setValue(result); } -}; - -inline std::ostream& operator<<(std::ostream& os, const std::map& m) { - os << "{"; - for (std::map::const_iterator it = m.begin(); it != m.end(); it++) { - os << "[Key: " << strResult(it->first) << ", Value: " << it->second << "], "; - } - os << "}"; - return os; -} - -/** - * Utility class that encloses an optional value - */ -template -class Optional { - public: - const T& value() const { return value_; } - - bool is_present() const { return present_; } - - bool is_empty() const { return !present_; } - - /** - * Create an Optional with the bound value - */ - static Optional of(const T& value) { return Optional(value); } - static Optional of(T&& value) { return Optional(std::move(value)); } - - /** - * Create an empty optional - */ - static Optional empty() { return Optional(); } - - Optional() : value_(), present_(false) {} - - private: - Optional(const T& value) : value_(value), present_(true) {} - Optional(T&& value) : value_(std::move(value)), present_(true) {} - - T value_; - bool present_; -}; -} // namespace pulsar - -#endif /* UTILS_HPP_ */ diff --git a/pulsar-client-cpp/lib/VersionInternal.h b/pulsar-client-cpp/lib/VersionInternal.h deleted file mode 100644 index 
c2560352692dd..0000000000000 --- a/pulsar-client-cpp/lib/VersionInternal.h +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef LIB_VERSION_INTERNAL_H_ -#define LIB_VERSION_INTERNAL_H_ - -#ifndef _PULSAR_VERSION_INTERNAL_ -#define _PULSAR_VERSION_INTERNAL_ "unknown" -#endif - -#endif /* LIB_VERSION_INTERNAL_H_ */ diff --git a/pulsar-client-cpp/lib/auth/AuthAthenz.cc b/pulsar-client-cpp/lib/auth/AuthAthenz.cc deleted file mode 100644 index 82d12761c56c7..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthAthenz.cc +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include - -#include -#include -namespace ptree = boost::property_tree; - -#include - -#include - -DECLARE_LOG_OBJECT() - -namespace pulsar { -AuthDataAthenz::AuthDataAthenz(ParamMap& params) { - ztsClient_ = std::make_shared(std::ref(params)); - LOG_DEBUG("AuthDataAthenz is construted.") -} - -bool AuthDataAthenz::hasDataForHttp() { return true; } - -std::string AuthDataAthenz::getHttpHeaders() { - return ztsClient_->getHeader() + ": " + ztsClient_->getRoleToken(); -} - -bool AuthDataAthenz::hasDataFromCommand() { return true; } - -std::string AuthDataAthenz::getCommandData() { return ztsClient_->getRoleToken(); } - -AuthDataAthenz::~AuthDataAthenz() {} - -AuthAthenz::AuthAthenz(AuthenticationDataPtr& authDataAthenz) { authDataAthenz_ = authDataAthenz; } - -AuthAthenz::~AuthAthenz() {} - -ParamMap parseAuthParamsString(const std::string& authParamsString) { - ParamMap params; - if (!authParamsString.empty()) { - ptree::ptree root; - std::stringstream stream; - stream << authParamsString; - try { - ptree::read_json(stream, root); - for (const auto& item : root) { - params[item.first] = item.second.get_value(); - } - } catch (ptree::json_parser_error& e) { - LOG_ERROR("Invalid String Error: " << e.what()); - } - } - return params; -} - -AuthenticationPtr AuthAthenz::create(const std::string& authParamsString) { - ParamMap params = parseAuthParamsString(authParamsString); - AuthenticationDataPtr authDataAthenz = AuthenticationDataPtr(new AuthDataAthenz(params)); - return AuthenticationPtr(new AuthAthenz(authDataAthenz)); -} - -AuthenticationPtr 
AuthAthenz::create(ParamMap& params) { - AuthenticationDataPtr authDataAthenz = AuthenticationDataPtr(new AuthDataAthenz(params)); - return AuthenticationPtr(new AuthAthenz(authDataAthenz)); -} - -const std::string AuthAthenz::getAuthMethodName() const { return "athenz"; } - -Result AuthAthenz::getAuthData(AuthenticationDataPtr& authDataContent) { - authDataContent = authDataAthenz_; - return ResultOk; -} - -extern "C" Authentication* create(const std::string& authParamsString) { - ParamMap params = parseAuthParamsString(authParamsString); - AuthenticationDataPtr authDataAthenz = AuthenticationDataPtr(new AuthDataAthenz(params)); - return new AuthAthenz(authDataAthenz); -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthAthenz.h b/pulsar-client-cpp/lib/auth/AuthAthenz.h deleted file mode 100644 index e58a4bc831fd0..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthAthenz.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PULSAR_AUTH_ATHENZ_H_ -#define PULSAR_AUTH_ATHENZ_H_ - -#include -#include -#include - -namespace pulsar { - -const std::string ATHENZ_PLUGIN_NAME = "athenz"; -const std::string ATHENZ_JAVA_PLUGIN_NAME = "org.apache.pulsar.client.impl.auth.AuthenticationAthenz"; - -class AuthDataAthenz : public AuthenticationDataProvider { - public: - AuthDataAthenz(ParamMap& params); - bool hasDataForHttp(); - std::string getHttpHeaders(); - bool hasDataFromCommand(); - std::string getCommandData(); - ~AuthDataAthenz(); - - private: - std::shared_ptr ztsClient_; -}; - -} // namespace pulsar -#endif /* PULSAR_AUTH_ATHENZ_H_ */ diff --git a/pulsar-client-cpp/lib/auth/AuthBasic.cc b/pulsar-client-cpp/lib/auth/AuthBasic.cc deleted file mode 100644 index 463e1474ce994..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthBasic.cc +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "AuthBasic.h" - -#include -#include -#include -#include -#include -namespace ptree = boost::property_tree; - -#include -#include - -namespace pulsar { - -std::string base64_encode(const std::string& s) { - using namespace boost::archive::iterators; - using It = base64_from_binary>; - auto data = std::string(It(std::begin(s)), It(std::end(s))); - return data.append((3 - s.size() % 3) % 3, '='); -} - -AuthDataBasic::AuthDataBasic(const std::string& username, const std::string& password) { - commandAuthToken_ = username + ":" + password; - httpAuthToken_ = base64_encode(commandAuthToken_); -} - -AuthDataBasic::~AuthDataBasic() {} - -bool AuthDataBasic::hasDataForHttp() { return true; } - -std::string AuthDataBasic::getHttpHeaders() { return "Authorization: Basic " + httpAuthToken_; } - -bool AuthDataBasic::hasDataFromCommand() { return true; } - -std::string AuthDataBasic::getCommandData() { return commandAuthToken_; } - -// AuthBasic - -AuthBasic::AuthBasic(AuthenticationDataPtr& authDataBasic) { authDataBasic_ = authDataBasic; } - -AuthBasic::~AuthBasic() = default; - -AuthenticationPtr AuthBasic::create(const std::string& username, const std::string& password) { - AuthenticationDataPtr authDataBasic = AuthenticationDataPtr(new AuthDataBasic(username, password)); - return AuthenticationPtr(new AuthBasic(authDataBasic)); -} - -ParamMap parseBasicAuthParamsString(const std::string& authParamsString) { - ParamMap params; - if (!authParamsString.empty()) { - ptree::ptree root; - std::stringstream stream; - stream << authParamsString; - try { - ptree::read_json(stream, root); - for (const auto& item : root) { - params[item.first] = item.second.get_value(); - } - } catch (ptree::json_parser_error& e) { - throw std::runtime_error(e.message()); - } - } - return params; -} - -AuthenticationPtr AuthBasic::create(const std::string& authParamsString) { - ParamMap paramMap = parseBasicAuthParamsString(authParamsString); - return create(paramMap); -} - 
-AuthenticationPtr AuthBasic::create(ParamMap& params) { - auto usernameIt = params.find("username"); - if (usernameIt == params.end()) { - throw std::runtime_error("No username provided for basic provider"); - } - auto passwordIt = params.find("password"); - if (passwordIt == params.end()) { - throw std::runtime_error("No password provided for basic provider"); - } - - return create(usernameIt->second, passwordIt->second); -} - -const std::string AuthBasic::getAuthMethodName() const { return "basic"; } - -Result AuthBasic::getAuthData(AuthenticationDataPtr& authDataBasic) { - authDataBasic = authDataBasic_; - return ResultOk; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthBasic.h b/pulsar-client-cpp/lib/auth/AuthBasic.h deleted file mode 100644 index 89b995afa815b..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthBasic.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include -#include -#include - -namespace pulsar { - -const std::string BASIC_PLUGIN_NAME = "basic"; -const std::string BASIC_JAVA_PLUGIN_NAME = "org.apache.pulsar.client.impl.auth.AuthenticationBasic"; - -class AuthDataBasic : public AuthenticationDataProvider { - public: - AuthDataBasic(const std::string& username, const std::string& password); - ~AuthDataBasic(); - - bool hasDataForHttp(); - std::string getHttpHeaders(); - bool hasDataFromCommand(); - std::string getCommandData(); - - private: - std::string commandAuthToken_; - std::string httpAuthToken_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthOauth2.cc b/pulsar-client-cpp/lib/auth/AuthOauth2.cc deleted file mode 100644 index 2fce8047a3a66..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthOauth2.cc +++ /dev/null @@ -1,416 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include -#include -#include -#include -#include - -#include -DECLARE_LOG_OBJECT() - -namespace pulsar { - -// AuthDataOauth2 - -AuthDataOauth2::AuthDataOauth2(const std::string& accessToken) { accessToken_ = accessToken; } - -AuthDataOauth2::~AuthDataOauth2() {} - -bool AuthDataOauth2::hasDataForHttp() { return true; } - -std::string AuthDataOauth2::getHttpHeaders() { return "Authorization: Bearer " + accessToken_; } - -bool AuthDataOauth2::hasDataFromCommand() { return true; } - -std::string AuthDataOauth2::getCommandData() { return accessToken_; } - -// Oauth2TokenResult - -Oauth2TokenResult::Oauth2TokenResult() { expiresIn_ = undefined_expiration; } - -Oauth2TokenResult::~Oauth2TokenResult() {} - -Oauth2TokenResult& Oauth2TokenResult::setAccessToken(const std::string& accessToken) { - accessToken_ = accessToken; - return *this; -} - -Oauth2TokenResult& Oauth2TokenResult::setIdToken(const std::string& idToken) { - idToken_ = idToken; - return *this; -} - -Oauth2TokenResult& Oauth2TokenResult::setRefreshToken(const std::string& refreshToken) { - refreshToken_ = refreshToken; - return *this; -} - -Oauth2TokenResult& Oauth2TokenResult::setExpiresIn(const int64_t expiresIn) { - expiresIn_ = expiresIn; - return *this; -} - -const std::string& Oauth2TokenResult::getAccessToken() const { return accessToken_; } - -const std::string& Oauth2TokenResult::getIdToken() const { return idToken_; } - -const std::string& Oauth2TokenResult::getRefreshToken() const { return refreshToken_; } - -int64_t Oauth2TokenResult::getExpiresIn() const { return expiresIn_; } - -// CachedToken - -CachedToken::CachedToken() {} - -CachedToken::~CachedToken() {} - -// Oauth2CachedToken - -Oauth2CachedToken::Oauth2CachedToken(Oauth2TokenResultPtr token) { - latest_ = token; - - int64_t expiredIn = token->getExpiresIn(); - if (expiredIn > 0) { - expiresAt_ = Clock::now() + std::chrono::seconds(expiredIn); - } else { - throw std::runtime_error("ExpiresIn in Oauth2TokenResult 
invalid value: " + - std::to_string(expiredIn)); - } - authData_ = AuthenticationDataPtr(new AuthDataOauth2(token->getAccessToken())); -} - -AuthenticationDataPtr Oauth2CachedToken::getAuthData() { return authData_; } - -Oauth2CachedToken::~Oauth2CachedToken() {} - -bool Oauth2CachedToken::isExpired() { return expiresAt_ < Clock::now(); } - -// OauthFlow - -Oauth2Flow::Oauth2Flow() {} -Oauth2Flow::~Oauth2Flow() {} - -KeyFile KeyFile::fromParamMap(ParamMap& params) { - const auto it = params.find("private_key"); - if (it != params.cend()) { - return fromFile(it->second); - } else { - return {params["client_id"], params["client_secret"]}; - } -} - -// read clientId/clientSecret from passed in `credentialsFilePath` -KeyFile KeyFile::fromFile(const std::string& credentialsFilePath) { - boost::property_tree::ptree loadPtreeRoot; - try { - boost::property_tree::read_json(credentialsFilePath, loadPtreeRoot); - } catch (const boost::property_tree::json_parser_error& e) { - LOG_ERROR("Failed to parse json input file for credentialsFilePath: " << credentialsFilePath << ": " - << e.what()); - return {}; - } - - try { - return {loadPtreeRoot.get("client_id"), loadPtreeRoot.get("client_secret")}; - } catch (const boost::property_tree::ptree_error& e) { - LOG_ERROR("Failed to get client_id or client_secret in " << credentialsFilePath << ": " << e.what()); - return {}; - } -} - -ClientCredentialFlow::ClientCredentialFlow(ParamMap& params) - : issuerUrl_(params["issuer_url"]), - keyFile_(KeyFile::fromParamMap(params)), - audience_(params["audience"]), - scope_(params["scope"]) {} - -std::string ClientCredentialFlow::getTokenEndPoint() const { return tokenEndPoint_; } - -static size_t curlWriteCallback(void* contents, size_t size, size_t nmemb, void* responseDataPtr) { - ((std::string*)responseDataPtr)->append((char*)contents, size * nmemb); - return size * nmemb; -} - -void ClientCredentialFlow::initialize() { - if (issuerUrl_.empty()) { - LOG_ERROR("Failed to initialize 
ClientCredentialFlow: issuer_url is not set"); - return; - } - if (!keyFile_.isValid()) { - return; - } - - CURL* handle = curl_easy_init(); - CURLcode res; - std::string responseData; - - // set header: json, request type: post - struct curl_slist* list = NULL; - list = curl_slist_append(list, "Accept: application/json"); - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, list); - curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "GET"); - - // set URL: well-know endpoint - std::string wellKnownUrl = issuerUrl_; - if (wellKnownUrl.back() == '/') { - wellKnownUrl.pop_back(); - } - wellKnownUrl.append("/.well-known/openid-configuration"); - curl_easy_setopt(handle, CURLOPT_URL, wellKnownUrl.c_str()); - - // Write callback - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); - curl_easy_setopt(handle, CURLOPT_WRITEDATA, &responseData); - - // New connection is made for each call - curl_easy_setopt(handle, CURLOPT_FRESH_CONNECT, 1L); - curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); - - curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - - char errorBuffer[CURL_ERROR_SIZE]; - curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errorBuffer); - - // Make get call to server - res = curl_easy_perform(handle); - - switch (res) { - case CURLE_OK: - long response_code; - curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); - LOG_DEBUG("Received well-known configuration data " << issuerUrl_ << " code " << response_code); - if (response_code == 200) { - boost::property_tree::ptree root; - std::stringstream stream; - stream << responseData; - try { - boost::property_tree::read_json(stream, root); - } catch (boost::property_tree::json_parser_error& e) { - LOG_ERROR("Failed to parse well-known configuration data response: " - << e.what() << "\nInput Json = " << responseData); - break; - } - - this->tokenEndPoint_ = root.get("token_endpoint"); - - LOG_DEBUG("Get token endpoint: " << this->tokenEndPoint_); - } else { - LOG_ERROR("Response failed for 
getting the well-known configuration " - << issuerUrl_ << ". response Code " << response_code); - } - break; - default: - LOG_ERROR("Response failed for getting the well-known configuration " - << issuerUrl_ << ". Error Code " << res << ": " << errorBuffer); - break; - } - // Free header list - curl_slist_free_all(list); - curl_easy_cleanup(handle); -} -void ClientCredentialFlow::close() {} - -ParamMap ClientCredentialFlow::generateParamMap() const { - if (!keyFile_.isValid()) { - return {}; - } - - ParamMap params; - params.emplace("grant_type", "client_credentials"); - params.emplace("client_id", keyFile_.getClientId()); - params.emplace("client_secret", keyFile_.getClientSecret()); - params.emplace("audience", audience_); - if (!scope_.empty()) { - params.emplace("scope", scope_); - } - return params; -} - -static std::string buildClientCredentialsBody(CURL* curl, const ParamMap& params) { - std::ostringstream oss; - bool addSeparater = false; - - for (const auto& kv : params) { - if (addSeparater) { - oss << "&"; - } else { - addSeparater = true; - } - - char* encodedKey = curl_easy_escape(curl, kv.first.c_str(), kv.first.length()); - if (!encodedKey) { - LOG_ERROR("curl_easy_escape for " << kv.first << " failed"); - continue; - } - char* encodedValue = curl_easy_escape(curl, kv.second.c_str(), kv.second.length()); - if (!encodedValue) { - LOG_ERROR("curl_easy_escape for " << kv.second << " failed"); - continue; - } - - oss << encodedKey << "=" << encodedValue; - curl_free(encodedKey); - curl_free(encodedValue); - } - - return oss.str(); -} - -Oauth2TokenResultPtr ClientCredentialFlow::authenticate() { - std::call_once(initializeOnce_, &ClientCredentialFlow::initialize, this); - Oauth2TokenResultPtr resultPtr = Oauth2TokenResultPtr(new Oauth2TokenResult()); - if (tokenEndPoint_.empty()) { - return resultPtr; - } - - CURL* handle = curl_easy_init(); - const auto postData = buildClientCredentialsBody(handle, generateParamMap()); - if (postData.empty()) { - 
curl_easy_cleanup(handle); - return resultPtr; - } - LOG_DEBUG("Generate URL encoded body for ClientCredentialFlow: " << postData); - - CURLcode res; - std::string responseData; - - struct curl_slist* list = NULL; - list = curl_slist_append(list, "Content-Type: application/x-www-form-urlencoded"); - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, list); - curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "POST"); - - // set URL: issuerUrl - curl_easy_setopt(handle, CURLOPT_URL, tokenEndPoint_.c_str()); - - // Write callback - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); - curl_easy_setopt(handle, CURLOPT_WRITEDATA, &responseData); - - // New connection is made for each call - curl_easy_setopt(handle, CURLOPT_FRESH_CONNECT, 1L); - curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); - - curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - - curl_easy_setopt(handle, CURLOPT_POSTFIELDS, postData.c_str()); - - char errorBuffer[CURL_ERROR_SIZE]; - curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errorBuffer); - - // Make get call to server - res = curl_easy_perform(handle); - - switch (res) { - case CURLE_OK: - long response_code; - curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); - LOG_DEBUG("Response received for issuerurl " << issuerUrl_ << " code " << response_code); - if (response_code == 200) { - boost::property_tree::ptree root; - std::stringstream stream; - stream << responseData; - try { - boost::property_tree::read_json(stream, root); - } catch (boost::property_tree::json_parser_error& e) { - LOG_ERROR("Failed to parse json of Oauth2 response: " - << e.what() << "\nInput Json = " << responseData << " passedin: " << postData); - break; - } - - resultPtr->setAccessToken(root.get("access_token", "")); - resultPtr->setExpiresIn( - root.get("expires_in", Oauth2TokenResult::undefined_expiration)); - resultPtr->setRefreshToken(root.get("refresh_token", "")); - resultPtr->setIdToken(root.get("id_token", "")); - - if 
(!resultPtr->getAccessToken().empty()) { - LOG_DEBUG("access_token: " << resultPtr->getAccessToken() - << " expires_in: " << resultPtr->getExpiresIn()); - } else { - LOG_ERROR("Response doesn't contain access_token, the response is: " << responseData); - } - } else { - LOG_ERROR("Response failed for issuerurl " << issuerUrl_ << ". response Code " - << response_code << " passedin: " << postData); - } - break; - default: - LOG_ERROR("Response failed for issuerurl " << issuerUrl_ << ". ErrorCode " << res << ": " - << errorBuffer << " passedin: " << postData); - break; - } - // Free header list - curl_slist_free_all(list); - curl_easy_cleanup(handle); - - return resultPtr; -} - -// AuthOauth2 - -AuthOauth2::AuthOauth2(ParamMap& params) : flowPtr_(new ClientCredentialFlow(params)) {} - -AuthOauth2::~AuthOauth2() {} - -ParamMap parseJsonAuthParamsString(const std::string& authParamsString) { - ParamMap params; - if (!authParamsString.empty()) { - boost::property_tree::ptree root; - std::stringstream stream; - stream << authParamsString; - try { - boost::property_tree::read_json(stream, root); - for (const auto& item : root) { - params[item.first] = item.second.get_value(); - } - } catch (boost::property_tree::json_parser_error& e) { - LOG_ERROR("Invalid String Error: " << e.what()); - } - } - return params; -} - -AuthenticationPtr AuthOauth2::create(const std::string& authParamsString) { - ParamMap params = parseJsonAuthParamsString(authParamsString); - - return create(params); -} - -AuthenticationPtr AuthOauth2::create(ParamMap& params) { return AuthenticationPtr(new AuthOauth2(params)); } - -const std::string AuthOauth2::getAuthMethodName() const { return "token"; } - -Result AuthOauth2::getAuthData(AuthenticationDataPtr& authDataContent) { - if (cachedTokenPtr_ == nullptr || cachedTokenPtr_->isExpired()) { - try { - cachedTokenPtr_ = CachedTokenPtr(new Oauth2CachedToken(flowPtr_->authenticate())); - } catch (const std::runtime_error& e) { - // The real error logs have 
already been printed in authenticate() - return ResultAuthenticationError; - } - } - - authDataContent = cachedTokenPtr_->getAuthData(); - return ResultOk; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthOauth2.h b/pulsar-client-cpp/lib/auth/AuthOauth2.h deleted file mode 100644 index c940cf969853e..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthOauth2.h +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include -#include -#include -#include - -namespace pulsar { - -const std::string OAUTH2_TOKEN_PLUGIN_NAME = "oauth2token"; -const std::string OAUTH2_TOKEN_JAVA_PLUGIN_NAME = - "org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2"; - -class KeyFile { - public: - static KeyFile fromParamMap(ParamMap& params); - - const std::string& getClientId() const noexcept { return clientId_; } - const std::string& getClientSecret() const noexcept { return clientSecret_; } - bool isValid() const noexcept { return valid_; } - - private: - const std::string clientId_; - const std::string clientSecret_; - const bool valid_; - - KeyFile(const std::string& clientId, const std::string& clientSecret) - : clientId_(clientId), clientSecret_(clientSecret), valid_(true) {} - KeyFile() : valid_(false) {} - - static KeyFile fromFile(const std::string& filename); -}; - -class ClientCredentialFlow : public Oauth2Flow { - public: - ClientCredentialFlow(ParamMap& params); - void initialize(); - Oauth2TokenResultPtr authenticate(); - void close(); - - ParamMap generateParamMap() const; - std::string getTokenEndPoint() const; - - private: - std::string tokenEndPoint_; - const std::string issuerUrl_; - const KeyFile keyFile_; - const std::string audience_; - const std::string scope_; - std::once_flag initializeOnce_; -}; - -class Oauth2CachedToken : public CachedToken { - public: - using Clock = std::chrono::high_resolution_clock; - - Oauth2CachedToken(Oauth2TokenResultPtr token); - ~Oauth2CachedToken(); - bool isExpired(); - AuthenticationDataPtr getAuthData(); - - private: - std::chrono::time_point expiresAt_; - Oauth2TokenResultPtr latest_; - AuthenticationDataPtr authData_; -}; - -class AuthDataOauth2 : public AuthenticationDataProvider { - public: - AuthDataOauth2(const std::string& accessToken); - ~AuthDataOauth2(); - - bool hasDataForHttp(); - std::string getHttpHeaders(); - bool hasDataFromCommand(); - std::string getCommandData(); - - private: - 
std::string accessToken_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthTls.cc b/pulsar-client-cpp/lib/auth/AuthTls.cc deleted file mode 100644 index fdf7f210837d3..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthTls.cc +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -namespace pulsar { -AuthDataTls::AuthDataTls(const std::string& certificatePath, const std::string& privateKeyPath) { - tlsCertificate_ = certificatePath; - tlsPrivateKey_ = privateKeyPath; -} - -AuthDataTls::~AuthDataTls() {} - -bool AuthDataTls::hasDataForTls() { return true; } - -std::string AuthDataTls::getTlsCertificates() { return tlsCertificate_; } - -std::string AuthDataTls::getTlsPrivateKey() { return tlsPrivateKey_; } - -AuthTls::AuthTls(AuthenticationDataPtr& authDataTls) { authDataTls_ = authDataTls; } - -AuthTls::~AuthTls() {} - -AuthenticationPtr AuthTls::create(const std::string& authParamsString) { - ParamMap params = parseDefaultFormatAuthParams(authParamsString); - return create(params); -} - -AuthenticationPtr AuthTls::create(ParamMap& params) { - return create(params["tlsCertFile"], params["tlsKeyFile"]); -} - -AuthenticationPtr AuthTls::create(const std::string& certificatePath, const std::string& privateKeyPath) { - AuthenticationDataPtr authDataTls = - AuthenticationDataPtr(new AuthDataTls(certificatePath, privateKeyPath)); - return AuthenticationPtr(new AuthTls(authDataTls)); -} - -const std::string AuthTls::getAuthMethodName() const { return "tls"; } - -Result AuthTls::getAuthData(AuthenticationDataPtr& authDataContent) { - authDataContent = authDataTls_; - return ResultOk; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthTls.h b/pulsar-client-cpp/lib/auth/AuthTls.h deleted file mode 100644 index 510aea062be78..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthTls.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include - -namespace pulsar { - -const std::string TLS_PLUGIN_NAME = "tls"; -const std::string TLS_JAVA_PLUGIN_NAME = "org.apache.pulsar.client.impl.auth.AuthenticationTls"; - -class AuthDataTls : public AuthenticationDataProvider { - public: - AuthDataTls(const std::string& certificatePath, const std::string& privateKeyPath); - ~AuthDataTls(); - - bool hasDataForTls(); - std::string getTlsCertificates(); - std::string getTlsPrivateKey(); - - private: - std::string tlsCertificate_; - std::string tlsPrivateKey_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthToken.cc b/pulsar-client-cpp/lib/auth/AuthToken.cc deleted file mode 100644 index e8ebc72202d19..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthToken.cc +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "AuthToken.h" - -#include -#include -#include - -#include -#include - -namespace pulsar { - -// AuthDataToken - -AuthDataToken::AuthDataToken(const TokenSupplier &tokenSupplier) { tokenSupplier_ = tokenSupplier; } - -AuthDataToken::~AuthDataToken() {} - -bool AuthDataToken::hasDataForHttp() { return true; } - -std::string AuthDataToken::getHttpHeaders() { return "Authorization: Bearer " + tokenSupplier_(); } - -bool AuthDataToken::hasDataFromCommand() { return true; } - -std::string AuthDataToken::getCommandData() { return tokenSupplier_(); } - -static std::string readDirect(const std::string &token) { return token; } - -static std::string readFromFile(const std::string &tokenFilePath) { - std::ifstream input(tokenFilePath); - std::stringstream buffer; - buffer << input.rdbuf(); - return buffer.str(); -} - -static std::string readFromEnv(const std::string &envVarName) { - char *value = getenv(envVarName.c_str()); - if (!value) { - throw std::runtime_error("Failed to read environment variable " + envVarName); - } - return std::string(value); -} - -// AuthToken - -AuthToken::AuthToken(AuthenticationDataPtr &authDataToken) { authDataToken_ = authDataToken; } - -AuthToken::~AuthToken() {} - -AuthenticationPtr AuthToken::create(ParamMap ¶ms) { - if (params.find("token") != params.end()) { - return create(std::bind(&readDirect, params["token"])); - } else if (params.find("file") != params.end()) { - // Read token from a file - return create(std::bind(&readFromFile, params["file"])); - } else if (params.find("env") != params.end()) { - // Read token from environment variable - std::string envVarName = params["env"]; - return create(std::bind(&readFromEnv, envVarName)); - } else { - throw std::runtime_error("Invalid configuration for token provider"); - } -} - -AuthenticationPtr AuthToken::create(const std::string &authParamsString) { - ParamMap params; 
- if (boost::starts_with(authParamsString, "token:")) { - std::string token = authParamsString.substr(strlen("token:")); - params["token"] = token; - } else if (boost::starts_with(authParamsString, "file:")) { - // Read token from a file - std::string filePath = authParamsString.substr(strlen("file://")); - params["file"] = filePath; - } else if (boost::starts_with(authParamsString, "env:")) { - std::string envVarName = authParamsString.substr(strlen("env:")); - params["env"] = envVarName; - } else { - std::string token = authParamsString; - params["token"] = token; - } - - return create(params); -} - -AuthenticationPtr AuthToken::createWithToken(const std::string &token) { - return create(std::bind(&readDirect, token)); -} - -AuthenticationPtr AuthToken::create(const TokenSupplier &tokenSupplier) { - AuthenticationDataPtr authDataToken = AuthenticationDataPtr(new AuthDataToken(tokenSupplier)); - return AuthenticationPtr(new AuthToken(authDataToken)); -} - -const std::string AuthToken::getAuthMethodName() const { return "token"; } - -Result AuthToken::getAuthData(AuthenticationDataPtr &authDataContent) { - authDataContent = authDataToken_; - return ResultOk; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/AuthToken.h b/pulsar-client-cpp/lib/auth/AuthToken.h deleted file mode 100644 index 8473fe3135303..0000000000000 --- a/pulsar-client-cpp/lib/auth/AuthToken.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#pragma once - -#include -#include -#include - -namespace pulsar { - -const std::string TOKEN_PLUGIN_NAME = "token"; -const std::string TOKEN_JAVA_PLUGIN_NAME = "org.apache.pulsar.client.impl.auth.AuthenticationToken"; - -class AuthDataToken : public AuthenticationDataProvider { - public: - AuthDataToken(const std::string& token); - AuthDataToken(const TokenSupplier& tokenSupplier); - ~AuthDataToken(); - - bool hasDataForHttp(); - std::string getHttpHeaders(); - bool hasDataFromCommand(); - std::string getCommandData(); - - private: - TokenSupplier tokenSupplier_; -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/athenz/ZTSClient.cc b/pulsar-client-cpp/lib/auth/athenz/ZTSClient.cc deleted file mode 100644 index 919536fe736f7..0000000000000 --- a/pulsar-client-cpp/lib/auth/athenz/ZTSClient.cc +++ /dev/null @@ -1,391 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "ZTSClient.h" -#include - -#ifndef _MSC_VER -#include -#else -#include -#endif -#include -#include - -#include -#include -#include -#include - -#include - -#include -#include -namespace ptree = boost::property_tree; - -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunknown-warning-option" -#endif - -#include - -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - -#include -#include - -#include - -#ifdef PULSAR_USE_BOOST_REGEX -#include -#define PULSAR_REGEX_NAMESPACE boost -#else -#include -#define PULSAR_REGEX_NAMESPACE std -#endif - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -const static std::string DEFAULT_PRINCIPAL_HEADER = "Athenz-Principal-Auth"; -const static std::string DEFAULT_ROLE_HEADER = "Athenz-Role-Auth"; -const static int REQUEST_TIMEOUT = 30000; -const static int DEFAULT_TOKEN_EXPIRATION_TIME_SEC = 3600; -const static int MIN_TOKEN_EXPIRATION_TIME_SEC = 900; -const static int MAX_HTTP_REDIRECTS = 20; -const static long long FETCH_EPSILON = 60; // if cache expires in 60 seconds, get it from ZTS -const static std::string requiredParams[] = {"tenantDomain", "tenantService", "providerDomain", "privateKey", - "ztsUrl"}; - -std::map ZTSClient::roleTokenCache_; - -ZTSClient::ZTSClient(std::map ¶ms) { - // required parameter check - bool valid = true; - for (int i = 0; i < sizeof(requiredParams) / sizeof(std::string); i++) { - if (params.find(requiredParams[i]) == params.end()) { - valid = false; - LOG_ERROR(requiredParams[i] << " parameter is required"); - } - } - - if (!valid) { - LOG_ERROR("Some parameters are missing") - return; - } - - // set required value - tenantDomain_ = params[requiredParams[0]]; - tenantService_ = params[requiredParams[1]]; - providerDomain_ = params[requiredParams[2]]; - privateKeyUri_ = parseUri(params[requiredParams[3]].c_str()); - ztsUrl_ = 
params[requiredParams[4]]; - - // set optional value - keyId_ = params.find("keyId") == params.end() ? "0" : params["keyId"]; - principalHeader_ = - params.find("principalHeader") == params.end() ? DEFAULT_PRINCIPAL_HEADER : params["principalHeader"]; - roleHeader_ = params.find("roleHeader") == params.end() ? DEFAULT_ROLE_HEADER : params["roleHeader"]; - tokenExpirationTime_ = DEFAULT_TOKEN_EXPIRATION_TIME_SEC; - if (params.find("tokenExpirationTime") != params.end()) { - tokenExpirationTime_ = std::stoi(params["tokenExpirationTime"]); - if (tokenExpirationTime_ < MIN_TOKEN_EXPIRATION_TIME_SEC) { - LOG_WARN(tokenExpirationTime_ << " is too small as a token expiration time. " - << MIN_TOKEN_EXPIRATION_TIME_SEC << " is set instead of it."); - tokenExpirationTime_ = MIN_TOKEN_EXPIRATION_TIME_SEC; - } - } - - if (*(--ztsUrl_.end()) == '/') { - ztsUrl_.erase(--ztsUrl_.end()); - } - - LOG_DEBUG("ZTSClient is constructed properly") -} - -ZTSClient::~ZTSClient(){LOG_DEBUG("ZTSClient is destructed")} - -std::string ZTSClient::getSalt() { - unsigned long long salt = 0; - for (int i = 0; i < 8; i++) { - salt += ((unsigned long long)rand() % (1 << 8)) << 8 * i; - } - std::stringstream ss; - ss << std::hex << salt; - return ss.str(); -} - -std::string ZTSClient::ybase64Encode(const unsigned char *input, int length) { - // base64 encode - typedef boost::archive::iterators::base64_from_binary< - boost::archive::iterators::transform_width > - base64; - std::string ret = std::string(base64(input), base64(input + length)); - - // replace '+', '/' to '.', '_' for ybase64 - for (std::string::iterator itr = ret.begin(); itr != ret.end(); itr++) { - switch (*itr) { - case '+': - ret.replace(itr, itr + 1, "."); - break; - case '/': - ret.replace(itr, itr + 1, "_"); - break; - default: - break; - } - } - - // padding by '-' - for (int i = 4 - ret.size() % 4; i; i--) { - ret.push_back('-'); - } - - return ret; -} - -char *ZTSClient::base64Decode(const char *input) { - if (input == NULL) { 
- return NULL; - } - - size_t length = strlen(input); - if (length == 0) { - return NULL; - } - - BIO *bio, *b64; - char *result = (char *)malloc(length); - - bio = BIO_new_mem_buf((void *)input, -1); - b64 = BIO_new(BIO_f_base64()); - bio = BIO_push(b64, bio); - - BIO_set_flags(bio, BIO_FLAGS_BASE64_NO_NL); - int decodeStrLen = BIO_read(bio, result, length); - BIO_free_all(bio); - if (decodeStrLen > 0) { - result[decodeStrLen] = '\0'; - return result; - } - free(result); - - return NULL; -} - -const std::string ZTSClient::getPrincipalToken() const { - // construct unsigned principal token - std::string unsignedTokenString = "v=S1"; - char host[BUFSIZ] = {}; - long long t = (long long)time(NULL); - - gethostname(host, sizeof(host)); - - unsignedTokenString += ";d=" + tenantDomain_; - unsignedTokenString += ";n=" + tenantService_; - unsignedTokenString += ";h=" + std::string(host); - unsignedTokenString += ";a=" + getSalt(); - unsignedTokenString += ";t=" + std::to_string(t); - unsignedTokenString += ";e=" + std::to_string(t + tokenExpirationTime_); - unsignedTokenString += ";k=" + keyId_; - - LOG_DEBUG("Created unsigned principal token: " << unsignedTokenString); - - // signing - const char *unsignedToken = unsignedTokenString.c_str(); - unsigned char signature[BUFSIZ] = {}; - unsigned char hash[SHA256_DIGEST_LENGTH] = {}; - unsigned int siglen; - FILE *fp; - RSA *privateKey; - - if (privateKeyUri_.scheme == "data") { - if (privateKeyUri_.mediaTypeAndEncodingType != "application/x-pem-file;base64") { - LOG_ERROR("Unsupported mediaType or encodingType: " << privateKeyUri_.mediaTypeAndEncodingType); - return ""; - } - char *decodeStr = base64Decode(privateKeyUri_.data.c_str()); - - if (decodeStr == NULL) { - LOG_ERROR("Failed to decode privateKey"); - return ""; - } - - BIO *bio = BIO_new_mem_buf((void *)decodeStr, -1); - BIO_set_flags(bio, BIO_FLAGS_BASE64_NO_NL); - if (bio == NULL) { - LOG_ERROR("Failed to create key BIO"); - free(decodeStr); - return ""; - } - 
privateKey = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, NULL); - BIO_free(bio); - free(decodeStr); - if (privateKey == NULL) { - LOG_ERROR("Failed to load privateKey"); - return ""; - } - } else if (privateKeyUri_.scheme == "file") { - fp = fopen(privateKeyUri_.path.c_str(), "r"); - if (fp == NULL) { - LOG_ERROR("Failed to open athenz private key file: " << privateKeyUri_.path); - return ""; - } - - privateKey = PEM_read_RSAPrivateKey(fp, NULL, NULL, NULL); - fclose(fp); - if (privateKey == NULL) { - LOG_ERROR("Failed to read private key: " << privateKeyUri_.path); - return ""; - } - } else { - LOG_ERROR("Unsupported URI Scheme: " << privateKeyUri_.scheme); - return ""; - } - - SHA256((unsigned char *)unsignedToken, unsignedTokenString.length(), hash); - RSA_sign(NID_sha256, hash, SHA256_DIGEST_LENGTH, signature, &siglen, privateKey); - - std::string principalToken = unsignedTokenString + ";s=" + ybase64Encode(signature, siglen); - LOG_DEBUG("Created signed principal token: " << principalToken); - - RSA_free(privateKey); - - return principalToken; -} - -static size_t curlWriteCallback(void *contents, size_t size, size_t nmemb, void *responseDataPtr) { - ((std::string *)responseDataPtr)->append((char *)contents, size * nmemb); - return size * nmemb; -} - -static std::mutex cacheMtx_; -const std::string ZTSClient::getRoleToken() const { - RoleToken roleToken; - std::string cacheKey = "p=" + tenantDomain_ + "." 
+ tenantService_ + ";d=" + providerDomain_; - - // locked block - { - std::lock_guard lock(cacheMtx_); - roleToken = roleTokenCache_[cacheKey]; - } - - if (!roleToken.token.empty() && roleToken.expiryTime > (long long)time(NULL) + FETCH_EPSILON) { - LOG_DEBUG("Got cached role token " << roleToken.token); - return roleToken.token; - } - - std::string completeUrl = ztsUrl_ + "/zts/v1/domain/" + providerDomain_ + "/token"; - - CURL *handle; - CURLcode res; - std::string responseData; - - handle = curl_easy_init(); - - // set URL - curl_easy_setopt(handle, CURLOPT_URL, completeUrl.c_str()); - - // Write callback - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); - curl_easy_setopt(handle, CURLOPT_WRITEDATA, &responseData); - - // New connection is made for each call - curl_easy_setopt(handle, CURLOPT_FRESH_CONNECT, 1L); - curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); - - // Skipping signal handling - results in timeouts not honored during the DNS lookup - curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1L); - - // Timer - curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, REQUEST_TIMEOUT); - - // Redirects - curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(handle, CURLOPT_MAXREDIRS, MAX_HTTP_REDIRECTS); - - // Fail if HTTP return code >= 400 - curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1L); - - struct curl_slist *list = NULL; - std::string httpHeader = principalHeader_ + ": " + getPrincipalToken(); - list = curl_slist_append(list, httpHeader.c_str()); - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, list); - - // Make get call to server - res = curl_easy_perform(handle); - - // Free header list - curl_slist_free_all(list); - - switch (res) { - case CURLE_OK: - long response_code; - curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); - LOG_DEBUG("Response received for url " << completeUrl << " code " << response_code); - if (response_code == 200) { - ptree::ptree root; - std::stringstream stream; - stream << 
responseData; - try { - ptree::read_json(stream, root); - } catch (ptree::json_parser_error &e) { - LOG_ERROR("Failed to parse json of ZTS response: " << e.what() - << "\nInput Json = " << responseData); - break; - } - - roleToken.token = root.get("token"); - roleToken.expiryTime = root.get("expiryTime"); - std::lock_guard lock(cacheMtx_); - roleTokenCache_[cacheKey] = roleToken; - LOG_DEBUG("Got role token " << roleToken.token) - } else { - LOG_ERROR("Response failed for url " << completeUrl << ". response Code " << response_code) - } - break; - default: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - break; - } - curl_easy_cleanup(handle); - - return roleToken.token; -} - -const std::string ZTSClient::getHeader() const { return roleHeader_; } - -PrivateKeyUri ZTSClient::parseUri(const char *uri) { - PrivateKeyUri uriSt; - // scheme mediatype[;base64] path file - static const PULSAR_REGEX_NAMESPACE::regex expression( - R"(^(?:([A-Za-z]+):)(?:([/\w\-]+;\w+),([=\w]+))?(?:\/\/)?([^?#]+)?)"); - PULSAR_REGEX_NAMESPACE::cmatch groups; - if (PULSAR_REGEX_NAMESPACE::regex_match(uri, groups, expression)) { - uriSt.scheme = groups.str(1); - uriSt.mediaTypeAndEncodingType = groups.str(2); - uriSt.data = groups.str(3); - uriSt.path = groups.str(4); - } - return uriSt; -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/auth/athenz/ZTSClient.h b/pulsar-client-cpp/lib/auth/athenz/ZTSClient.h deleted file mode 100644 index fdc690c4851da..0000000000000 --- a/pulsar-client-cpp/lib/auth/athenz/ZTSClient.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include - -namespace pulsar { - -struct RoleToken { - std::string token; - long long expiryTime; -}; - -struct PrivateKeyUri { - std::string scheme; - std::string mediaTypeAndEncodingType; - std::string data; - std::string path; -}; - -class PULSAR_PUBLIC ZTSClient { - public: - ZTSClient(std::map& params); - const std::string getRoleToken() const; - const std::string getHeader() const; - ~ZTSClient(); - - private: - std::string tenantDomain_; - std::string tenantService_; - std::string providerDomain_; - PrivateKeyUri privateKeyUri_; - std::string ztsUrl_; - std::string keyId_; - std::string principalHeader_; - std::string roleHeader_; - int tokenExpirationTime_; - static std::map roleTokenCache_; - static std::string getSalt(); - static std::string ybase64Encode(const unsigned char* input, int length); - static char* base64Decode(const char* input); - const std::string getPrincipalToken() const; - static PrivateKeyUri parseUri(const char* uri); - - friend class ZTSClientWrapper; -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/c/cStringList.cc b/pulsar-client-cpp/lib/c/cStringList.cc deleted file mode 100644 index cfe2c26be2282..0000000000000 --- a/pulsar-client-cpp/lib/c/cStringList.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include "c_structs.h" - -pulsar_string_list_t *pulsar_string_list_create() { return new pulsar_string_list_t; } - -void pulsar_string_list_free(pulsar_string_list_t *list) { delete list; } - -int pulsar_string_list_size(pulsar_string_list_t *list) { return list->list.size(); } - -void pulsar_string_list_append(pulsar_string_list_t *list, const char *item) { list->list.push_back(item); } - -const char *pulsar_string_list_get(pulsar_string_list_t *list, int index) { - return list->list[index].c_str(); -} diff --git a/pulsar-client-cpp/lib/c/cStringMap.cc b/pulsar-client-cpp/lib/c/cStringMap.cc deleted file mode 100644 index 221dce4b57db2..0000000000000 --- a/pulsar-client-cpp/lib/c/cStringMap.cc +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include "c_structs.h" - -pulsar_string_map_t *pulsar_string_map_create() { return new pulsar_string_map_t; } - -void pulsar_string_map_free(pulsar_string_map_t *map) { delete map; } - -int pulsar_string_map_size(pulsar_string_map_t *map) { return map->map.size(); } - -void pulsar_string_map_put(pulsar_string_map_t *map, const char *key, const char *value) { - map->map[key] = value; -} - -const char *pulsar_string_map_get(pulsar_string_map_t *map, const char *key) { - std::map::iterator it = map->map.find(key); - - if (it == map->map.end()) { - return NULL; - } else { - return it->second.c_str(); - } -} - -const char *pulsar_string_map_get_key(pulsar_string_map_t *map, int idx) { - std::map::iterator it = map->map.begin(); - while (idx-- > 0) { - ++it; - } - - return it->first.c_str(); -} - -const char *pulsar_string_map_get_value(pulsar_string_map_t *map, int idx) { - std::map::iterator it = map->map.begin(); - while (idx-- > 0) { - ++it; - } - - return it->second.c_str(); -} \ No newline at end of file diff --git a/pulsar-client-cpp/lib/c/c_Authentication.cc b/pulsar-client-cpp/lib/c/c_Authentication.cc deleted file mode 100644 index 8384fac5f6b3d..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Authentication.cc +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include - -#include "c_structs.h" - -#include - -pulsar_authentication_t *pulsar_authentication_create(const char *dynamicLibPath, - const char *authParamsString) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthFactory::create(dynamicLibPath, authParamsString); - return authentication; -} - -void pulsar_authentication_free(pulsar_authentication_t *authentication) { delete authentication; } - -pulsar_authentication_t *pulsar_authentication_tls_create(const char *certificatePath, - const char *privateKeyPath) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthTls::create(certificatePath, privateKeyPath); - return authentication; -} - -pulsar_authentication_t *pulsar_authentication_athenz_create(const char *authParamsString) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthAthenz::create(authParamsString); - return authentication; -} - -pulsar_authentication_t *pulsar_authentication_token_create(const char *token) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthToken::createWithToken(token); - return authentication; -} - -static std::string tokenSupplierWrapper(token_supplier supplier, void *ctx) { - const char *token = 
supplier(ctx); - std::string tokenStr = token; - free((void *)token); - return tokenStr; -} - -pulsar_authentication_t *pulsar_authentication_token_create_with_supplier(token_supplier tokenSupplier, - void *ctx) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthToken::create(std::bind(&tokenSupplierWrapper, tokenSupplier, ctx)); - return authentication; -} - -pulsar_authentication_t *pulsar_authentication_oauth2_create(const char *authParamsString) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthOauth2::create(authParamsString); - return authentication; -} - -pulsar_authentication_t *pulsar_authentication_basic_create(const char *username, const char *password) { - pulsar_authentication_t *authentication = new pulsar_authentication_t; - authentication->auth = pulsar::AuthBasic::create(username, password); - return authentication; -} diff --git a/pulsar-client-cpp/lib/c/c_Client.cc b/pulsar-client-cpp/lib/c/c_Client.cc deleted file mode 100644 index dd1b71e45c5a6..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Client.cc +++ /dev/null @@ -1,243 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include "c_structs.h" - -pulsar_client_t *pulsar_client_create(const char *serviceUrl, - const pulsar_client_configuration_t *clientConfiguration) { - pulsar_client_t *c_client = new pulsar_client_t; - c_client->client.reset(new pulsar::Client(std::string(serviceUrl), clientConfiguration->conf)); - return c_client; -} - -void pulsar_client_free(pulsar_client_t *client) { delete client; } - -pulsar_result pulsar_client_create_producer(pulsar_client_t *client, const char *topic, - const pulsar_producer_configuration_t *conf, - pulsar_producer_t **c_producer) { - pulsar::Producer producer; - pulsar::Result res = client->client->createProducer(topic, conf->conf, producer); - if (res == pulsar::ResultOk) { - (*c_producer) = new pulsar_producer_t; - (*c_producer)->producer = producer; - return pulsar_result_Ok; - } else { - return (pulsar_result)res; - } -} - -static void handle_create_producer_callback(pulsar::Result result, pulsar::Producer producer, - pulsar_create_producer_callback callback, void *ctx) { - if (result == pulsar::ResultOk) { - pulsar_producer_t *c_producer = new pulsar_producer_t; - c_producer->producer = producer; - callback(pulsar_result_Ok, c_producer, ctx); - } else { - callback((pulsar_result)result, NULL, ctx); - } -} - -void pulsar_client_create_producer_async(pulsar_client_t *client, const char *topic, - const pulsar_producer_configuration_t *conf, - pulsar_create_producer_callback callback, void *ctx) { - client->client->createProducerAsync(topic, conf->conf, - std::bind(&handle_create_producer_callback, std::placeholders::_1, - std::placeholders::_2, callback, ctx)); -} - -pulsar_result pulsar_client_subscribe(pulsar_client_t *client, const char *topic, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_consumer_t **c_consumer) { - pulsar::Consumer consumer; - pulsar::Result res = 
- client->client->subscribe(topic, subscriptionName, conf->consumerConfiguration, consumer); - if (res == pulsar::ResultOk) { - (*c_consumer) = new pulsar_consumer_t; - (*c_consumer)->consumer = consumer; - return pulsar_result_Ok; - } else { - return (pulsar_result)res; - } -} - -static void handle_subscribe_callback(pulsar::Result result, pulsar::Consumer consumer, - pulsar_subscribe_callback callback, void *ctx) { - if (result == pulsar::ResultOk) { - pulsar_consumer_t *c_consumer = new pulsar_consumer_t; - c_consumer->consumer = consumer; - callback(pulsar_result_Ok, c_consumer, ctx); - } else { - callback((pulsar_result)result, NULL, ctx); - } -} - -void pulsar_client_subscribe_async(pulsar_client_t *client, const char *topic, const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_subscribe_callback callback, void *ctx) { - client->client->subscribeAsync( - topic, subscriptionName, conf->consumerConfiguration, - std::bind(&handle_subscribe_callback, std::placeholders::_1, std::placeholders::_2, callback, ctx)); -} - -pulsar_result pulsar_client_subscribe_multi_topics(pulsar_client_t *client, const char **topics, - int topicsCount, const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_consumer_t **c_consumer) { - pulsar::Consumer consumer; - std::vector topicsList; - for (int i = 0; i < topicsCount; i++) { - topicsList.push_back(topics[i]); - } - - pulsar::Result res = - client->client->subscribe(topicsList, subscriptionName, conf->consumerConfiguration, consumer); - if (res == pulsar::ResultOk) { - (*c_consumer) = new pulsar_consumer_t; - (*c_consumer)->consumer = consumer; - return pulsar_result_Ok; - } else { - return (pulsar_result)res; - } -} - -void pulsar_client_subscribe_multi_topics_async(pulsar_client_t *client, const char **topics, int topicsCount, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_subscribe_callback callback, void *ctx) { - 
std::vector topicsList; - for (int i = 0; i < topicsCount; i++) { - topicsList.push_back(topics[i]); - } - - client->client->subscribeAsync( - topicsList, subscriptionName, conf->consumerConfiguration, - std::bind(&handle_subscribe_callback, std::placeholders::_1, std::placeholders::_2, callback, ctx)); -} - -pulsar_result pulsar_client_subscribe_pattern(pulsar_client_t *client, const char *topicPattern, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_consumer_t **c_consumer) { - pulsar::Consumer consumer; - pulsar::Result res = client->client->subscribeWithRegex(topicPattern, subscriptionName, - conf->consumerConfiguration, consumer); - if (res == pulsar::ResultOk) { - (*c_consumer) = new pulsar_consumer_t; - (*c_consumer)->consumer = consumer; - return pulsar_result_Ok; - } else { - return (pulsar_result)res; - } -} - -void pulsar_client_subscribe_pattern_async(pulsar_client_t *client, const char *topicPattern, - const char *subscriptionName, - const pulsar_consumer_configuration_t *conf, - pulsar_subscribe_callback callback, void *ctx) { - client->client->subscribeWithRegexAsync( - topicPattern, subscriptionName, conf->consumerConfiguration, - std::bind(&handle_subscribe_callback, std::placeholders::_1, std::placeholders::_2, callback, ctx)); -} - -pulsar_result pulsar_client_create_reader(pulsar_client_t *client, const char *topic, - const pulsar_message_id_t *startMessageId, - pulsar_reader_configuration_t *conf, pulsar_reader_t **c_reader) { - pulsar::Reader reader; - pulsar::Result res = client->client->createReader(topic, startMessageId->messageId, conf->conf, reader); - if (res == pulsar::ResultOk) { - (*c_reader) = new pulsar_reader_t; - (*c_reader)->reader = reader; - return pulsar_result_Ok; - } else { - return (pulsar_result)res; - } -} - -static void handle_reader_callback(pulsar::Result result, pulsar::Reader reader, - pulsar_reader_callback callback, void *ctx) { - if (result == pulsar::ResultOk) { - 
pulsar_reader_t *c_reader = new pulsar_reader_t; - c_reader->reader = reader; - callback(pulsar_result_Ok, c_reader, ctx); - } else { - callback((pulsar_result)result, NULL, ctx); - } -} - -void pulsar_client_create_reader_async(pulsar_client_t *client, const char *topic, - const pulsar_message_id_t *startMessageId, - pulsar_reader_configuration_t *conf, pulsar_reader_callback callback, - void *ctx) { - client->client->createReaderAsync( - topic, startMessageId->messageId, conf->conf, - std::bind(&handle_reader_callback, std::placeholders::_1, std::placeholders::_2, callback, ctx)); -} - -pulsar_result pulsar_client_get_topic_partitions(pulsar_client_t *client, const char *topic, - pulsar_string_list_t **partitions) { - std::vector partitionsList; - pulsar::Result res = client->client->getPartitionsForTopic(topic, partitionsList); - if (res == pulsar::ResultOk) { - (*partitions) = pulsar_string_list_create(); - - for (int i = 0; i < partitionsList.size(); i++) { - pulsar_string_list_append(*partitions, partitionsList[i].c_str()); - } - - return pulsar_result_Ok; - } else { - return (pulsar_result)res; - } -} - -static void handle_get_partitions_callback(pulsar::Result result, - const std::vector &partitionsList, - pulsar_get_partitions_callback callback, void *ctx) { - if (result == pulsar::ResultOk) { - pulsar_string_list_t *partitions = pulsar_string_list_create(); - - for (int i = 0; i < partitionsList.size(); i++) { - pulsar_string_list_append(partitions, partitionsList[i].c_str()); - } - - callback((pulsar_result)result, partitions, ctx); - } else { - callback((pulsar_result)result, NULL, ctx); - } -} - -void pulsar_client_get_topic_partitions_async(pulsar_client_t *client, const char *topic, - pulsar_get_partitions_callback callback, void *ctx) { - client->client->getPartitionsForTopicAsync( - topic, std::bind(&handle_get_partitions_callback, std::placeholders::_1, std::placeholders::_2, - callback, ctx)); -} - -pulsar_result 
pulsar_client_close(pulsar_client_t *client) { return (pulsar_result)client->client->close(); } - -static void handle_client_close(pulsar::Result result, pulsar_close_callback callback, void *ctx) { - callback((pulsar_result)result, ctx); -} - -void pulsar_client_close_async(pulsar_client_t *client, pulsar_close_callback callback, void *ctx) { - client->client->closeAsync(std::bind(handle_client_close, std::placeholders::_1, callback, ctx)); -} diff --git a/pulsar-client-cpp/lib/c/c_ClientConfiguration.cc b/pulsar-client-cpp/lib/c/c_ClientConfiguration.cc deleted file mode 100644 index 8f4051d79f129..0000000000000 --- a/pulsar-client-cpp/lib/c/c_ClientConfiguration.cc +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include - -#include "c_structs.h" - -pulsar_client_configuration_t *pulsar_client_configuration_create() { - pulsar_client_configuration_t *c_conf = new pulsar_client_configuration_t; - c_conf->conf = pulsar::ClientConfiguration(); - return c_conf; -} - -void pulsar_client_configuration_free(pulsar_client_configuration_t *conf) { delete conf; } - -void pulsar_client_configuration_set_auth(pulsar_client_configuration_t *conf, - pulsar_authentication_t *authentication) { - conf->conf.setAuth(authentication->auth); -} - -void pulsar_client_configuration_set_operation_timeout_seconds(pulsar_client_configuration_t *conf, - int timeout) { - conf->conf.setOperationTimeoutSeconds(timeout); -} - -int pulsar_client_configuration_get_operation_timeout_seconds(pulsar_client_configuration_t *conf) { - return conf->conf.getOperationTimeoutSeconds(); -} - -void pulsar_client_configuration_set_io_threads(pulsar_client_configuration_t *conf, int threads) { - conf->conf.setIOThreads(threads); -} - -int pulsar_client_configuration_get_io_threads(pulsar_client_configuration_t *conf) { - return conf->conf.getIOThreads(); -} - -void pulsar_client_configuration_set_message_listener_threads(pulsar_client_configuration_t *conf, - int threads) { - conf->conf.setMessageListenerThreads(threads); -} - -int pulsar_client_configuration_get_message_listener_threads(pulsar_client_configuration_t *conf) { - return conf->conf.getMessageListenerThreads(); -} - -void pulsar_client_configuration_set_concurrent_lookup_request(pulsar_client_configuration_t *conf, - int concurrentLookupRequest) { - conf->conf.setConcurrentLookupRequest(concurrentLookupRequest); -} - -int pulsar_client_configuration_get_concurrent_lookup_request(pulsar_client_configuration_t *conf) { - return conf->conf.getConcurrentLookupRequest(); -} - -class PulsarCLogger : public pulsar::Logger { - std::string file_; - pulsar_logger logger_; - void *ctx_; - - public: - PulsarCLogger(const std::string &file, pulsar_logger 
logger, void *ctx) - : file_(file), logger_(logger), ctx_(ctx) {} - - bool isEnabled(Level level) { return level >= pulsar::Logger::LEVEL_INFO; } - - void log(Level level, int line, const std::string &message) { - logger_((pulsar_logger_level_t)level, file_.c_str(), line, message.c_str(), ctx_); - } -}; - -class PulsarCLoggerFactory : public pulsar::LoggerFactory { - pulsar_logger logger_; - void *ctx_; - - public: - PulsarCLoggerFactory(pulsar_logger logger, void *ctx) : logger_(logger), ctx_(ctx) {} - - pulsar::Logger *getLogger(const std::string &fileName) { - return new PulsarCLogger(fileName, logger_, ctx_); - } -}; - -void pulsar_client_configuration_set_logger(pulsar_client_configuration_t *conf, pulsar_logger logger, - void *ctx) { - conf->conf.setLogger(new PulsarCLoggerFactory(logger, ctx)); -} - -void pulsar_client_configuration_set_use_tls(pulsar_client_configuration_t *conf, int useTls) { - conf->conf.setUseTls(useTls); -} - -int pulsar_client_configuration_is_use_tls(pulsar_client_configuration_t *conf) { - return conf->conf.isUseTls(); -} - -void pulsar_client_configuration_set_validate_hostname(pulsar_client_configuration_t *conf, - int validateHostName) { - conf->conf.setValidateHostName(validateHostName); -} - -int pulsar_client_configuration_is_validate_hostname(pulsar_client_configuration_t *conf) { - return conf->conf.isValidateHostName(); -} - -void pulsar_client_configuration_set_tls_trust_certs_file_path(pulsar_client_configuration_t *conf, - const char *tlsTrustCertsFilePath) { - conf->conf.setTlsTrustCertsFilePath(tlsTrustCertsFilePath); -} - -const char *pulsar_client_configuration_get_tls_trust_certs_file_path(pulsar_client_configuration_t *conf) { - return conf->conf.getTlsTrustCertsFilePath().c_str(); -} - -void pulsar_client_configuration_set_tls_allow_insecure_connection(pulsar_client_configuration_t *conf, - int allowInsecure) { - conf->conf.setTlsAllowInsecureConnection(allowInsecure); -} - -int 
pulsar_client_configuration_is_tls_allow_insecure_connection(pulsar_client_configuration_t *conf) { - return conf->conf.isTlsAllowInsecureConnection(); -} - -void pulsar_client_configuration_set_stats_interval_in_seconds(pulsar_client_configuration_t *conf, - const unsigned int interval) { - conf->conf.setStatsIntervalInSeconds(interval); -} - -const unsigned int pulsar_client_configuration_get_stats_interval_in_seconds( - pulsar_client_configuration_t *conf) { - return conf->conf.getStatsIntervalInSeconds(); -} - -void pulsar_client_configuration_set_memory_limit(pulsar_client_configuration_t *conf, - unsigned long long memoryLimitBytes) { - conf->conf.setMemoryLimit(memoryLimitBytes); -} - -/** - * @return the client memory limit in bytes - */ -unsigned long long pulsar_client_configuration_get_memory_limit(pulsar_client_configuration_t *conf) { - return conf->conf.getMemoryLimit(); -} diff --git a/pulsar-client-cpp/lib/c/c_Consumer.cc b/pulsar-client-cpp/lib/c/c_Consumer.cc deleted file mode 100644 index 062c801fb9835..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Consumer.cc +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include - -#include "c_structs.h" - -const char *pulsar_consumer_get_topic(pulsar_consumer_t *consumer) { - return consumer->consumer.getTopic().c_str(); -} - -const char *pulsar_consumer_get_subscription_name(pulsar_consumer_t *consumer) { - return consumer->consumer.getSubscriptionName().c_str(); -} - -pulsar_result pulsar_consumer_unsubscribe(pulsar_consumer_t *consumer) { - return (pulsar_result)consumer->consumer.unsubscribe(); -} - -void pulsar_consumer_unsubscribe_async(pulsar_consumer_t *consumer, pulsar_result_callback callback, - void *ctx) { - consumer->consumer.unsubscribeAsync( - std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -pulsar_result pulsar_consumer_receive(pulsar_consumer_t *consumer, pulsar_message_t **msg) { - pulsar::Message message; - pulsar::Result res = consumer->consumer.receive(message); - if (res == pulsar::ResultOk) { - (*msg) = new pulsar_message_t; - (*msg)->message = message; - } - return (pulsar_result)res; -} - -pulsar_result pulsar_consumer_receive_with_timeout(pulsar_consumer_t *consumer, pulsar_message_t **msg, - int timeoutMs) { - pulsar::Message message; - pulsar::Result res = consumer->consumer.receive(message, timeoutMs); - if (res == pulsar::ResultOk) { - (*msg) = new pulsar_message_t; - (*msg)->message = message; - } - return (pulsar_result)res; -} - -static void handle_receive_callback(pulsar::Result result, pulsar::Message message, - pulsar_receive_callback callback, void *ctx) { - if (callback) { - pulsar_message_t *msg = new pulsar_message_t; - msg->message = message; - callback((pulsar_result)result, msg, ctx); - } -} - -void pulsar_consumer_receive_async(pulsar_consumer_t *consumer, pulsar_receive_callback callback, void *ctx) { - consumer->consumer.receiveAsync( - std::bind(handle_receive_callback, std::placeholders::_1, std::placeholders::_2, callback, ctx)); -} - -pulsar_result pulsar_consumer_acknowledge(pulsar_consumer_t *consumer, pulsar_message_t *message) { - return 
(pulsar_result)consumer->consumer.acknowledge(message->message); -} - -pulsar_result pulsar_consumer_acknowledge_id(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId) { - return (pulsar_result)consumer->consumer.acknowledge(messageId->messageId); -} - -void pulsar_consumer_acknowledge_async(pulsar_consumer_t *consumer, pulsar_message_t *message, - pulsar_result_callback callback, void *ctx) { - consumer->consumer.acknowledgeAsync( - message->message, std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -void pulsar_consumer_acknowledge_async_id(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId, - pulsar_result_callback callback, void *ctx) { - consumer->consumer.acknowledgeAsync( - messageId->messageId, std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -pulsar_result pulsar_consumer_acknowledge_cumulative(pulsar_consumer_t *consumer, pulsar_message_t *message) { - return (pulsar_result)consumer->consumer.acknowledgeCumulative(message->message); -} - -pulsar_result pulsar_consumer_acknowledge_cumulative_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId) { - return (pulsar_result)consumer->consumer.acknowledge(messageId->messageId); -} - -void pulsar_consumer_acknowledge_cumulative_async(pulsar_consumer_t *consumer, pulsar_message_t *message, - pulsar_result_callback callback, void *ctx) { - consumer->consumer.acknowledgeCumulativeAsync( - message->message, std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -void pulsar_consumer_acknowledge_cumulative_async_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId, - pulsar_result_callback callback, void *ctx) { - consumer->consumer.acknowledgeCumulativeAsync( - messageId->messageId, std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -void pulsar_consumer_negative_acknowledge(pulsar_consumer_t *consumer, pulsar_message_t *message) { - 
consumer->consumer.negativeAcknowledge(message->message); -} - -void pulsar_consumer_negative_acknowledge_id(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId) { - consumer->consumer.negativeAcknowledge(messageId->messageId); -} - -pulsar_result pulsar_consumer_close(pulsar_consumer_t *consumer) { - return (pulsar_result)consumer->consumer.close(); -} - -void pulsar_consumer_close_async(pulsar_consumer_t *consumer, pulsar_result_callback callback, void *ctx) { - consumer->consumer.closeAsync(std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -void pulsar_consumer_free(pulsar_consumer_t *consumer) { delete consumer; } - -pulsar_result pulsar_consumer_pause_message_listener(pulsar_consumer_t *consumer) { - return (pulsar_result)consumer->consumer.pauseMessageListener(); -} - -pulsar_result resume_message_listener(pulsar_consumer_t *consumer) { - return (pulsar_result)consumer->consumer.resumeMessageListener(); -} - -void pulsar_consumer_redeliver_unacknowledged_messages(pulsar_consumer_t *consumer) { - return consumer->consumer.redeliverUnacknowledgedMessages(); -} - -void pulsar_consumer_seek_async(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId, - pulsar_result_callback callback, void *ctx) { - consumer->consumer.seekAsync(messageId->messageId, - std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -pulsar_result pulsar_consumer_seek(pulsar_consumer_t *consumer, pulsar_message_id_t *messageId) { - return (pulsar_result)consumer->consumer.seek(messageId->messageId); -} - -int pulsar_consumer_is_connected(pulsar_consumer_t *consumer) { return consumer->consumer.isConnected(); } - -pulsar_result pulsar_consumer_get_last_message_id(pulsar_consumer_t *consumer, - pulsar_message_id_t *messageId) { - return (pulsar_result)consumer->consumer.getLastMessageId(messageId->messageId); -} diff --git a/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc b/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc 
deleted file mode 100644 index 8e1d1ae6617a6..0000000000000 --- a/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include - -#include "c_structs.h" - -pulsar_consumer_configuration_t *pulsar_consumer_configuration_create() { - return new pulsar_consumer_configuration_t; -} - -void pulsar_consumer_configuration_free(pulsar_consumer_configuration_t *consumer_configuration) { - delete consumer_configuration; -} - -void pulsar_consumer_configuration_set_consumer_type(pulsar_consumer_configuration_t *consumer_configuration, - pulsar_consumer_type consumerType) { - consumer_configuration->consumerConfiguration.setConsumerType((pulsar::ConsumerType)consumerType); -} - -pulsar_consumer_type pulsar_consumer_configuration_get_consumer_type( - pulsar_consumer_configuration_t *consumer_configuration) { - return (pulsar_consumer_type)consumer_configuration->consumerConfiguration.getConsumerType(); -} - -void pulsar_consumer_configuration_set_schema_info(pulsar_consumer_configuration_t *consumer_configuration, - pulsar_schema_type schemaType, const char *name, - const char *schema, pulsar_string_map_t *properties) { - auto 
schemaInfo = pulsar::SchemaInfo((pulsar::SchemaType)schemaType, name, schema, properties->map); - consumer_configuration->consumerConfiguration.setSchema(schemaInfo); -} - -static void message_listener_callback(pulsar::Consumer consumer, const pulsar::Message &msg, - pulsar_message_listener listener, void *ctx) { - pulsar_consumer_t c_consumer; - c_consumer.consumer = consumer; - pulsar_message_t *message = new pulsar_message_t; - message->message = msg; - listener(&c_consumer, message, ctx); -} - -void pulsar_consumer_configuration_set_message_listener( - pulsar_consumer_configuration_t *consumer_configuration, pulsar_message_listener messageListener, - void *ctx) { - consumer_configuration->consumerConfiguration.setMessageListener(std::bind( - message_listener_callback, std::placeholders::_1, std::placeholders::_2, messageListener, ctx)); -} - -int pulsar_consumer_configuration_has_message_listener( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.hasMessageListener(); -} - -void pulsar_consumer_configuration_set_receiver_queue_size( - pulsar_consumer_configuration_t *consumer_configuration, int size) { - consumer_configuration->consumerConfiguration.setReceiverQueueSize(size); -} - -int pulsar_consumer_configuration_get_receiver_queue_size( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getReceiverQueueSize(); -} - -void pulsar_consumer_set_max_total_receiver_queue_size_across_partitions( - pulsar_consumer_configuration_t *consumer_configuration, int maxTotalReceiverQueueSizeAcrossPartitions) { - consumer_configuration->consumerConfiguration.setMaxTotalReceiverQueueSizeAcrossPartitions( - maxTotalReceiverQueueSizeAcrossPartitions); -} - -int pulsar_consumer_get_max_total_receiver_queue_size_across_partitions( - pulsar_consumer_configuration_t *consumer_configuration) { - return 
consumer_configuration->consumerConfiguration.getMaxTotalReceiverQueueSizeAcrossPartitions(); -} - -void pulsar_consumer_set_consumer_name(pulsar_consumer_configuration_t *consumer_configuration, - const char *consumerName) { - consumer_configuration->consumerConfiguration.setConsumerName(consumerName); -} - -const char *pulsar_consumer_get_consumer_name(pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getConsumerName().c_str(); -} - -void pulsar_consumer_set_unacked_messages_timeout_ms(pulsar_consumer_configuration_t *consumer_configuration, - const uint64_t milliSeconds) { - consumer_configuration->consumerConfiguration.setUnAckedMessagesTimeoutMs(milliSeconds); -} - -long pulsar_consumer_get_unacked_messages_timeout_ms( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getUnAckedMessagesTimeoutMs(); -} - -void pulsar_configure_set_negative_ack_redelivery_delay_ms( - pulsar_consumer_configuration_t *consumer_configuration, long redeliveryDelayMillis) { - consumer_configuration->consumerConfiguration.setNegativeAckRedeliveryDelayMs(redeliveryDelayMillis); -} - -long pulsar_configure_get_negative_ack_redelivery_delay_ms( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getNegativeAckRedeliveryDelayMs(); -} - -void pulsar_configure_set_ack_grouping_time_ms(pulsar_consumer_configuration_t *consumer_configuration, - long ackGroupingMillis) { - consumer_configuration->consumerConfiguration.setAckGroupingTimeMs(ackGroupingMillis); -} - -long pulsar_configure_get_ack_grouping_time_ms(pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getAckGroupingTimeMs(); -} - -void pulsar_configure_set_ack_grouping_max_size(pulsar_consumer_configuration_t *consumer_configuration, - long maxGroupingSize) { - 
consumer_configuration->consumerConfiguration.setAckGroupingMaxSize(maxGroupingSize); -} - -long pulsar_configure_get_ack_grouping_max_size(pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getAckGroupingMaxSize(); -} - -int pulsar_consumer_is_encryption_enabled(pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.isEncryptionEnabled(); -} - -void pulsar_consumer_configuration_set_default_crypto_key_reader( - pulsar_consumer_configuration_t *consumer_configuration, const char *public_key_path, - const char *private_key_path) { - std::shared_ptr keyReader = - std::make_shared(public_key_path, private_key_path); - consumer_configuration->consumerConfiguration.setCryptoKeyReader(keyReader); -} - -pulsar_consumer_crypto_failure_action pulsar_consumer_configuration_get_crypto_failure_action( - pulsar_consumer_configuration_t *consumer_configuration) { - return (pulsar_consumer_crypto_failure_action) - consumer_configuration->consumerConfiguration.getCryptoFailureAction(); -} - -void pulsar_consumer_configuration_set_crypto_failure_action( - pulsar_consumer_configuration_t *consumer_configuration, - pulsar_consumer_crypto_failure_action cryptoFailureAction) { - consumer_configuration->consumerConfiguration.setCryptoFailureAction( - (pulsar::ConsumerCryptoFailureAction)cryptoFailureAction); -} - -int pulsar_consumer_is_read_compacted(pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.isReadCompacted(); -} - -void pulsar_consumer_set_read_compacted(pulsar_consumer_configuration_t *consumer_configuration, - int compacted) { - consumer_configuration->consumerConfiguration.setReadCompacted(compacted); -} - -void pulsar_consumer_configuration_set_property(pulsar_consumer_configuration_t *conf, const char *name, - const char *value) { - conf->consumerConfiguration.setProperty(name, value); -} 
- -void pulsar_consumer_set_subscription_initial_position( - pulsar_consumer_configuration_t *consumer_configuration, initial_position subscriptionInitialPosition) { - consumer_configuration->consumerConfiguration.setSubscriptionInitialPosition( - (pulsar::InitialPosition)subscriptionInitialPosition); -} - -int pulsar_consumer_get_subscription_initial_position( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getSubscriptionInitialPosition(); -} - -void pulsar_consumer_configuration_set_priority_level(pulsar_consumer_configuration_t *consumer_configuration, - int priority_level) { - consumer_configuration->consumerConfiguration.setPriorityLevel(priority_level); -} - -int pulsar_consumer_configuration_get_priority_level( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getPriorityLevel(); -} - -void pulsar_consumer_configuration_set_max_pending_chunked_message( - pulsar_consumer_configuration_t *consumer_configuration, int max_pending_chunked_message) { - consumer_configuration->consumerConfiguration.setMaxPendingChunkedMessage(max_pending_chunked_message); -} - -int pulsar_consumer_configuration_get_max_pending_chunked_message( - pulsar_consumer_configuration_t *consumer_configuration) { - return consumer_configuration->consumerConfiguration.getMaxPendingChunkedMessage(); -} - -void pulsar_consumer_configuration_set_auto_ack_oldest_chunked_message_on_queue_full( - pulsar_consumer_configuration_t *consumer_configuration, - int auto_ack_oldest_chunked_message_on_queue_full) { - consumer_configuration->consumerConfiguration.setAutoAckOldestChunkedMessageOnQueueFull( - auto_ack_oldest_chunked_message_on_queue_full); -} - -int pulsar_consumer_configuration_is_auto_ack_oldest_chunked_message_on_queue_full( - pulsar_consumer_configuration_t *consumer_configuration) { - return 
consumer_configuration->consumerConfiguration.isAutoAckOldestChunkedMessageOnQueueFull(); -} diff --git a/pulsar-client-cpp/lib/c/c_Message.cc b/pulsar-client-cpp/lib/c/c_Message.cc deleted file mode 100644 index 4fe4c391fcbad..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Message.cc +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include -#include "c_structs.h" - -pulsar_message_t *pulsar_message_create() { return new pulsar_message_t; } - -void pulsar_message_free(pulsar_message_t *message) { delete message; } - -void pulsar_message_set_content(pulsar_message_t *message, const void *data, size_t size) { - message->builder.setContent(data, size); -} - -void pulsar_message_set_allocated_content(pulsar_message_t *message, void *data, size_t size) { - message->builder.setAllocatedContent(data, size); -} - -void pulsar_message_set_property(pulsar_message_t *message, const char *name, const char *value) { - message->builder.setProperty(name, value); -} - -void pulsar_message_set_partition_key(pulsar_message_t *message, const char *partitionKey) { - message->builder.setPartitionKey(partitionKey); -} - -void pulsar_message_set_ordering_key(pulsar_message_t *message, const char *orderingKey) { - message->builder.setOrderingKey(orderingKey); -} - -void pulsar_message_set_event_timestamp(pulsar_message_t *message, uint64_t eventTimestamp) { - message->builder.setEventTimestamp(eventTimestamp); -} - -void pulsar_message_set_sequence_id(pulsar_message_t *message, int64_t sequenceId) { - message->builder.setSequenceId(sequenceId); -} - -void pulsar_message_set_deliver_after(pulsar_message_t *message, uint64_t delayMillis) { - message->builder.setDeliverAfter(std::chrono::milliseconds(delayMillis)); -} - -void pulsar_message_set_deliver_at(pulsar_message_t *message, uint64_t deliveryTimestampMillis) { - message->builder.setDeliverAt(deliveryTimestampMillis); -} - -void pulsar_message_set_replication_clusters(pulsar_message_t *message, const char **clusters, size_t size) { - const char **c = clusters; - std::vector clustersList; - for (size_t i = 0; i < size; i++) { - clustersList.push_back(*c); - ++c; - } - - message->builder.setReplicationClusters(clustersList); -} - -void pulsar_message_disable_replication(pulsar_message_t *message, int flag) { - message->builder.disableReplication(flag); -} - 
-int pulsar_message_has_property(pulsar_message_t *message, const char *name) { - return message->message.hasProperty(name); -} - -const char *pulsar_message_get_property(pulsar_message_t *message, const char *name) { - return message->message.getProperty(name).c_str(); -} - -const void *pulsar_message_get_data(pulsar_message_t *message) { return message->message.getData(); } - -uint32_t pulsar_message_get_length(pulsar_message_t *message) { return message->message.getLength(); } - -pulsar_message_id_t *pulsar_message_get_message_id(pulsar_message_t *message) { - pulsar_message_id_t *messageId = new pulsar_message_id_t; - messageId->messageId = message->message.getMessageId(); - return messageId; -} - -const char *pulsar_message_get_partitionKey(pulsar_message_t *message) { - return message->message.getPartitionKey().c_str(); -} - -int pulsar_message_has_partition_key(pulsar_message_t *message) { return message->message.hasPartitionKey(); } - -const char *pulsar_message_get_orderingKey(pulsar_message_t *message) { - return message->message.getOrderingKey().c_str(); -} - -int pulsar_message_has_ordering_key(pulsar_message_t *message) { return message->message.hasOrderingKey(); } - -uint64_t pulsar_message_get_publish_timestamp(pulsar_message_t *message) { - return message->message.getPublishTimestamp(); -} - -uint64_t pulsar_message_get_event_timestamp(pulsar_message_t *message) { - return message->message.getEventTimestamp(); -} - -pulsar_string_map_t *pulsar_message_get_properties(pulsar_message_t *message) { - pulsar_string_map_t *map = pulsar_string_map_create(); - map->map = message->message.getProperties(); - return map; -} - -const char *pulsar_message_get_topic_name(pulsar_message_t *message) { - return message->message.getTopicName().c_str(); -} - -int pulsar_message_get_redelivery_count(pulsar_message_t *message) { - return message->message.getRedeliveryCount(); -} diff --git a/pulsar-client-cpp/lib/c/c_MessageId.cc b/pulsar-client-cpp/lib/c/c_MessageId.cc 
deleted file mode 100644 index 537bb709e3b2e..0000000000000 --- a/pulsar-client-cpp/lib/c/c_MessageId.cc +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include "c_structs.h" - -#include -#include - -std::once_flag initialized; - -static pulsar_message_id_t earliest; -static pulsar_message_id_t latest; - -static void initialize() { - earliest.messageId = pulsar::MessageId::earliest(); - latest.messageId = pulsar::MessageId::latest(); -} - -const pulsar_message_id_t *pulsar_message_id_earliest() { - std::call_once(initialized, &initialize); - return &earliest; -} - -const pulsar_message_id_t *pulsar_message_id_latest() { - std::call_once(initialized, &initialize); - return &latest; -} - -void *pulsar_message_id_serialize(pulsar_message_id_t *messageId, int *len) { - std::string str; - messageId->messageId.serialize(str); - void *p = malloc(str.length()); - memcpy(p, str.c_str(), str.length()); - *len = str.length(); - return p; -} - -pulsar_message_id_t *pulsar_message_id_deserialize(const void *buffer, uint32_t len) { - std::string strId((const char *)buffer, len); - pulsar_message_id_t *messageId = new pulsar_message_id_t; - messageId->messageId = 
pulsar::MessageId::deserialize(strId); - return messageId; -} - -char *pulsar_message_id_str(pulsar_message_id_t *messageId) { - std::stringstream ss; - ss << messageId->messageId; - std::string s = ss.str(); - -#ifdef _MSC_VER - // strndup is not available in MSVC - char *sdup = (char *)malloc(s.length() + 1); - memcpy(sdup, s.c_str(), s.length()); - sdup[s.length()] = '\0'; - return sdup; -#else - return strndup(s.c_str(), s.length()); -#endif -} - -void pulsar_message_id_free(pulsar_message_id_t *messageId) { delete messageId; } diff --git a/pulsar-client-cpp/lib/c/c_MessageRouter.cc b/pulsar-client-cpp/lib/c/c_MessageRouter.cc deleted file mode 100644 index 3184357e9a7d7..0000000000000 --- a/pulsar-client-cpp/lib/c/c_MessageRouter.cc +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include - -#include "c_structs.h" - -int pulsar_topic_metadata_get_num_partitions(pulsar_topic_metadata_t *topicMetadata) { - return topicMetadata->metadata->getNumPartitions(); -} diff --git a/pulsar-client-cpp/lib/c/c_Producer.cc b/pulsar-client-cpp/lib/c/c_Producer.cc deleted file mode 100644 index d53c162977a06..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Producer.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include - -#include "c_structs.h" - -const char *pulsar_producer_get_topic(pulsar_producer_t *producer) { - return producer->producer.getTopic().c_str(); -} - -const char *pulsar_producer_get_producer_name(pulsar_producer_t *producer) { - return producer->producer.getProducerName().c_str(); -} - -void pulsar_producer_free(pulsar_producer_t *producer) { delete producer; } - -pulsar_result pulsar_producer_send(pulsar_producer_t *producer, pulsar_message_t *msg) { - msg->message = msg->builder.build(); - return (pulsar_result)producer->producer.send(msg->message); -} - -static void handle_producer_send(pulsar::Result result, pulsar::MessageId messageId, - pulsar_send_callback callback, void *ctx) { - if (result == pulsar::ResultOk) { - pulsar_message_id_t *c_message_id = new pulsar_message_id_t; - c_message_id->messageId = messageId; - callback(pulsar_result_Ok, c_message_id, ctx); - } else { - callback((pulsar_result)result, NULL, ctx); - } -} - -void pulsar_producer_send_async(pulsar_producer_t *producer, pulsar_message_t *msg, - pulsar_send_callback callback, void *ctx) { - msg->message = msg->builder.build(); - producer->producer.sendAsync(msg->message, std::bind(&handle_producer_send, std::placeholders::_1, - std::placeholders::_2, callback, ctx)); -} - -int64_t pulsar_producer_get_last_sequence_id(pulsar_producer_t *producer) { - return producer->producer.getLastSequenceId(); -} - -pulsar_result pulsar_producer_close(pulsar_producer_t *producer) { - return (pulsar_result)producer->producer.close(); -} - -void pulsar_producer_close_async(pulsar_producer_t *producer, pulsar_close_callback callback, void *ctx) { - producer->producer.closeAsync(std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -pulsar_result pulsar_producer_flush(pulsar_producer_t *producer) { - return (pulsar_result)producer->producer.flush(); -} - -void pulsar_producer_flush_async(pulsar_producer_t *producer, pulsar_close_callback callback, void *ctx) { - 
producer->producer.flushAsync(std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -int pulsar_producer_is_connected(pulsar_producer_t *producer) { return producer->producer.isConnected(); } diff --git a/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc b/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc deleted file mode 100644 index fbc5714d35a7e..0000000000000 --- a/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc +++ /dev/null @@ -1,234 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -#include "c_structs.h" - -pulsar_producer_configuration_t *pulsar_producer_configuration_create() { - pulsar_producer_configuration_t *c_conf = new pulsar_producer_configuration_t; - c_conf->conf = pulsar::ProducerConfiguration(); - return c_conf; -} - -void pulsar_producer_configuration_free(pulsar_producer_configuration_t *conf) { delete conf; } - -void pulsar_producer_configuration_set_producer_name(pulsar_producer_configuration_t *conf, - const char *producerName) { - conf->conf.setProducerName(producerName); -} - -const char *pulsar_producer_configuration_get_producer_name(pulsar_producer_configuration_t *conf) { - return conf->conf.getProducerName().c_str(); -} - -void pulsar_producer_configuration_set_send_timeout(pulsar_producer_configuration_t *conf, - int sendTimeoutMs) { - conf->conf.setSendTimeout(sendTimeoutMs); -} - -int pulsar_producer_configuration_get_send_timeout(pulsar_producer_configuration_t *conf) { - return conf->conf.getSendTimeout(); -} - -void pulsar_producer_configuration_set_initial_sequence_id(pulsar_producer_configuration_t *conf, - int64_t initialSequenceId) { - conf->conf.setInitialSequenceId(initialSequenceId); -} - -int64_t pulsar_producer_configuration_get_initial_sequence_id(pulsar_producer_configuration_t *conf) { - return conf->conf.getInitialSequenceId(); -} - -void pulsar_producer_configuration_set_compression_type(pulsar_producer_configuration_t *conf, - pulsar_compression_type compressionType) { - conf->conf.setCompressionType((pulsar::CompressionType)compressionType); -} - -pulsar_compression_type pulsar_producer_configuration_get_compression_type( - pulsar_producer_configuration_t *conf) { - return (pulsar_compression_type)conf->conf.getCompressionType(); -} - -void pulsar_producer_configuration_set_schema_info(pulsar_producer_configuration_t *conf, - pulsar_schema_type schemaType, const char *name, - const char *schema, pulsar_string_map_t *properties) { - auto schemaInfo = 
pulsar::SchemaInfo((pulsar::SchemaType)schemaType, name, schema, properties->map); - conf->conf.setSchema(schemaInfo); -} - -void pulsar_producer_configuration_set_max_pending_messages(pulsar_producer_configuration_t *conf, - int maxPendingMessages) { - conf->conf.setMaxPendingMessages(maxPendingMessages); -} - -int pulsar_producer_configuration_get_max_pending_messages(pulsar_producer_configuration_t *conf) { - return conf->conf.getMaxPendingMessages(); -} - -void pulsar_producer_configuration_set_max_pending_messages_across_partitions( - pulsar_producer_configuration_t *conf, int maxPendingMessagesAcrossPartitions) { - conf->conf.setMaxPendingMessagesAcrossPartitions(maxPendingMessagesAcrossPartitions); -} - -int pulsar_producer_configuration_get_max_pending_messages_across_partitions( - pulsar_producer_configuration_t *conf) { - return conf->conf.getMaxPendingMessagesAcrossPartitions(); -} - -void pulsar_producer_configuration_set_partitions_routing_mode(pulsar_producer_configuration_t *conf, - pulsar_partitions_routing_mode mode) { - conf->conf.setPartitionsRoutingMode((pulsar::ProducerConfiguration::PartitionsRoutingMode)mode); -} - -pulsar_partitions_routing_mode pulsar_producer_configuration_get_partitions_routing_mode( - pulsar_producer_configuration_t *conf) { - return (pulsar_partitions_routing_mode)conf->conf.getPartitionsRoutingMode(); -} - -void pulsar_producer_configuration_set_hashing_scheme(pulsar_producer_configuration_t *conf, - pulsar_hashing_scheme scheme) { - conf->conf.setHashingScheme((pulsar::ProducerConfiguration::HashingScheme)scheme); -} - -pulsar_hashing_scheme pulsar_producer_configuration_get_hashing_scheme( - pulsar_producer_configuration_t *conf) { - return (pulsar_hashing_scheme)conf->conf.getHashingScheme(); -} - -class MessageRoutingPolicy : public pulsar::MessageRoutingPolicy { - pulsar_message_router _router; - void *_ctx; - - public: - MessageRoutingPolicy(pulsar_message_router router, void *ctx) : _router(router), _ctx(ctx) {} 
- - int getPartition(const pulsar::Message &msg, const pulsar::TopicMetadata &topicMetadata) { - pulsar_message_t message; - message.message = msg; - - pulsar_topic_metadata_t metadata; - metadata.metadata = &topicMetadata; - - return _router(&message, &metadata, _ctx); - } -}; - -void pulsar_producer_configuration_set_message_router(pulsar_producer_configuration_t *conf, - pulsar_message_router router, void *ctx) { - conf->conf.setMessageRouter(std::make_shared(router, ctx)); -} - -void pulsar_producer_configuration_set_lazy_start_partitioned_producers( - pulsar_producer_configuration_t *conf, int useLazyStartPartitionedProducers) { - conf->conf.setLazyStartPartitionedProducers(useLazyStartPartitionedProducers); -} - -int pulsar_producer_configuration_get_lazy_start_partitioned_producers( - pulsar_producer_configuration_t *conf) { - return conf->conf.getLazyStartPartitionedProducers(); -} - -void pulsar_producer_configuration_set_block_if_queue_full(pulsar_producer_configuration_t *conf, - int blockIfQueueFull) { - conf->conf.setBlockIfQueueFull(blockIfQueueFull); -} - -int pulsar_producer_configuration_get_block_if_queue_full(pulsar_producer_configuration_t *conf) { - return conf->conf.getBlockIfQueueFull(); -} - -void pulsar_producer_configuration_set_batching_enabled(pulsar_producer_configuration_t *conf, - int batchingEnabled) { - conf->conf.setBatchingEnabled(batchingEnabled); -} - -int pulsar_producer_configuration_get_batching_enabled(pulsar_producer_configuration_t *conf) { - return conf->conf.getBatchingEnabled(); -} - -void pulsar_producer_configuration_set_batching_max_messages(pulsar_producer_configuration_t *conf, - unsigned int batchingMaxMessages) { - conf->conf.setBatchingMaxMessages(batchingMaxMessages); -} - -unsigned int pulsar_producer_configuration_get_batching_max_messages(pulsar_producer_configuration_t *conf) { - return conf->conf.getBatchingMaxMessages(); -} - -void pulsar_producer_configuration_set_batching_max_allowed_size_in_bytes( - 
pulsar_producer_configuration_t *conf, unsigned long batchingMaxAllowedSizeInBytes) { - conf->conf.setBatchingMaxAllowedSizeInBytes(batchingMaxAllowedSizeInBytes); -} - -unsigned long pulsar_producer_configuration_get_batching_max_allowed_size_in_bytes( - pulsar_producer_configuration_t *conf) { - return conf->conf.getBatchingMaxAllowedSizeInBytes(); -} - -void pulsar_producer_configuration_set_batching_max_publish_delay_ms( - pulsar_producer_configuration_t *conf, unsigned long batchingMaxPublishDelayMs) { - conf->conf.setBatchingMaxPublishDelayMs(batchingMaxPublishDelayMs); -} - -unsigned long pulsar_producer_configuration_get_batching_max_publish_delay_ms( - pulsar_producer_configuration_t *conf) { - return conf->conf.getBatchingMaxPublishDelayMs(); -} - -void pulsar_producer_configuration_set_property(pulsar_producer_configuration_t *conf, const char *name, - const char *value) { - conf->conf.setProperty(name, value); -} - -int pulsar_producer_is_encryption_enabled(pulsar_producer_configuration_t *conf) { - return conf->conf.isEncryptionEnabled(); -} - -void pulsar_producer_configuration_set_default_crypto_key_reader(pulsar_producer_configuration_t *conf, - const char *public_key_path, - const char *private_key_path) { - std::shared_ptr keyReader = - std::make_shared(public_key_path, private_key_path); - conf->conf.setCryptoKeyReader(keyReader); -} - -pulsar_producer_crypto_failure_action pulsar_producer_configuration_get_crypto_failure_action( - pulsar_producer_configuration_t *conf) { - return (pulsar_producer_crypto_failure_action)conf->conf.getCryptoFailureAction(); -} - -void pulsar_producer_configuration_set_crypto_failure_action( - pulsar_producer_configuration_t *conf, pulsar_producer_crypto_failure_action cryptoFailureAction) { - conf->conf.setCryptoFailureAction((pulsar::ProducerCryptoFailureAction)cryptoFailureAction); -} - -void pulsar_producer_configuration_set_encryption_key(pulsar_producer_configuration_t *conf, - const char *key) { - 
conf->conf.addEncryptionKey(key); -} - -void pulsar_producer_configuration_set_chunking_enabled(pulsar_producer_configuration_t *conf, - int chunkingEnabled) { - conf->conf.setChunkingEnabled(chunkingEnabled); -} - -int pulsar_producer_configuration_is_chunking_enabled(pulsar_producer_configuration_t *conf) { - return conf->conf.isChunkingEnabled(); -} diff --git a/pulsar-client-cpp/lib/c/c_Reader.cc b/pulsar-client-cpp/lib/c/c_Reader.cc deleted file mode 100644 index a28d9c234c189..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Reader.cc +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include -#include - -#include "c_structs.h" - -const char *pulsar_reader_get_topic(pulsar_reader_t *reader) { return reader->reader.getTopic().c_str(); } - -pulsar_result pulsar_reader_read_next(pulsar_reader_t *reader, pulsar_message_t **msg) { - pulsar::Message message; - pulsar::Result res = reader->reader.readNext(message); - if (res == pulsar::ResultOk) { - (*msg) = new pulsar_message_t; - (*msg)->message = message; - } - return (pulsar_result)res; -} - -pulsar_result pulsar_reader_read_next_with_timeout(pulsar_reader_t *reader, pulsar_message_t **msg, - int timeoutMs) { - pulsar::Message message; - pulsar::Result res = reader->reader.readNext(message, timeoutMs); - if (res == pulsar::ResultOk) { - (*msg) = new pulsar_message_t; - (*msg)->message = message; - } - return (pulsar_result)res; -} - -pulsar_result pulsar_reader_close(pulsar_reader_t *reader) { return (pulsar_result)reader->reader.close(); } - -void pulsar_reader_close_async(pulsar_reader_t *reader, pulsar_result_callback callback, void *ctx) { - reader->reader.closeAsync(std::bind(handle_result_callback, std::placeholders::_1, callback, ctx)); -} - -void pulsar_reader_free(pulsar_reader_t *reader) { delete reader; } - -pulsar_result pulsar_reader_has_message_available(pulsar_reader_t *reader, int *available) { - bool isAvailable; - pulsar_result result = (pulsar_result)reader->reader.hasMessageAvailable(isAvailable); - *available = isAvailable; - return result; -} - -int pulsar_reader_is_connected(pulsar_reader_t *reader) { return reader->reader.isConnected(); } diff --git a/pulsar-client-cpp/lib/c/c_ReaderConfiguration.cc b/pulsar-client-cpp/lib/c/c_ReaderConfiguration.cc deleted file mode 100644 index 9d3a1a0838408..0000000000000 --- a/pulsar-client-cpp/lib/c/c_ReaderConfiguration.cc +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include -#include -#include -#include - -#include "c_structs.h" - -pulsar_reader_configuration_t *pulsar_reader_configuration_create() { - return new pulsar_reader_configuration_t; -} - -void pulsar_reader_configuration_free(pulsar_reader_configuration_t *configuration) { delete configuration; } - -static void message_listener_callback(pulsar::Reader reader, const pulsar::Message &msg, - pulsar_reader_listener listener, void *ctx) { - pulsar_reader_t c_reader; - c_reader.reader = reader; - pulsar_message_t *message = new pulsar_message_t; - message->message = msg; - listener(&c_reader, message, ctx); -} - -void pulsar_reader_configuration_set_reader_listener(pulsar_reader_configuration_t *configuration, - pulsar_reader_listener listener, void *ctx) { - configuration->conf.setReaderListener( - std::bind(message_listener_callback, std::placeholders::_1, std::placeholders::_2, listener, ctx)); -} - -int pulsar_reader_configuration_has_reader_listener(pulsar_reader_configuration_t *configuration) { - return configuration->conf.hasReaderListener(); -} - -void pulsar_reader_configuration_set_receiver_queue_size(pulsar_reader_configuration_t *configuration, - int size) { - configuration->conf.setReceiverQueueSize(size); -} - -int 
pulsar_reader_configuration_get_receiver_queue_size(pulsar_reader_configuration_t *configuration) { - return configuration->conf.getReceiverQueueSize(); -} - -void pulsar_reader_configuration_set_reader_name(pulsar_reader_configuration_t *configuration, - const char *readerName) { - configuration->conf.setReaderName(readerName); -} - -const char *pulsar_reader_configuration_get_reader_name(pulsar_reader_configuration_t *configuration) { - return configuration->conf.getReaderName().c_str(); -} - -void pulsar_reader_configuration_set_subscription_role_prefix(pulsar_reader_configuration_t *configuration, - const char *subscriptionRolePrefix) { - configuration->conf.setSubscriptionRolePrefix(subscriptionRolePrefix); -} - -const char *pulsar_reader_configuration_get_subscription_role_prefix( - pulsar_reader_configuration_t *configuration) { - return configuration->conf.getSubscriptionRolePrefix().c_str(); -} - -void pulsar_reader_configuration_set_read_compacted(pulsar_reader_configuration_t *configuration, - int readCompacted) { - configuration->conf.setReadCompacted(readCompacted); -} - -int pulsar_reader_configuration_is_read_compacted(pulsar_reader_configuration_t *configuration) { - return configuration->conf.isReadCompacted(); -} \ No newline at end of file diff --git a/pulsar-client-cpp/lib/c/c_Result.cc b/pulsar-client-cpp/lib/c/c_Result.cc deleted file mode 100644 index 157a91f33ab35..0000000000000 --- a/pulsar-client-cpp/lib/c/c_Result.cc +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include - -const char *pulsar_result_str(pulsar_result result) { return pulsar::strResult((pulsar::Result)result); } diff --git a/pulsar-client-cpp/lib/c/c_structs.h b/pulsar-client-cpp/lib/c/c_structs.h deleted file mode 100644 index eb8889a9ff06e..0000000000000 --- a/pulsar-client-cpp/lib/c/c_structs.h +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include - -#include -#include - -struct _pulsar_client { - std::unique_ptr client; -}; - -struct _pulsar_client_configuration { - pulsar::ClientConfiguration conf; -}; - -struct _pulsar_producer { - pulsar::Producer producer; -}; - -struct _pulsar_producer_configuration { - pulsar::ProducerConfiguration conf; -}; - -struct _pulsar_consumer { - pulsar::Consumer consumer; -}; - -struct _pulsar_consumer_configuration { - pulsar::ConsumerConfiguration consumerConfiguration; -}; - -struct _pulsar_reader { - pulsar::Reader reader; -}; - -struct _pulsar_reader_configuration { - pulsar::ReaderConfiguration conf; -}; - -struct _pulsar_message { - pulsar::MessageBuilder builder; - pulsar::Message message; -}; - -struct _pulsar_message_id { - pulsar::MessageId messageId; -}; - -struct _pulsar_authentication { - pulsar::AuthenticationPtr auth; -}; - -struct _pulsar_topic_metadata { - const pulsar::TopicMetadata* metadata; -}; - -typedef void (*pulsar_result_callback)(pulsar_result res, void* ctx); - -inline void handle_result_callback(pulsar::Result result, pulsar_result_callback callback, void* ctx) { - if (callback) { - callback((pulsar_result)result, ctx); - } -} - -struct _pulsar_string_map { - std::map map; -}; - -struct _pulsar_string_list { - std::vector list; -}; diff --git a/pulsar-client-cpp/lib/checksum/ChecksumProvider.cc b/pulsar-client-cpp/lib/checksum/ChecksumProvider.cc deleted file mode 100644 index 390e55c7c6bc6..0000000000000 --- a/pulsar-client-cpp/lib/checksum/ChecksumProvider.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "ChecksumProvider.h" - -#include -#if defined(__ARM_FEATURE_CRC32) -#include "crc32c_arm.h" -#else -#include "crc32c_sse42.h" -#endif -#include "crc32c_sw.h" - -namespace pulsar { -bool isCrc32cSupported = crc32cSupported(); - -#if defined(__ARM_FEATURE_CRC32) -bool crc32cSupported() { return crc32c_arm64_initialize(); } -#else -bool crc32cSupported() { return crc32c_initialize(); } -#endif - -/** - * computes crc32c checksum: uses sse4.2 hardware-instruction to compute crc32c if machine supports else it - * computes using sw algo - * @param - * previousChecksum = in case of incremental-checksum-computation pass previous computed else pass 0 in other - * case. 
- * data = for which checksum will be computed - * length = length of data from offset - */ -uint32_t computeChecksum(uint32_t previousChecksum, const void* data, int length) { - if (isCrc32cSupported) { - return crc32cHw(previousChecksum, data, length); - } else { - return crc32cSw(previousChecksum, data, length); - } -} - -/** - * Computes crc32c using hardware instruction - */ -uint32_t crc32cHw(uint32_t previousChecksum, const void* data, int length) { - assert(isCrc32cSupported); -#if defined(__ARM_FEATURE_CRC32) - // Use hardware nano instruction - return crc32c_arm64(previousChecksum, data, length); -#else - // Use hardware sse4.2 instruction - return crc32c(previousChecksum, data, length, 0); -#endif -} - -/** - * Computes crc32c using sw crc-table algo - */ -uint32_t crc32cSw(uint32_t previousChecksum, const void* data, int length) { - return crc32c_sw(previousChecksum, data, length); -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/checksum/ChecksumProvider.h b/pulsar-client-cpp/lib/checksum/ChecksumProvider.h deleted file mode 100644 index 378b32175ddaf..0000000000000 --- a/pulsar-client-cpp/lib/checksum/ChecksumProvider.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef _CHECKSUM_PROVIDER_H_ -#define _CHECKSUM_PROVIDER_H_ - -#include -#include - -namespace pulsar { - -PULSAR_PUBLIC bool crc32cSupported(); -PULSAR_PUBLIC uint32_t computeChecksum(uint32_t previousChecksum, const void *data, int length); -PULSAR_PUBLIC uint32_t crc32cHw(uint32_t previousChecksum, const void *data, int length); -PULSAR_PUBLIC uint32_t crc32cSw(uint32_t previousChecksum, const void *data, int length); -} // namespace pulsar - -#endif // _CHECKSUM_PROVIDER_H_ diff --git a/pulsar-client-cpp/lib/checksum/crc32c_arm.cc b/pulsar-client-cpp/lib/checksum/crc32c_arm.cc deleted file mode 100644 index d937a16756ce7..0000000000000 --- a/pulsar-client-cpp/lib/checksum/crc32c_arm.cc +++ /dev/null @@ -1,209 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -// Copyright (c) 2018, Arm Limited and affiliates. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#include "crc32c_arm.h" -#include "lib/checksum/crc32c_sw.h" - -#if defined(HAVE_ARM64_CRC) - -#if defined(__linux__) -#include -#endif -#ifdef PULSAR_AUXV_GETAUXVAL_PRESENT -#include -#endif -#ifndef HWCAP_CRC32 -#define HWCAP_CRC32 (1 << 7) -#endif -#ifndef HWCAP_PMULL -#define HWCAP_PMULL (1 << 4) -#endif -#if defined(__APPLE__) -#include -#endif - -#ifdef HAVE_ARM64_CRYPTO -/* unfolding to compute 8 * 3 = 24 bytes parallelly */ -#define CRC32C24BYTES(ITR) \ - crc1 = crc32c_u64(crc1, *(buf64 + BLK_LENGTH + (ITR))); \ - crc2 = crc32c_u64(crc2, *(buf64 + BLK_LENGTH * 2 + (ITR))); \ - crc0 = crc32c_u64(crc0, *(buf64 + (ITR))); - -/* unfolding to compute 24 * 7 = 168 bytes parallelly */ -#define CRC32C7X24BYTES(ITR) \ - do { \ - CRC32C24BYTES((ITR)*7 + 0) \ - CRC32C24BYTES((ITR)*7 + 1) \ - CRC32C24BYTES((ITR)*7 + 2) \ - CRC32C24BYTES((ITR)*7 + 3) \ - CRC32C24BYTES((ITR)*7 + 4) \ - CRC32C24BYTES((ITR)*7 + 5) \ - CRC32C24BYTES((ITR)*7 + 6) \ - } while (0) -#endif -namespace pulsar { -static bool initialized = false; -static bool pmull_runtime_flag = false; - -bool crc32c_arm64_initialize() { - bool has_crc32c_arm_runtime = false; - if (!initialized) { - has_crc32c_arm_runtime = crc32c_runtime_check(); - if (has_crc32c_arm_runtime) { - pmull_runtime_flag = crc32c_pmull_runtime_check(); - } - } - initialized = true; - return has_crc32c_arm_runtime; -} - -uint32_t crc32c_runtime_check() { -#if !defined(__APPLE__) - uint64_t auxv = 0; -#if defined(PULSAR_AUXV_GETAUXVAL_PRESENT) - auxv = getauxval(AT_HWCAP); -#elif defined(__FreeBSD__) - elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv)); -#endif - return (auxv & HWCAP_CRC32) != 0; -#else - int r; - size_t l = sizeof(r); - if (sysctlbyname("hw.optional.armv8_crc32", &r, &l, NULL, 0) == -1) return 0; - return r == 1; -#endif -} - -bool crc32c_pmull_runtime_check() { -#if !defined(__APPLE__) - uint64_t auxv = 0; -#if defined(PULSAR_AUXV_GETAUXVAL_PRESENT) - auxv = getauxval(AT_HWCAP); -#elif defined(__FreeBSD__) - 
elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv)); -#endif - return (auxv & HWCAP_PMULL) != 0; -#else - return true; -#endif -} - -uint32_t crc32c_arm64(uint32_t crc, const void *data, size_t len) { - const uint8_t *buf8; - const uint64_t *buf64 = (uint64_t *)data; - int length = (int)len; - crc ^= 0xffffffff; - - /* - * Pmull runtime check here. - * Raspberry Pi supports crc32 but doesn't support pmull. - * Skip Crc32c Parallel computation if no crypto extension available. - */ - if (pmull_runtime_flag) { -/* Macro (HAVE_ARM64_CRYPTO) is used for compiling check */ -#ifdef HAVE_ARM64_CRYPTO -/* Crc32c Parallel computation - * Algorithm comes from Intel whitepaper: - * crc-iscsi-polynomial-crc32-instruction-paper - * - * Input data is divided into three equal-sized blocks - * Three parallel blocks (crc0, crc1, crc2) for 1024 Bytes - * One Block: 42(BLK_LENGTH) * 8(step length: crc32c_u64) bytes - */ -#define BLK_LENGTH 42 - while (length >= 1024) { - uint64_t t0, t1; - uint32_t crc0 = 0, crc1 = 0, crc2 = 0; - - /* Parallel Param: - * k0 = CRC32(x ^ (42 * 8 * 8 * 2 - 1)); - * k1 = CRC32(x ^ (42 * 8 * 8 - 1)); - */ - uint32_t k0 = 0xe417f38a, k1 = 0x8f158014; - - /* Prefetch data for following block to avoid cache miss */ - PREF1KL1((uint8_t *)buf64, 1024); - - /* First 8 byte for better pipelining */ - crc0 = crc32c_u64(crc, *buf64++); - - /* 3 blocks crc32c parallel computation - * Macro unfolding to compute parallelly - * 168 * 6 = 1008 (bytes) - */ - CRC32C7X24BYTES(0); - CRC32C7X24BYTES(1); - CRC32C7X24BYTES(2); - CRC32C7X24BYTES(3); - CRC32C7X24BYTES(4); - CRC32C7X24BYTES(5); - buf64 += (BLK_LENGTH * 3); - - /* Last 8 bytes */ - crc = crc32c_u64(crc2, *buf64++); - - t0 = (uint64_t)vmull_p64(crc0, k0); - t1 = (uint64_t)vmull_p64(crc1, k1); - - /* Merge (crc0, crc1, crc2) -> crc */ - crc1 = crc32c_u64(0, t1); - crc ^= crc1; - crc0 = crc32c_u64(0, t0); - crc ^= crc0; - - length -= 1024; - } - - if (length == 0) return crc ^ (0xffffffffU); -#endif - } // if Pmull runtime 
check here - - buf8 = (const uint8_t *)buf64; - while (length >= 8) { - crc = crc32c_u64(crc, *(const uint64_t *)buf8); - buf8 += 8; - length -= 8; - } - - /* The following is more efficient than the straight loop */ - if (length >= 4) { - crc = crc32c_u32(crc, *(const uint32_t *)buf8); - buf8 += 4; - length -= 4; - } - - if (length >= 2) { - crc = crc32c_u16(crc, *(const uint16_t *)buf8); - buf8 += 2; - length -= 2; - } - - if (length >= 1) crc = crc32c_u8(crc, *buf8); - - crc ^= 0xffffffff; - return crc; -} - -} // namespace pulsar -#endif diff --git a/pulsar-client-cpp/lib/checksum/crc32c_arm.h b/pulsar-client-cpp/lib/checksum/crc32c_arm.h deleted file mode 100644 index 862215288394c..0000000000000 --- a/pulsar-client-cpp/lib/checksum/crc32c_arm.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -// Copyright (c) 2018, Arm Limited and affiliates. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). 
- -#ifndef UTIL_CRC32C_ARM64_H -#define UTIL_CRC32C_ARM64_H - -#include -#include - -#if defined(__aarch64__) || defined(__AARCH64__) - -#ifdef __ARM_FEATURE_CRC32 -#define HAVE_ARM64_CRC -#include -#define crc32c_u8(crc, v) __crc32cb(crc, v) -#define crc32c_u16(crc, v) __crc32ch(crc, v) -#define crc32c_u32(crc, v) __crc32cw(crc, v) -#define crc32c_u64(crc, v) __crc32cd(crc, v) -#define PREF4X64L1(buffer, PREF_OFFSET, ITR) \ - __asm__("PRFM PLDL1KEEP, [%x[v],%[c]]" ::[v] "r"(buffer), [ c ] "I"((PREF_OFFSET) + ((ITR) + 0) * 64)); \ - __asm__("PRFM PLDL1KEEP, [%x[v],%[c]]" ::[v] "r"(buffer), [ c ] "I"((PREF_OFFSET) + ((ITR) + 1) * 64)); \ - __asm__("PRFM PLDL1KEEP, [%x[v],%[c]]" ::[v] "r"(buffer), [ c ] "I"((PREF_OFFSET) + ((ITR) + 2) * 64)); \ - __asm__("PRFM PLDL1KEEP, [%x[v],%[c]]" ::[v] "r"(buffer), [ c ] "I"((PREF_OFFSET) + ((ITR) + 3) * 64)); - -#define PREF1KL1(buffer, PREF_OFFSET) \ - PREF4X64L1(buffer, (PREF_OFFSET), 0) \ - PREF4X64L1(buffer, (PREF_OFFSET), 4) \ - PREF4X64L1(buffer, (PREF_OFFSET), 8) \ - PREF4X64L1(buffer, (PREF_OFFSET), 12) -namespace pulsar { -bool crc32c_arm64_initialize(); -uint32_t crc32c_arm64(uint32_t crc, const void* data, size_t len); -uint32_t crc32c_runtime_check(); -bool crc32c_pmull_runtime_check(); -} // namespace pulsar -#ifdef __ARM_FEATURE_CRYPTO -#define HAVE_ARM64_CRYPTO -#include -#endif // __ARM_FEATURE_CRYPTO -#endif // __ARM_FEATURE_CRC32 - -#endif // defined(__aarch64__) || defined(__AARCH64__) - -#endif diff --git a/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc b/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc deleted file mode 100644 index c5dba04ba8e41..0000000000000 --- a/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc +++ /dev/null @@ -1,272 +0,0 @@ -/******************************************************************************* - * Copyright 2014 Trevor Robinson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - ******************************************************************************/ -#include "crc32c_sse42.h" - -#include -#if BOOST_VERSION >= 105500 -#include -#else -#if _MSC_VER -#pragma message("Boost version is < 1.55, disable CRC32C") -#else -#warning "Boost version is < 1.55, disable CRC32C" -#endif -#endif - -#include -#include -#include "lib/checksum/crc32c_sw.h" -#include "gf2.hpp" - -#if BOOST_ARCH_X86_64 && !defined(__arm64__) -#define PULSAR_X86_64 -#include // SSE4.2 -#include // PCLMUL -#else -#ifdef _MSC_VER -#pragma message("BOOST_ARCH_X86_64 is not defined, CRC32C will be disabled") -#else -#warning "BOOST_ARCH_X86_64 is not defined, CRC32C SSE4.2 will be disabled" -#endif -#endif - -#ifdef _MSC_VER -#include -#elif defined(PULSAR_X86_64) -#include -#endif - -//#define CRC32C_DEBUG -#define CRC32C_PCLMULQDQ - -#ifdef CRC32C_DEBUG -#include -#define DEBUG_PRINTF1(fmt, v1) printf(fmt, v1) -#define DEBUG_PRINTF2(fmt, v1, v2) printf(fmt, v1, v2) -#define DEBUG_PRINTF3(fmt, v1, v2, v3) printf(fmt, v1, v2, v3) -#define DEBUG_PRINTF4(fmt, v1, v2, v3, v4) printf(fmt, v1, v2, v3, v4) -#else -#define DEBUG_PRINTF1(fmt, v1) -#define DEBUG_PRINTF2(fmt, v1, v2) -#define DEBUG_PRINTF3(fmt, v1, v2, v3) -#define DEBUG_PRINTF4(fmt, v1, v2, v3, v4) -#endif - -namespace pulsar { - -static bool initialized = false; -static bool has_sse42 = false; -static bool has_pclmulqdq = false; - -bool crc32c_initialize() { - if (!initialized) { -#ifdef _MSC_VER - const uint32_t cpuid_ecx_sse42 = (1 << 20); - const uint32_t cpuid_ecx_pclmulqdq = (1 << 1); - int 
CPUInfo[4] = {}; - __cpuid(CPUInfo, 1); - has_sse42 = (CPUInfo[2] & cpuid_ecx_sse42) != 0; - has_pclmulqdq = (CPUInfo[2] & cpuid_ecx_pclmulqdq) != 0; -#elif defined(PULSAR_X86_64) - const uint32_t cpuid_ecx_sse42 = (1 << 20); - const uint32_t cpuid_ecx_pclmulqdq = (1 << 1); - unsigned int eax, ebx, ecx, edx; - if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) { - has_sse42 = (ecx & cpuid_ecx_sse42) != 0; - has_pclmulqdq = (ecx & cpuid_ecx_pclmulqdq) != 0; - } -#else - has_sse42 = false; - has_pclmulqdq = false; -#endif - DEBUG_PRINTF1("has_sse42 = %d\n", has_sse42); - DEBUG_PRINTF1("has_pclmulqdq = %d\n", has_pclmulqdq); - initialized = true; - } - - return has_sse42; -} - -chunk_config::chunk_config(size_t words, const chunk_config *next) : words(words), next(next) { - assert(words > 0); - assert(!next || next->words < words); - const size_t loop_bytes = loops() * 8; - make_shift_table(loop_bytes, shift1); - make_shift_table(loop_bytes * 2, shift2); -} - -void chunk_config::make_shift_table(size_t bytes, uint32_t table[256]) { - bitmatrix<32, 32> op; - op.lower_shift(); - op[0] = 0x82f63b78; // reversed CRC-32C polynomial - bitmatrix<32, 32> m; - pow(m, op, bytes * 8); - for (unsigned int i = 0; i < 256; ++i) table[i] = (const bitvector<32>)mul(m, bitvector<32>(i)); -} - -#ifdef PULSAR_X86_64 - -static uint32_t crc32c_chunk(uint32_t crc, const void *buf, const chunk_config &config) { - DEBUG_PRINTF3(" crc32c_chunk(crc = 0x%08x, buf = %p, config.words = " SIZE_T_FORMAT ")", crc, buf, - config.words); - - const uint64_t *pq = (const uint64_t *)buf; - uint64_t crc0 = config.extra() > 1 ? 
_mm_crc32_u64(crc, *pq++) : crc; - uint64_t crc1 = 0; - uint64_t crc2 = 0; - const size_t loops = config.loops(); - for (unsigned int i = 0; i < loops; ++i, ++pq) { - crc1 = _mm_crc32_u64(crc1, pq[1 * loops]); - crc2 = _mm_crc32_u64(crc2, pq[2 * loops]); - crc0 = _mm_crc32_u64(crc0, pq[0 * loops]); - } - pq += 2 * loops; - uint64_t tmp = *pq++; -#ifdef CRC32C_PCLMULQDQ - if (has_pclmulqdq) { - __m128i k = _mm_set_epi64x(config.shift1[1], config.shift2[1]); - __m128i mul1 = _mm_clmulepi64_si128(_mm_cvtsi64_si128((int64_t)crc1), k, 0x10); - __m128i mul0 = _mm_clmulepi64_si128(_mm_cvtsi64_si128((int64_t)crc0), k, 0x00); - tmp ^= (uint64_t)_mm_cvtsi128_si64(mul1); - tmp ^= (uint64_t)_mm_cvtsi128_si64(mul0); - } else -#endif - { - tmp ^= config.shift1[crc1 & 0xff]; - tmp ^= ((uint64_t)config.shift1[(crc1 >> 8) & 0xff]) << 8; - tmp ^= ((uint64_t)config.shift1[(crc1 >> 16) & 0xff]) << 16; - tmp ^= ((uint64_t)config.shift1[(crc1 >> 24) & 0xff]) << 24; - - tmp ^= config.shift2[crc0 & 0xff]; - tmp ^= ((uint64_t)config.shift2[(crc0 >> 8) & 0xff]) << 8; - tmp ^= ((uint64_t)config.shift2[(crc0 >> 16) & 0xff]) << 16; - tmp ^= ((uint64_t)config.shift2[(crc0 >> 24) & 0xff]) << 24; - } - crc2 = _mm_crc32_u64(crc2, tmp); - if (config.extra() > 2) // only if words is divisible by 3 - crc2 = _mm_crc32_u64(crc2, *pq); - crc = (uint32_t)crc2; - - DEBUG_PRINTF1(" = 0x%08x\n", crc); - return crc; -} - -static uint32_t crc32c_words(uint32_t crc, const void *buf, size_t count) { - DEBUG_PRINTF3(" crc32c_words(crc = 0x%08x, buf = %p, count = " SIZE_T_FORMAT ")", crc, buf, count); - - const uint64_t *pq = (const uint64_t *)buf; - size_t loops = (count + 7) / 8; - assert(loops > 0); - switch (count & 7) { - case 0: - do { - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 7: - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 6: - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 5: - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 4: - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 3: - 
crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 2: - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - case 1: - crc = (uint32_t)_mm_crc32_u64(crc, *pq++); - } while (--loops > 0); - } - - DEBUG_PRINTF1(" = 0x%08x\n", crc); - return crc; -} - -static uint32_t crc32c_bytes(uint32_t crc, const void *buf, size_t count) { - DEBUG_PRINTF3(" crc32c_bytes(crc = 0x%08x, buf = %p, count = " SIZE_T_FORMAT ")", crc, buf, count); - - const uint8_t *pc = (const uint8_t *)buf; - size_t loops = (count + 7) / 8; - assert(loops > 0); - switch (count & 7) { - case 0: - do { - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 7: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 6: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 5: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 4: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 3: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 2: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - case 1: - crc = (uint32_t)_mm_crc32_u8(crc, *pc++); - } while (--loops > 0); - } - - DEBUG_PRINTF1(" = 0x%08x\n", crc); - return crc; -} - -uint32_t crc32c(uint32_t init, const void *buf, size_t len, const chunk_config *config) { - DEBUG_PRINTF3("crc32c(init = 0x%08x, buf = %p, len = " SIZE_T_FORMAT ")\n", init, buf, len); - - uint32_t crc = ~init; - const char *pc = (const char *)buf; - if (len >= 24) { - if ((uintptr_t)pc & 7) { - size_t unaligned = 8 - ((uintptr_t)pc & 7); - crc = crc32c_bytes(crc, pc, unaligned); - pc += unaligned; - len -= unaligned; - } - size_t words = len / 8; - while (config) { - while (words >= config->words) { - crc = crc32c_chunk(crc, pc, *config); - pc += config->words * 8; - words -= config->words; - } - config = config->next; - } - if (words > 0) { - crc = crc32c_words(crc, pc, words); - pc += words * 8; - } - len &= 7; - } - if (len) crc = crc32c_bytes(crc, pc, len); - crc = ~crc; - - DEBUG_PRINTF1("crc = 0x%08x\n", crc); - return crc; -} - -#else // ! 
PULSAR_X86_64 - -uint32_t crc32c(uint32_t init, const void *buf, size_t len, const chunk_config *config) { - // SSE 4.2 extension for hw implementation are not present - return crc32c_sw(init, buf, len); // fallback to the software implementation -} - -#endif - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/checksum/crc32c_sse42.h b/pulsar-client-cpp/lib/checksum/crc32c_sse42.h deleted file mode 100644 index 8e91bfc3c95f2..0000000000000 --- a/pulsar-client-cpp/lib/checksum/crc32c_sse42.h +++ /dev/null @@ -1,47 +0,0 @@ -/******************************************************************************* - * Copyright 2014 Trevor Robinson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- ******************************************************************************/ -#include "int_types.h" - -namespace pulsar { - -bool crc32c_initialize(); - -class chunk_config { - public: - enum - { - min_words = 4 - }; - - const size_t words; - const chunk_config *const next; - uint32_t shift1[256]; - uint32_t shift2[256]; - - chunk_config(size_t words, const chunk_config *next = 0); - - size_t loops() const { return (words - 1) / 3; } - - size_t extra() const { return (words - 1) % 3 + 1; } - - private: - chunk_config &operator=(const chunk_config &); - - static void make_shift_table(size_t bytes, uint32_t table[256]); -}; - -uint32_t crc32c(uint32_t init, const void *buf, size_t len, const chunk_config *config); -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/checksum/crc32c_sw.cc b/pulsar-client-cpp/lib/checksum/crc32c_sw.cc deleted file mode 100644 index d03802ce456df..0000000000000 --- a/pulsar-client-cpp/lib/checksum/crc32c_sw.cc +++ /dev/null @@ -1,102 +0,0 @@ -/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction - * Copyright (C) 2013 Mark Adler - * Version 1.1 1 Aug 2013 Mark Adler - */ - -/* - This software is provided 'as-is', without any express or implied - warranty. In no event will the author be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. 
- - Mark Adler - madler@alumni.caltech.edu - */ - -/* Use hardware CRC instruction on Intel SSE 4.2 processors. This computes a - CRC-32C, *not* the CRC-32 used by Ethernet and zip, gzip, etc. A software - version is provided as a fall-back, as well as for speed comparisons. */ - -/* Version history: - 1.0 10 Feb 2013 First version - 1.1 1 Aug 2013 Correct comments on why three crc instructions in parallel - */ - -#include "crc32c_sw.h" -#include - -namespace pulsar { - -/* CRC-32C (iSCSI) polynomial in reversed bit order. */ -#define POLY 0x82f63b78 - -/* Table for a quadword-at-a-time software crc. */ -std::once_flag crc32c_once_sw; -static uint32_t crc32c_table[8][256]; - -/* Construct table for software CRC-32C calculation. */ -static void crc32c_init_sw() { - uint32_t n, crc, k; - - for (n = 0; n < 256; n++) { - crc = n; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1; - crc32c_table[0][n] = crc; - } - for (n = 0; n < 256; n++) { - crc = crc32c_table[0][n]; - for (k = 1; k < 8; k++) { - crc = crc32c_table[0][crc & 0xff] ^ (crc >> 8); - crc32c_table[k][n] = crc; - } - } -} - -/* Table-driven software version as a fall-back. This is about 15 times slower - than using the hardware instructions. This assumes little-endian integers, - as is the case on Intel processors that the assembler code here is for. 
*/ -uint32_t crc32c_sw(uint32_t crci, const void *buf, int len) { - const char *next = (const char *)buf; - uint64_t crc; - - std::call_once(crc32c_once_sw, &crc32c_init_sw); - crc = crci ^ 0xffffffff; - while (len && ((uintptr_t)next & 7) != 0) { - crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); - len--; - } - while (len >= 8) { - crc ^= *(uint64_t *)next; - crc = crc32c_table[7][crc & 0xff] ^ crc32c_table[6][(crc >> 8) & 0xff] ^ - crc32c_table[5][(crc >> 16) & 0xff] ^ crc32c_table[4][(crc >> 24) & 0xff] ^ - crc32c_table[3][(crc >> 32) & 0xff] ^ crc32c_table[2][(crc >> 40) & 0xff] ^ - crc32c_table[1][(crc >> 48) & 0xff] ^ crc32c_table[0][crc >> 56]; - next += 8; - len -= 8; - } - while (len) { - crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); - len--; - } - return (uint32_t)crc ^ 0xffffffff; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/checksum/crc32c_sw.h b/pulsar-client-cpp/lib/checksum/crc32c_sw.h deleted file mode 100644 index 9db6e979abfec..0000000000000 --- a/pulsar-client-cpp/lib/checksum/crc32c_sw.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include - -namespace pulsar { - -uint32_t crc32c_sw(uint32_t crc, const void* data, int length); - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/checksum/gf2.hpp b/pulsar-client-cpp/lib/checksum/gf2.hpp deleted file mode 100644 index 06bde0edde22b..0000000000000 --- a/pulsar-client-cpp/lib/checksum/gf2.hpp +++ /dev/null @@ -1,203 +0,0 @@ -/******************************************************************************* - * Copyright 2014 Trevor Robinson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- ******************************************************************************/ -#include "int_types.h" -#include // std::swap - -#ifdef _MSC_VER -#pragma warning(disable:4146) // unary minus operator applied to unsigned type, result still unsigned -#endif - -namespace pulsar { - -// Type trait for unsigned integers of at least N bytes -template -struct uint_bytes { - enum { - is_defined = 0 - }; -}; - -template -struct defined_uint_bytes { - enum { - is_defined = 1 - }; - typedef T type; -}; - -template<> struct uint_bytes<1> : defined_uint_bytes { -}; -template<> struct uint_bytes<2> : defined_uint_bytes { -}; -template<> struct uint_bytes<3> : defined_uint_bytes { -}; -template<> struct uint_bytes<4> : defined_uint_bytes { -}; -template<> struct uint_bytes<5> : defined_uint_bytes { -}; -template<> struct uint_bytes<6> : defined_uint_bytes { -}; -template<> struct uint_bytes<7> : defined_uint_bytes { -}; -template<> struct uint_bytes<8> : defined_uint_bytes { -}; - -// Type trait for unsigned integers of at least N bits -template -struct uint_bits : uint_bytes<(N + 7) / 8> { - enum { - bits = 8 * sizeof(typename uint_bits::type) - }; -}; - -// Bit vector of N bits; currently just exposes an unsigned integer -template -class bitvector { - typedef typename uint_bits::type type; - type value; - public: - bitvector() { - } - bitvector(type value) - : value(value) { - } - operator type&() { - return value; - } - operator type() const { - return value; - } -}; - -// Bit matrix of M columns by N rows -template -class bitmatrix { - typedef bitvector row; - row value[N]; - public: - bitmatrix() { - } - explicit bitmatrix(bool b) { - if (b) - identity(); - else - null(); - } - void null() { - for (unsigned int i = 0; i < N; ++i) - value[i] = 0; - } - void identity() { - for (unsigned int i = 0; i < N; ++i) - value[i] = i < M ? (const row) 1 << i : 0; - } - void lower_shift() { - for (unsigned int i = 0; i < N; ++i) - value[i] = i > 0 && i <= M ? 
(const row) 1 << (i - 1) : 0; - } - void upper_shift() { - for (unsigned int i = 0; i < N; ++i) - value[i] = i + 1 < M ? (const row) 1 << (i + 1) : 0; - } - operator bitvector*() { - return value; - } - operator const bitvector*() const { - return value; - } -}; - -/* - * Multiplies MxN matrix A by N-row vector B in GF(2). - * - * For M,N = 3: - * - * | a b c | | x | | ax + by + cz | - * A = | d e f |, B = | y |, AB = | dx + ey + fz | - * | g h i | | z | | gx + hy + iz | - * - * In GF(2), addition corresponds to XOR and multiplication to AND: - * - * | (a & x) ^ (b & y) ^ (c & z) | - * AB = | (d & x) ^ (e & y) ^ (f & z) | - * | (g & x) ^ (h & y) ^ (i & z) | - * - * Trading variable names for [row,column] indices: - * - * AB = (A[,0] & B[0]) ^ (A[,1] & B[1]) ^ (A[,2] & B[2]) ^ ... - * - * Assuming columns are represented as words and rows as bit offsets, - * all rows of AB can be calculated in parallel: - * - * AB = (A[0] & -((B >> 0) & 1) ^ (A[1] & -((B >> 1) & 1) ^ ... - */ -template -bitvector mul(const bitmatrix& a, const bitvector b) { - bitvector result(0); - for (unsigned int i = 0; i < N; ++i) - result ^= a[i] & -((b >> i) & 1); - return result; -} - -/* - * Multiplies MxN matrix A by NxP matrix B in GF(2). - * - * For M,N,P = 3: - * - * | a b c | | j k l | | (aj + bm + cp) (ak + bn + cq) (al + bo + cr) | - * A = | d e f |, B = | m n o |, AB = | (dj + em + fp) (dk + en + fq) (dl + eo + fr) | - * | g h i | | p q r | | (gj + hm + ip) (gk + hn + iq) (gl + ho + ir) | - */ -template -void mul(bitmatrix& result, const bitmatrix& a, const bitmatrix& b) { - for (unsigned int i = 0; i < P; i++) - result[i] = mul(a, b[i]); -} - -/* - * Squares an NxN matrix in GF(2). - */ -template -void sqr(bitmatrix& result, const bitmatrix& a) { - mul(result, a, a); -} - -/* - * Raises an NxN matrix to the power n in GF(2) by squaring. 
- */ -template -void pow(bitmatrix& result, const bitmatrix& a, uint64_t n) { - result.identity(); - if (n > 0) { - bitmatrix square = a; - bitmatrix temp; - bitmatrix *ptemp = &temp, *psquare = &square, *presult = &result; - for (;;) { - if (n & 1) { - mul(*ptemp, *presult, *psquare); - std::swap(ptemp, presult); - } - if (!(n >>= 1)) - break; - sqr(*ptemp, *psquare); - std::swap(ptemp, psquare); - } - if (presult != &result) - result = *presult; - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/checksum/int_types.h b/pulsar-client-cpp/lib/checksum/int_types.h deleted file mode 100644 index 6623bd026c93f..0000000000000 --- a/pulsar-client-cpp/lib/checksum/int_types.h +++ /dev/null @@ -1,43 +0,0 @@ -/******************************************************************************* - * Copyright 2014 Trevor Robinson - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- ******************************************************************************/ -#include // size_t - -#if defined(_MSC_VER) && _MSC_VER < 1600 // stdint.h added in MSVC 2010 - -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; - -#else - -#include - -#endif - -#if defined(_MSC_VER) && _MSC_VER < 1900 // MSVC 2015 - -#define SIZE_T_FORMAT "%Iu" - -#else - -#define SIZE_T_FORMAT "%zu" - -#endif diff --git a/pulsar-client-cpp/lib/lz4/lz4.cc b/pulsar-client-cpp/lib/lz4/lz4.cc deleted file mode 100644 index d63b977ac89c9..0000000000000 --- a/pulsar-client-cpp/lib/lz4/lz4.cc +++ /dev/null @@ -1,1533 +0,0 @@ -/* - LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2015, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/Cyan4973/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/************************************** - * Tuning parameters - **************************************/ -/* - * HEAPMODE : - * Select how default compression functions will allocate memory for their hash table, - * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). - */ -#define HEAPMODE 0 - -/* - * ACCELERATION_DEFAULT : - * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 - */ -#define ACCELERATION_DEFAULT 1 - -/************************************** - * CPU Feature Detection - **************************************/ -/* - * LZ4_FORCE_SW_BITCOUNT - * Define this parameter if your target system or compiler does not support hardware bit count - */ -#if defined(_MSC_VER) && \ - defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */ -#define LZ4_FORCE_SW_BITCOUNT -#endif - -/************************************** - * Includes - **************************************/ -#include "lz4.h" - -/************************************** - * Compiler Options - **************************************/ -#ifdef _MSC_VER /* Visual Studio */ -#define FORCE_INLINE static __forceinline -#include -#pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#pragma warning(disable : 
4293) /* disable: C4293: too large shift (32-bits) */ -#else -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */ -#if defined(__GNUC__) || defined(__clang__) -#define FORCE_INLINE static inline __attribute__((always_inline)) -#else -#define FORCE_INLINE static inline -#endif -#else -#define FORCE_INLINE static -#endif /* __STDC_VERSION__ */ -#endif /* _MSC_VER */ - -/* LZ4_GCC_VERSION is defined into lz4.h */ -#if (LZ4_GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__) -#define expect(expr, value) (__builtin_expect((expr), (value))) -#else -#define expect(expr, value) (expr) -#endif - -#define likely(expr) expect((expr) != 0, 1) -#define unlikely(expr) expect((expr) != 0, 0) - -/************************************** - * Memory routines - **************************************/ -#include /* malloc, calloc, free */ -#define ALLOCATOR(n, s) calloc(n, s) -#define FREEMEM free -#include /* memset, memcpy */ -#define MEM_INIT memset - -/************************************** - * Basic Types - **************************************/ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */ -#include -typedef uint8_t BYTE; -typedef uint16_t U16; -typedef uint32_t U32; -typedef int32_t S32; -typedef uint64_t U64; -#else -typedef unsigned char BYTE; -typedef unsigned short U16; -typedef unsigned int U32; -typedef signed int S32; -typedef unsigned long long U64; -#endif - -/************************************** - * Reading and writing into memory - **************************************/ -#define STEPSIZE sizeof(size_t) - -namespace pulsar { - -static unsigned LZ4_64bits(void) { return sizeof(void *) == 8; } - -static unsigned LZ4_isLittleEndian(void) { - const union { - U32 i; - BYTE c[4]; - } one = {1}; /* don't use static : performance detrimental */ - return one.c[0]; -} - -static U16 LZ4_read16(const void *memPtr) { - U16 val16; - memcpy(&val16, memPtr, 2); - return val16; -} - -static U16 LZ4_readLE16(const 
void *memPtr) { - if (LZ4_isLittleEndian()) { - return LZ4_read16(memPtr); - } else { - const BYTE *p = (const BYTE *)memPtr; - return (U16)((U16)p[0] + (p[1] << 8)); - } -} - -static void LZ4_writeLE16(void *memPtr, U16 value) { - if (LZ4_isLittleEndian()) { - memcpy(memPtr, &value, 2); - } else { - BYTE *p = (BYTE *)memPtr; - p[0] = (BYTE)value; - p[1] = (BYTE)(value >> 8); - } -} - -static U32 LZ4_read32(const void *memPtr) { - U32 val32; - memcpy(&val32, memPtr, 4); - return val32; -} - -static U64 LZ4_read64(const void *memPtr) { - U64 val64; - memcpy(&val64, memPtr, 8); - return val64; -} - -static size_t LZ4_read_ARCH(const void *p) { - if (LZ4_64bits()) - return (size_t)LZ4_read64(p); - else - return (size_t)LZ4_read32(p); -} - -static void LZ4_copy4(void *dstPtr, const void *srcPtr) { memcpy(dstPtr, srcPtr, 4); } - -static void LZ4_copy8(void *dstPtr, const void *srcPtr) { memcpy(dstPtr, srcPtr, 8); } - -/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */ -static void LZ4_wildCopy(void *dstPtr, const void *srcPtr, void *dstEnd) { - BYTE *d = (BYTE *)dstPtr; - const BYTE *s = (const BYTE *)srcPtr; - BYTE *e = (BYTE *)dstEnd; - do { - LZ4_copy8(d, s); - d += 8; - s += 8; - } while (d < e); -} - -/************************************** - * Common Constants - **************************************/ -#define MINMATCH 4 - -#define COPYLENGTH 8 -#define LASTLITERALS 5 -#define MFLIMIT (COPYLENGTH + MINMATCH) -static const int LZ4_minLength = (MFLIMIT + 1); - -#define KB *(1 << 10) -#define MB *(1 << 20) -#define GB *(1U << 30) - -#define MAXD_LOG 16 -#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) - -#define ML_BITS 4 -#define ML_MASK ((1U << ML_BITS) - 1) -#define RUN_BITS (8 - ML_BITS) -#define RUN_MASK ((1U << RUN_BITS) - 1) - -/************************************** - * Common Utils - **************************************/ -#define LZ4_STATIC_ASSERT(c) \ - { \ - enum \ - { \ - LZ4_static_assert = 1 / (int)(!!(c)) \ - }; \ - } /* 
use only *after* variable declarations */ - -/************************************** - * Common functions - **************************************/ -static unsigned LZ4_NbCommonBytes(size_t val) { - if (LZ4_isLittleEndian()) { - if (LZ4_64bits()) { -#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward64(&r, (U64)val); - return (int)(r >> 3); -#elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctzll((U64)val) >> 3); -#else - static const int DeBruijnBytePos[64] = {0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, - 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, - 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, - 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7}; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -#endif - } else /* 32 bits */ - { -#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r; - _BitScanForward(&r, (U32)val); - return (int)(r >> 3); -#elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctz((U32)val) >> 3); -#else - static const int DeBruijnBytePos[32] = {0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, - 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1}; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -#endif - } - } else /* Big Endian CPU */ - { - if (LZ4_64bits()) { -#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse64(&r, val); - return (unsigned)(r >> 3); -#elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clzll((U64)val) >> 3); -#else - unsigned r; - if (!(val >> 32)) { - r = 4; - } else { - r = 0; - val >>= 32; - } - if (!(val >> 16)) { - r += 2; - val >>= 8; - } else { - val >>= 24; - } - r += (!val); - return r; -#endif - } else /* 32 bits */ - { -#if 
defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse(&r, (unsigned long)val); - return (unsigned)(r >> 3); -#elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clz((U32)val) >> 3); -#else - unsigned r; - if (!(val >> 16)) { - r = 2; - val >>= 8; - } else { - r = 0; - val >>= 24; - } - r += (!val); - return r; -#endif - } - } -} - -static unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit) { - const BYTE *const pStart = pIn; - - while (likely(pIn < pInLimit - (STEPSIZE - 1))) { - size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); - if (!diff) { - pIn += STEPSIZE; - pMatch += STEPSIZE; - continue; - } - pIn += LZ4_NbCommonBytes(diff); - return (unsigned)(pIn - pStart); - } - - if (LZ4_64bits()) - if ((pIn < (pInLimit - 3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { - pIn += 4; - pMatch += 4; - } - if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { - pIn += 2; - pMatch += 2; - } - if ((pIn < pInLimit) && (*pMatch == *pIn)) pIn++; - return (unsigned)(pIn - pStart); -} - -#ifndef LZ4_COMMONDEFS_ONLY -/************************************** - * Local Constants - **************************************/ -#define LZ4_HASHLOG (LZ4_MEMORY_USAGE - 2) -#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) -#define HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ - -static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1)); -static const U32 LZ4_skipTrigger = - 6; /* Increase this value ==> compression run slower on incompressible data */ - -/************************************** - * Local Structures and types - **************************************/ -typedef struct { - U32 hashTable[HASH_SIZE_U32]; - U32 currentOffset; - U32 initCheck; - const BYTE *dictionary; - BYTE *bufferStart; /* obsolete, used for slideInputBuffer */ - U32 dictSize; -} LZ4_stream_t_internal; - -typedef enum -{ - notLimited = 0, 
- limitedOutput = 1 -} limitedOutput_directive; -typedef enum -{ - byPtr, - byU32, - byU16 -} tableType_t; - -typedef enum -{ - noDict = 0, - withPrefix64k, - usingExtDict -} dict_directive; -typedef enum -{ - noDictIssue = 0, - dictSmall -} dictIssue_directive; - -typedef enum -{ - endOnOutputSize = 0, - endOnInputSize = 1 -} endCondition_directive; -typedef enum -{ - full = 0, - partial = 1 -} earlyEnd_directive; - -/************************************** - * Local Utils - **************************************/ -int LZ4_versionNumber(void) { return LZ4_VERSION_NUMBER; } -int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } -int LZ4_sizeofState() { return LZ4_STREAMSIZE; } - -/******************************** - * Compression functions - ********************************/ - -static U32 LZ4_hashSequence(U32 sequence, tableType_t const tableType) { - if (tableType == byU16) - return (((sequence)*2654435761U) >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1))); - else - return (((sequence)*2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG)); -} - -static const U64 prime5bytes = 889523592379ULL; -static U32 LZ4_hashSequence64(size_t sequence, tableType_t const tableType) { - const U32 hashLog = (tableType == byU16) ? 
LZ4_HASHLOG + 1 : LZ4_HASHLOG; - const U32 hashMask = (1 << hashLog) - 1; - return ((sequence * prime5bytes) >> (40 - hashLog)) & hashMask; -} - -static U32 LZ4_hashSequenceT(size_t sequence, tableType_t const tableType) { - if (LZ4_64bits()) return LZ4_hashSequence64(sequence, tableType); - return LZ4_hashSequence((U32)sequence, tableType); -} - -static U32 LZ4_hashPosition(const void *p, tableType_t tableType) { - return LZ4_hashSequenceT(LZ4_read_ARCH(p), tableType); -} - -static void LZ4_putPositionOnHash(const BYTE *p, U32 h, void *tableBase, tableType_t const tableType, - const BYTE *srcBase) { - switch (tableType) { - case byPtr: { - const BYTE **hashTable = (const BYTE **)tableBase; - hashTable[h] = p; - return; - } - case byU32: { - U32 *hashTable = (U32 *)tableBase; - hashTable[h] = (U32)(p - srcBase); - return; - } - case byU16: { - U16 *hashTable = (U16 *)tableBase; - hashTable[h] = (U16)(p - srcBase); - return; - } - } -} - -static void LZ4_putPosition(const BYTE *p, void *tableBase, tableType_t tableType, const BYTE *srcBase) { - U32 h = LZ4_hashPosition(p, tableType); - LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); -} - -static const BYTE *LZ4_getPositionOnHash(U32 h, void *tableBase, tableType_t tableType, const BYTE *srcBase) { - if (tableType == byPtr) { - const BYTE **hashTable = (const BYTE **)tableBase; - return hashTable[h]; - } - if (tableType == byU32) { - U32 *hashTable = (U32 *)tableBase; - return hashTable[h] + srcBase; - } - { - U16 *hashTable = (U16 *)tableBase; - return hashTable[h] + srcBase; - } /* default, to ensure a return */ -} - -static const BYTE *LZ4_getPosition(const BYTE *p, void *tableBase, tableType_t tableType, - const BYTE *srcBase) { - U32 h = LZ4_hashPosition(p, tableType); - return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); -} - -FORCE_INLINE int LZ4_compress_generic(void *const ctx, const char *const source, char *const dest, - const int inputSize, const int maxOutputSize, - const 
limitedOutput_directive outputLimited, - const tableType_t tableType, const dict_directive dict, - const dictIssue_directive dictIssue, const U32 acceleration) { - LZ4_stream_t_internal *const dictPtr = (LZ4_stream_t_internal *)ctx; - - const BYTE *ip = (const BYTE *)source; - const BYTE *base; - const BYTE *lowLimit; - const BYTE *const lowRefLimit = ip - dictPtr->dictSize; - const BYTE *const dictionary = dictPtr->dictionary; - const BYTE *const dictEnd = dictionary + dictPtr->dictSize; - const size_t dictDelta = dictEnd - (const BYTE *)source; - const BYTE *anchor = (const BYTE *)source; - const BYTE *const iend = ip + inputSize; - const BYTE *const mflimit = iend - MFLIMIT; - const BYTE *const matchlimit = iend - LASTLITERALS; - - BYTE *op = (BYTE *)dest; - BYTE *const olimit = op + maxOutputSize; - - U32 forwardH; - size_t refDelta = 0; - - /* Init conditions */ - if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) - return 0; /* Unsupported input size, too large (or negative) */ - switch (dict) { - case noDict: - default: - base = (const BYTE *)source; - lowLimit = (const BYTE *)source; - break; - case withPrefix64k: - base = (const BYTE *)source - dictPtr->currentOffset; - lowLimit = (const BYTE *)source - dictPtr->dictSize; - break; - case usingExtDict: - base = (const BYTE *)source - dictPtr->currentOffset; - lowLimit = (const BYTE *)source; - break; - } - if ((tableType == byU16) && (inputSize >= LZ4_64Klimit)) - return 0; /* Size too large (not within 64K limit) */ - if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ - - /* First Byte */ - LZ4_putPosition(ip, ctx, tableType, base); - ip++; - forwardH = LZ4_hashPosition(ip, tableType); - - /* Main Loop */ - for (;;) { - const BYTE *match; - BYTE *token; - { - const BYTE *forwardIp = ip; - unsigned step = 1; - unsigned searchMatchNb = acceleration << LZ4_skipTrigger; - - /* Find a match */ - do { - U32 h = forwardH; - ip = forwardIp; - forwardIp += step; - 
step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimit)) goto _last_literals; - - match = LZ4_getPositionOnHash(h, ctx, tableType, base); - if (dict == usingExtDict) { - if (match < (const BYTE *)source) { - refDelta = dictDelta; - lowLimit = dictionary; - } else { - refDelta = 0; - lowLimit = (const BYTE *)source; - } - } - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, ctx, tableType, base); - - } while (((dictIssue == dictSmall) ? (match < lowRefLimit) : 0) || - ((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip)) || - (LZ4_read32(match + refDelta) != LZ4_read32(ip))); - } - - /* Catch up */ - while ((ip > anchor) && (match + refDelta > lowLimit) && (unlikely(ip[-1] == match[refDelta - 1]))) { - ip--; - match--; - } - - { - /* Encode Literal length */ - unsigned litLength = (unsigned)(ip - anchor); - token = op++; - if ((outputLimited) && - (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength / 255) > olimit))) - return 0; /* Check output limit */ - if (litLength >= RUN_MASK) { - int len = (int)litLength - RUN_MASK; - *token = (RUN_MASK << ML_BITS); - for (; len >= 255; len -= 255) *op++ = 255; - *op++ = (BYTE)len; - } else - *token = (BYTE)(litLength << ML_BITS); - - /* Copy Literals */ - LZ4_wildCopy(op, anchor, op + litLength); - op += litLength; - } - - _next_match: - /* Encode Offset */ - LZ4_writeLE16(op, (U16)(ip - match)); - op += 2; - - /* Encode MatchLength */ - { - unsigned matchLength; - - if ((dict == usingExtDict) && (lowLimit == dictionary)) { - const BYTE *limit; - match += refDelta; - limit = ip + (dictEnd - match); - if (limit > matchlimit) limit = matchlimit; - matchLength = LZ4_count(ip + MINMATCH, match + MINMATCH, limit); - ip += MINMATCH + matchLength; - if (ip == limit) { - unsigned more = LZ4_count(ip, (const BYTE *)source, matchlimit); - matchLength += more; - ip += more; - } - } else { - matchLength = LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit); - ip 
+= MINMATCH + matchLength; - } - - if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength >> 8) > olimit))) - return 0; /* Check output limit */ - if (matchLength >= ML_MASK) { - *token += ML_MASK; - matchLength -= ML_MASK; - for (; matchLength >= 510; matchLength -= 510) { - *op++ = 255; - *op++ = 255; - } - if (matchLength >= 255) { - matchLength -= 255; - *op++ = 255; - } - *op++ = (BYTE)matchLength; - } else - *token += (BYTE)(matchLength); - } - - anchor = ip; - - /* Test end of chunk */ - if (ip > mflimit) break; - - /* Fill table */ - LZ4_putPosition(ip - 2, ctx, tableType, base); - - /* Test next position */ - match = LZ4_getPosition(ip, ctx, tableType, base); - if (dict == usingExtDict) { - if (match < (const BYTE *)source) { - refDelta = dictDelta; - lowLimit = dictionary; - } else { - refDelta = 0; - lowLimit = (const BYTE *)source; - } - } - LZ4_putPosition(ip, ctx, tableType, base); - if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1) && (match + MAX_DISTANCE >= ip) && - (LZ4_read32(match + refDelta) == LZ4_read32(ip))) { - token = op++; - *token = 0; - goto _next_match; - } - - /* Prepare next loop */ - forwardH = LZ4_hashPosition(++ip, tableType); - } - -_last_literals: - /* Encode Last Literals */ - { - const size_t lastRun = (size_t)(iend - anchor); - if ((outputLimited) && - ((op - (BYTE *)dest) + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize)) - return 0; /* Check output limit */ - if (lastRun >= RUN_MASK) { - size_t accumulator = lastRun - RUN_MASK; - *op++ = RUN_MASK << ML_BITS; - for (; accumulator >= 255; accumulator -= 255) *op++ = 255; - *op++ = (BYTE)accumulator; - } else { - *op++ = (BYTE)(lastRun << ML_BITS); - } - memcpy(op, anchor, lastRun); - op += lastRun; - } - - /* End */ - return (int)(((char *)op) - dest); -} - -int LZ4_compress_fast_extState(void *state, const char *source, char *dest, int inputSize, int maxOutputSize, - int acceleration) { - LZ4_resetStream((LZ4_stream_t 
*)state); - if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; - - if (maxOutputSize >= LZ4_compressBound(inputSize)) { - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, - noDictIssue, acceleration); - else - return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, - LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration); - } else { - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, - noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, - LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration); - } -} - -int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration) { -#if (HEAPMODE) - void *ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ -#else - LZ4_stream_t ctx; - void *ctxPtr = &ctx; -#endif - - int result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); - -#if (HEAPMODE) - FREEMEM(ctxPtr); -#endif - return result; -} - -int LZ4_compress_default(const char *source, char *dest, int inputSize, int maxOutputSize) { - return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1); -} - -/* hidden debug function */ -/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */ -int LZ4_compress_fast_force(const char *source, char *dest, int inputSize, int maxOutputSize, - int acceleration) { - LZ4_stream_t ctx; - - LZ4_resetStream(&ctx); - - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, - noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, - LZ4_64bits() ? 
byU32 : byPtr, noDict, noDictIssue, acceleration); -} - -/******************************** - * destSize variant - ********************************/ - -static int LZ4_compress_destSize_generic(void *const ctx, const char *const src, char *const dst, - int *const srcSizePtr, const int targetDstSize, - const tableType_t tableType) { - const BYTE *ip = (const BYTE *)src; - const BYTE *base = (const BYTE *)src; - const BYTE *lowLimit = (const BYTE *)src; - const BYTE *anchor = ip; - const BYTE *const iend = ip + *srcSizePtr; - const BYTE *const mflimit = iend - MFLIMIT; - const BYTE *const matchlimit = iend - LASTLITERALS; - - BYTE *op = (BYTE *)dst; - BYTE *const oend = op + targetDstSize; - BYTE *const oMaxLit = - op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */; - BYTE *const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */); - BYTE *const oMaxSeq = oMaxLit - 1 /* token */; - - U32 forwardH; - - /* Init conditions */ - if (targetDstSize < 1) return 0; /* Impossible to store anything */ - if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) - return 0; /* Unsupported input size, too large (or negative) */ - if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit)) - return 0; /* Size too large (not within 64K limit) */ - if (*srcSizePtr < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ - - /* First Byte */ - *srcSizePtr = 0; - LZ4_putPosition(ip, ctx, tableType, base); - ip++; - forwardH = LZ4_hashPosition(ip, tableType); - - /* Main Loop */ - for (;;) { - const BYTE *match; - BYTE *token; - { - const BYTE *forwardIp = ip; - unsigned step = 1; - unsigned searchMatchNb = 1 << LZ4_skipTrigger; - - /* Find a match */ - do { - U32 h = forwardH; - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimit)) goto _last_literals; - - match = LZ4_getPositionOnHash(h, ctx, tableType, base); - forwardH = 
LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, ctx, tableType, base); - - } while (((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip)) || - (LZ4_read32(match) != LZ4_read32(ip))); - } - - /* Catch up */ - while ((ip > anchor) && (match > lowLimit) && (unlikely(ip[-1] == match[-1]))) { - ip--; - match--; - } - - { - /* Encode Literal length */ - unsigned litLength = (unsigned)(ip - anchor); - token = op++; - if (op + ((litLength + 240) / 255) + litLength > oMaxLit) { - /* Not enough space for a last match */ - op--; - goto _last_literals; - } - if (litLength >= RUN_MASK) { - unsigned len = litLength - RUN_MASK; - *token = (RUN_MASK << ML_BITS); - for (; len >= 255; len -= 255) *op++ = 255; - *op++ = (BYTE)len; - } else - *token = (BYTE)(litLength << ML_BITS); - - /* Copy Literals */ - LZ4_wildCopy(op, anchor, op + litLength); - op += litLength; - } - - _next_match: - /* Encode Offset */ - LZ4_writeLE16(op, (U16)(ip - match)); - op += 2; - - /* Encode MatchLength */ - { - size_t matchLength; - - matchLength = LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit); - - if (op + ((matchLength + 240) / 255) > oMaxMatch) { - /* Match description too long : reduce it */ - matchLength = (15 - 1) + (oMaxMatch - op) * 255; - } - // printf("offset %5i, matchLength%5i \n", (int)(ip-match), matchLength + MINMATCH); - ip += MINMATCH + matchLength; - - if (matchLength >= ML_MASK) { - *token += ML_MASK; - matchLength -= ML_MASK; - while (matchLength >= 255) { - matchLength -= 255; - *op++ = 255; - } - *op++ = (BYTE)matchLength; - } else - *token += (BYTE)(matchLength); - } - - anchor = ip; - - /* Test end of block */ - if (ip > mflimit) break; - if (op > oMaxSeq) break; - - /* Fill table */ - LZ4_putPosition(ip - 2, ctx, tableType, base); - - /* Test next position */ - match = LZ4_getPosition(ip, ctx, tableType, base); - LZ4_putPosition(ip, ctx, tableType, base); - if ((match + MAX_DISTANCE >= ip) && (LZ4_read32(match) == LZ4_read32(ip))) { - token = 
op++; - *token = 0; - goto _next_match; - } - - /* Prepare next loop */ - forwardH = LZ4_hashPosition(++ip, tableType); - } - -_last_literals: - /* Encode Last Literals */ - { - size_t lastRunSize = (size_t)(iend - anchor); - if (op + 1 /* token */ + ((lastRunSize + 240) / 255) /* litLength */ + lastRunSize /* literals */ > - oend) { - /* adapt lastRunSize to fill 'dst' */ - lastRunSize = (oend - op) - 1; - lastRunSize -= (lastRunSize + 240) / 255; - } - ip = anchor + lastRunSize; - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = RUN_MASK << ML_BITS; - for (; accumulator >= 255; accumulator -= 255) *op++ = 255; - *op++ = (BYTE)accumulator; - } else { - *op++ = (BYTE)(lastRunSize << ML_BITS); - } - memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int)(((const char *)ip) - src); - return (int)(((char *)op) - dst); -} - -static int LZ4_compress_destSize_extState(void *state, const char *src, char *dst, int *srcSizePtr, - int targetDstSize) { - LZ4_resetStream((LZ4_stream_t *)state); - - if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) /* compression success is guaranteed */ - { - return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); - } else { - if (*srcSizePtr < LZ4_64Klimit) - return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, byU16); - else - return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, - LZ4_64bits() ? 
byU32 : byPtr); - } -} - -int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr, int targetDstSize) { -#if (HEAPMODE) - void *ctx = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ -#else - LZ4_stream_t ctxBody; - void *ctx = &ctxBody; -#endif - - int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); - -#if (HEAPMODE) - FREEMEM(ctx); -#endif - return result; -} - -/******************************** - * Streaming functions - ********************************/ - -LZ4_stream_t *LZ4_createStream(void) { - LZ4_stream_t *lz4s = (LZ4_stream_t *)ALLOCATOR(8, LZ4_STREAMSIZE_U64); - LZ4_STATIC_ASSERT( - LZ4_STREAMSIZE >= - sizeof( - LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ - LZ4_resetStream(lz4s); - return lz4s; -} - -void LZ4_resetStream(LZ4_stream_t *LZ4_stream) { MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); } - -int LZ4_freeStream(LZ4_stream_t *LZ4_stream) { - FREEMEM(LZ4_stream); - return (0); -} - -#define HASH_UNIT sizeof(size_t) -int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize) { - LZ4_stream_t_internal *dict = (LZ4_stream_t_internal *)LZ4_dict; - const BYTE *p = (const BYTE *)dictionary; - const BYTE *const dictEnd = p + dictSize; - const BYTE *base; - - if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */ - LZ4_resetStream(LZ4_dict); - - if (dictSize < (int)HASH_UNIT) { - dict->dictionary = NULL; - dict->dictSize = 0; - return 0; - } - - if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; - dict->currentOffset += 64 KB; - base = p - dict->currentOffset; - dict->dictionary = p; - dict->dictSize = (U32)(dictEnd - p); - dict->currentOffset += dict->dictSize; - - while (p <= dictEnd - HASH_UNIT) { - LZ4_putPosition(p, dict->hashTable, byU32, base); - p += 3; - } - - return dict->dictSize; -} - -static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, const 
BYTE *src) { - if ((LZ4_dict->currentOffset > 0x80000000) || - ((size_t)LZ4_dict->currentOffset > (size_t)src)) /* address space overflow */ - { - /* rescale hash table */ - U32 delta = LZ4_dict->currentOffset - 64 KB; - const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; - int i; - for (i = 0; i < HASH_SIZE_U32; i++) { - if (LZ4_dict->hashTable[i] < delta) - LZ4_dict->hashTable[i] = 0; - else - LZ4_dict->hashTable[i] -= delta; - } - LZ4_dict->currentOffset = 64 KB; - if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; - LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; - } -} - -int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize, - int maxOutputSize, int acceleration) { - LZ4_stream_t_internal *streamPtr = (LZ4_stream_t_internal *)LZ4_stream; - const BYTE *const dictEnd = streamPtr->dictionary + streamPtr->dictSize; - - const BYTE *smallest = (const BYTE *)source; - if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */ - if ((streamPtr->dictSize > 0) && (smallest > dictEnd)) smallest = dictEnd; - LZ4_renormDictT(streamPtr, smallest); - if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; - - /* Check overlapping input/dictionary space */ - { - const BYTE *sourceEnd = (const BYTE *)source + inputSize; - if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) { - streamPtr->dictSize = (U32)(dictEnd - sourceEnd); - if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; - if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; - streamPtr->dictionary = dictEnd - streamPtr->dictSize; - } - } - - /* prefix mode : source data follows dictionary */ - if (dictEnd == (const BYTE *)source) { - int result; - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, - byU32, withPrefix64k, dictSmall, acceleration); - else - result = 
LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, - byU32, withPrefix64k, noDictIssue, acceleration); - streamPtr->dictSize += (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; - return result; - } - - /* external dictionary mode */ - { - int result; - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, - byU32, usingExtDict, dictSmall, acceleration); - else - result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, - byU32, usingExtDict, noDictIssue, acceleration); - streamPtr->dictionary = (const BYTE *)source; - streamPtr->dictSize = (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; - return result; - } -} - -/* Hidden debug function, to force external dictionary mode */ -int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source, char *dest, int inputSize) { - LZ4_stream_t_internal *streamPtr = (LZ4_stream_t_internal *)LZ4_dict; - int result; - const BYTE *const dictEnd = streamPtr->dictionary + streamPtr->dictSize; - - const BYTE *smallest = dictEnd; - if (smallest > (const BYTE *)source) smallest = (const BYTE *)source; - LZ4_renormDictT((LZ4_stream_t_internal *)LZ4_dict, smallest); - - result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, - noDictIssue, 1); - - streamPtr->dictionary = (const BYTE *)source; - streamPtr->dictSize = (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; - - return result; -} - -int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize) { - LZ4_stream_t_internal *dict = (LZ4_stream_t_internal *)LZ4_dict; - const BYTE *previousDictEnd = dict->dictionary + dict->dictSize; - - if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */ - if ((U32)dictSize > dict->dictSize) dictSize = 
dict->dictSize; - - memmove(safeBuffer, previousDictEnd - dictSize, dictSize); - - dict->dictionary = (const BYTE *)safeBuffer; - dict->dictSize = (U32)dictSize; - - return dictSize; -} - -/******************************* - * Decompression functions - *******************************/ -/* - * This generic decompression function cover all use cases. - * It shall be instantiated several times, using different sets of directives - * Note that it is essential this generic function is really inlined, - * in order to remove useless branches during compilation optimization. - */ -FORCE_INLINE int LZ4_decompress_generic( - const char *const source, char *const dest, int inputSize, - int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */ - - int endOnInput, /* endOnOutputSize, endOnInputSize */ - int partialDecoding, /* full, partial */ - int targetOutputSize, /* only used if partialDecoding==partial */ - int dict, /* noDict, withPrefix64k, usingExtDict */ - const BYTE *const lowPrefix, /* == dest if dict == noDict */ - const BYTE *const dictStart, /* only if dict==usingExtDict */ - const size_t dictSize /* note : = 0 if noDict */ -) { - /* Local Variables */ - const BYTE *ip = (const BYTE *)source; - const BYTE *const iend = ip + inputSize; - - BYTE *op = (BYTE *)dest; - BYTE *const oend = op + outputSize; - BYTE *cpy; - BYTE *oexit = op + targetOutputSize; - const BYTE *const lowLimit = lowPrefix - dictSize; - - const BYTE *const dictEnd = (const BYTE *)dictStart + dictSize; - const size_t dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4}; - const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3}; - - const int safeDecode = (endOnInput == endOnInputSize); - const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); - - /* Special cases */ - if ((partialDecoding) && (oexit > oend - MFLIMIT)) - oexit = oend - MFLIMIT; /* targetOutputSize too high => decode everything */ - if ((endOnInput) && (unlikely(outputSize == 0))) - 
return ((inputSize == 1) && (*ip == 0)) ? 0 : -1; /* Empty output buffer */ - if ((!endOnInput) && (unlikely(outputSize == 0))) return (*ip == 0 ? 1 : -1); - - /* Main Loop */ - while (1) { - unsigned token; - size_t length; - const BYTE *match; - - /* get literal length */ - token = *ip++; - if ((length = (token >> ML_BITS)) == RUN_MASK) { - unsigned s; - do { - s = *ip++; - length += s; - } while (likely((endOnInput) ? ip < iend - RUN_MASK : 1) && (s == 255)); - if ((safeDecode) && unlikely((size_t)(op + length) < (size_t)(op))) - goto _output_error; /* overflow detection */ - if ((safeDecode) && unlikely((size_t)(ip + length) < (size_t)(ip))) - goto _output_error; /* overflow detection */ - } - - /* copy literals */ - cpy = op + length; - if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) || - (ip + length > iend - (2 + 1 + LASTLITERALS)))) || - ((!endOnInput) && (cpy > oend - COPYLENGTH))) { - if (partialDecoding) { - if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */ - if ((endOnInput) && (ip + length > iend)) - goto _output_error; /* Error : read attempt beyond end of input buffer */ - } else { - if ((!endOnInput) && (cpy != oend)) - goto _output_error; /* Error : block decoding must stop exactly there */ - if ((endOnInput) && ((ip + length != iend) || (cpy > oend))) - goto _output_error; /* Error : input must be consumed */ - } - memcpy(op, ip, length); - ip += length; - op += length; - break; /* Necessarily EOF, due to parsing restrictions */ - } - LZ4_wildCopy(op, ip, cpy); - ip += length; - op = cpy; - - /* get offset */ - match = cpy - LZ4_readLE16(ip); - ip += 2; - if ((checkOffset) && (unlikely(match < lowLimit))) - goto _output_error; /* Error : offset outside destination buffer */ - - /* get matchlength */ - length = token & ML_MASK; - if (length == ML_MASK) { - unsigned s; - do { - if ((endOnInput) && (ip > iend - LASTLITERALS)) goto _output_error; - s = *ip++; - length += s; - } while 
(s == 255); - if ((safeDecode) && unlikely((size_t)(op + length) < (size_t)op)) - goto _output_error; /* overflow detection */ - } - length += MINMATCH; - - /* check external dictionary */ - if ((dict == usingExtDict) && (match < lowPrefix)) { - if (unlikely(op + length > oend - LASTLITERALS)) - goto _output_error; /* doesn't respect parsing restriction */ - - if (length <= (size_t)(lowPrefix - match)) { - /* match can be copied as a single segment from external dictionary */ - match = dictEnd - (lowPrefix - match); - memmove(op, match, length); - op += length; - } else { - /* match encompass external dictionary and current segment */ - size_t copySize = (size_t)(lowPrefix - match); - memcpy(op, dictEnd - copySize, copySize); - op += copySize; - copySize = length - copySize; - if (copySize > (size_t)(op - lowPrefix)) /* overlap within current segment */ - { - BYTE *const endOfMatch = op + copySize; - const BYTE *copyFrom = lowPrefix; - while (op < endOfMatch) *op++ = *copyFrom++; - } else { - memcpy(op, lowPrefix, copySize); - op += copySize; - } - } - continue; - } - - /* copy repeated sequence */ - cpy = op + length; - if (unlikely((op - match) < 8)) { - const size_t dec64 = dec64table[op - match]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[op - match]; - LZ4_copy4(op + 4, match); - op += 8; - match -= dec64; - } else { - LZ4_copy8(op, match); - op += 8; - match += 8; - } - - if (unlikely(cpy > oend - 12)) { - if (cpy > oend - LASTLITERALS) - goto _output_error; /* Error : last LASTLITERALS bytes must be literals */ - if (op < oend - 8) { - LZ4_wildCopy(op, match, oend - 8); - match += (oend - 8) - op; - op = oend - 8; - } - while (op < cpy) *op++ = *match++; - } else - LZ4_wildCopy(op, match, cpy); - op = cpy; /* correction */ - } - - /* end of decoding */ - if (endOnInput) - return (int)(((char *)op) - dest); /* Nb of output bytes decoded */ - else - return (int)(((const char *)ip) - source); /* Nb of 
input bytes read */ - -/* Overflow error detected */ -_output_error: - return (int)(-(((const char *)ip) - source)) - 1; -} - -int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize) { - return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, - noDict, (BYTE *)dest, NULL, 0); -} - -int LZ4_decompress_safe_partial(const char *source, char *dest, int compressedSize, int targetOutputSize, - int maxDecompressedSize) { - return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, - targetOutputSize, noDict, (BYTE *)dest, NULL, 0); -} - -int LZ4_decompress_fast(const char *source, char *dest, int originalSize) { - return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, - (BYTE *)(dest - 64 KB), NULL, 64 KB); -} - -/* streaming decompression functions */ - -typedef struct { - const BYTE *externalDict; - size_t extDictSize; - const BYTE *prefixEnd; - size_t prefixSize; -} LZ4_streamDecode_t_internal; - -/* - * If you prefer dynamic allocation methods, - * LZ4_createStreamDecode() - * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure. - */ -LZ4_streamDecode_t *LZ4_createStreamDecode(void) { - LZ4_streamDecode_t *lz4s = (LZ4_streamDecode_t *)ALLOCATOR(1, sizeof(LZ4_streamDecode_t)); - return lz4s; -} - -int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream) { - FREEMEM(LZ4_stream); - return 0; -} - -/* - * LZ4_setStreamDecode - * Use this function to instruct where to find the dictionary - * This function is not necessary if previous data is still available where it was decoded. - * Loading a size of 0 is allowed (same effect as no dictionary). 
- * Return : 1 if OK, 0 if error - */ -int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize) { - LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *)LZ4_streamDecode; - lz4sd->prefixSize = (size_t)dictSize; - lz4sd->prefixEnd = (const BYTE *)dictionary + dictSize; - lz4sd->externalDict = NULL; - lz4sd->extDictSize = 0; - return 1; -} - -/* -*_continue() : - These decoding functions allow decompression of multiple blocks in "streaming" mode. - Previously decoded blocks must still be available at the memory position where they were decoded. - If it's not possible, save the relevant part of decoded data into a safe buffer, - and indicate where it stands using LZ4_setStreamDecode() -*/ -int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, - int compressedSize, int maxOutputSize) { - LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *)LZ4_streamDecode; - int result; - - if (lz4sd->prefixEnd == (BYTE *)dest) { - result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, - usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize += result; - lz4sd->prefixEnd += result; - } else { - lz4sd->extDictSize = lz4sd->prefixSize; - lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, - usingExtDict, (BYTE *)dest, lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize = result; - lz4sd->prefixEnd = (BYTE *)dest + result; - } - - return result; -} - -int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, - int originalSize) { - LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *)LZ4_streamDecode; - int result; 
- - if (lz4sd->prefixEnd == (BYTE *)dest) { - result = LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, - lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, - lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize += originalSize; - lz4sd->prefixEnd += originalSize; - } else { - lz4sd->extDictSize = lz4sd->prefixSize; - lz4sd->externalDict = (BYTE *)dest - lz4sd->extDictSize; - result = LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, - (BYTE *)dest, lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize = originalSize; - lz4sd->prefixEnd = (BYTE *)dest + originalSize; - } - - return result; -} - -/* -Advanced decoding functions : -*_usingDict() : - These decoding functions work the same as "_continue" ones, - the dictionary must be explicitly provided within parameters -*/ - -FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source, char *dest, int compressedSize, - int maxOutputSize, int safe, const char *dictStart, - int dictSize) { - if (dictSize == 0) - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, - (BYTE *)dest, NULL, 0); - if (dictStart + dictSize == dest) { - if (dictSize >= (int)(64 KB - 1)) - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, - withPrefix64k, (BYTE *)dest - 64 KB, NULL, 0); - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, - (BYTE *)dest - dictSize, NULL, 0); - } - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, - (BYTE *)dest, (const BYTE *)dictStart, dictSize); -} - -int LZ4_decompress_safe_usingDict(const char *source, char *dest, int compressedSize, int maxOutputSize, - const char *dictStart, int dictSize) { - return LZ4_decompress_usingDict_generic(source, dest, 
compressedSize, maxOutputSize, 1, dictStart, - dictSize); -} - -int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, - int dictSize) { - return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); -} - -/* debug function */ -int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, int compressedSize, int maxOutputSize, - const char *dictStart, int dictSize) { - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, - usingExtDict, (BYTE *)dest, (const BYTE *)dictStart, dictSize); -} - -/*************************************************** - * Obsolete Functions - ***************************************************/ -/* obsolete compression functions */ -int LZ4_compress_limitedOutput(const char *source, char *dest, int inputSize, int maxOutputSize) { - return LZ4_compress_default(source, dest, inputSize, maxOutputSize); -} -int LZ4_compress(const char *source, char *dest, int inputSize) { - return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); -} -int LZ4_compress_limitedOutput_withState(void *state, const char *src, char *dst, int srcSize, int dstSize) { - return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); -} -int LZ4_compress_withState(void *state, const char *src, char *dst, int srcSize) { - return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); -} -int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_stream, const char *src, char *dst, int srcSize, - int maxDstSize) { - return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); -} -int LZ4_compress_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize) { - return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); -} - -/* -These function names are deprecated and should no longer be 
used. -They are only provided here for compatibility with older user programs. -- LZ4_uncompress is totally equivalent to LZ4_decompress_fast -- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe -*/ -int LZ4_uncompress(const char *source, char *dest, int outputSize) { - return LZ4_decompress_fast(source, dest, outputSize); -} -int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize, int maxOutputSize) { - return LZ4_decompress_safe(source, dest, isize, maxOutputSize); -} - -/* Obsolete Streaming functions */ - -int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } - -static void LZ4_init(LZ4_stream_t_internal *lz4ds, BYTE *base) { - MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE); - lz4ds->bufferStart = base; -} - -int LZ4_resetStreamState(void *state, char *inputBuffer) { - if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */ - LZ4_init((LZ4_stream_t_internal *)state, (BYTE *)inputBuffer); - return 0; -} - -void *LZ4_create(char *inputBuffer) { - void *lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64); - LZ4_init((LZ4_stream_t_internal *)lz4ds, (BYTE *)inputBuffer); - return lz4ds; -} - -char *LZ4_slideInputBuffer(void *LZ4_Data) { - LZ4_stream_t_internal *ctx = (LZ4_stream_t_internal *)LZ4_Data; - int dictSize = LZ4_saveDict((LZ4_stream_t *)LZ4_Data, (char *)ctx->bufferStart, 64 KB); - return (char *)(ctx->bufferStart + dictSize); -} - -/* Obsolete streaming decompression functions */ - -int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest, int compressedSize, int maxOutputSize) { - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, - withPrefix64k, (BYTE *)dest - 64 KB, NULL, 64 KB); -} - -int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest, int originalSize) { - return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, - (BYTE *)dest - 64 KB, NULL, 64 KB); -} - 
-#endif /* LZ4_COMMONDEFS_ONLY */ - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/lz4/lz4.h b/pulsar-client-cpp/lib/lz4/lz4.h deleted file mode 100644 index e5fb5a4784bb1..0000000000000 --- a/pulsar-client-cpp/lib/lz4/lz4.h +++ /dev/null @@ -1,405 +0,0 @@ -/* - LZ4 - Fast LZ compression algorithm - Header File - Copyright (C) 2011-2015, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - You can contact the author at : - - LZ4 source repository : https://github.com/Cyan4973/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -#pragma once - -// Use "pulsar" namespace in order to avoid conflict with existing LZ4 symbols -// when using pulsar static library. -namespace pulsar { - -/* - * lz4.h provides block compression functions, and gives full buffer control to programmer. - * If you need to generate inter-operable compressed data (respecting LZ4 frame specification), - * and can let the library handle its own memory, please use lz4frame.h instead. - */ - -/************************************** - * Version - **************************************/ -#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ -#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 1 /* for tweaks, bug-fixes, or development */ -#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR * 100 * 100 + LZ4_VERSION_MINOR * 100 + LZ4_VERSION_RELEASE) - -int LZ4_versionNumber(void); - -/************************************** - * Tuning parameter - **************************************/ -/* - * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache - */ -#define LZ4_MEMORY_USAGE 14 - -/************************************** - * Simple Functions - **************************************/ - -int LZ4_compress_default(const char *source, char *dest, int sourceSize, int maxDestSize); - -int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize); - -/* -LZ4_compress_default() : - Compresses 'sourceSize' bytes from buffer 'source' - into already allocated 'dest' buffer of size 'maxDestSize'. 
- Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize). - It also runs faster, so it's a recommended setting. - If the function cannot compress 'source' into a more limited 'dest' budget, - compression stops *immediately*, and the function result is zero. - As a consequence, 'dest' content is not valid. - This function never writes outside 'dest' buffer, nor read outside 'source' buffer. - sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE - maxDestSize : full or partial size of buffer 'dest' (which must be already allocated) - return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize) - or 0 if compression fails - -LZ4_decompress_safe() : - compressedSize : is the precise full size of the compressed block. - maxDecompressedSize : is the size of destination buffer, which must be already allocated. - return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize) - If destination buffer is not large enough, decoding will stop and output an error code (<0). - If the source stream is detected malformed, the function will stop decoding and return a negative -result. - This function is protected against buffer overflow exploits, including malicious data packets. - It never writes outside output buffer, nor reads outside input buffer. -*/ - -/************************************** - * Advanced Functions - **************************************/ -#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ -#define LZ4_COMPRESSBOUND(isize) \ - ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize) / 255) + 16) - -/* -LZ4_compressBound() : - Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not -compressible) - This function is primarily useful for memory allocation purposes (destination buffer size). 
- Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for -example). - Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize) - inputSize : max supported value is LZ4_MAX_INPUT_SIZE - return : maximum output size in a "worst case" scenario - or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) -*/ -int LZ4_compressBound(int inputSize); - -/* -LZ4_compress_fast() : - Same as LZ4_compress_default(), but allows to select an "acceleration" factor. - The larger the acceleration value, the faster the algorithm, but also the lesser the compression. - It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. - An acceleration value of "1" is the same as regular LZ4_compress_default() - Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. -*/ -int LZ4_compress_fast(const char *source, char *dest, int sourceSize, int maxDestSize, int acceleration); - -/* -LZ4_compress_fast_extState() : - Same compression function, just using an externally allocated memory space to store compression state. - Use LZ4_sizeofState() to know how much memory must be allocated, - and allocate it on 8-bytes boundaries (using malloc() typically). - Then, provide it as 'void* state' to compression function. -*/ -int LZ4_sizeofState(void); - -int LZ4_compress_fast_extState(void *state, const char *source, char *dest, int inputSize, int maxDestSize, - int acceleration); - -/* -LZ4_compress_destSize() : - Reverse the logic, by compressing as much data as possible from 'source' buffer - into already allocated buffer 'dest' of size 'targetDestSize'. - This function either compresses the entire 'source' content into 'dest' if it's large enough, - or fill 'dest' buffer completely with as much data as possible from 'source'. - *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'. 
- New value is necessarily <= old value. - return : Nb bytes written into 'dest' (necessarily <= targetDestSize) - or 0 if compression fails -*/ -int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, int targetDestSize); - -/* -LZ4_decompress_fast() : - originalSize : is the original and therefore uncompressed size - return : the number of bytes read from the source buffer (in other words, the compressed size) - If the source stream is detected malformed, the function will stop decoding and return a negative -result. - Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes. - note : This function fully respect memory boundaries for properly formed compressed data. - It is a bit faster than LZ4_decompress_safe(). - However, it does not provide any protection against intentionally modified data stream (malicious -input). - Use this function in trusted environment only (data to decode comes from a trusted source). -*/ -int LZ4_decompress_fast(const char *source, char *dest, int originalSize); - -/* -LZ4_decompress_safe_partial() : - This function decompress a compressed block of size 'compressedSize' at position 'source' - into destination buffer 'dest' of size 'maxDecompressedSize'. - The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, - reducing decompression time. - return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize) - Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. - Always control how many bytes were decoded. - If the source stream is detected malformed, the function will stop decoding and return a negative -result. - This function never writes outside of output buffer, and never reads outside of input buffer. 
It -is therefore protected against malicious data packets -*/ -int LZ4_decompress_safe_partial(const char *source, char *dest, int compressedSize, int targetOutputSize, - int maxDecompressedSize); - -/*********************************************** - * Streaming Compression Functions - ***********************************************/ -#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4) -#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long)) -/* - * LZ4_stream_t - * information structure to track an LZ4 stream. - * important : init this structure content before first use ! - * note : only allocated directly the structure if you are statically linking LZ4 - * If you are using liblz4 as a DLL, please use below construction methods instead. - */ -// clang-format off - typedef struct { - long long table[LZ4_STREAMSIZE_U64]; - } LZ4_stream_t; -// clang-format on - -/* - * LZ4_resetStream - * Use this function to init an allocated LZ4_stream_t structure - */ -void LZ4_resetStream(LZ4_stream_t *streamPtr); - -/* - * LZ4_createStream will allocate and initialize an LZ4_stream_t structure - * LZ4_freeStream releases its memory. - * In the context of a DLL (liblz4), please use these methods rather than the static struct. - * They are more future proof, in case of a change of LZ4_stream_t size. - */ -LZ4_stream_t *LZ4_createStream(void); - -int LZ4_freeStream(LZ4_stream_t *streamPtr); - -/* - * LZ4_loadDict - * Use this function to load a static dictionary into LZ4_stream. - * Any previous data will be forgotten, only 'dictionary' will remain in memory. - * Loading a size of 0 is allowed. - * Return : dictionary size, in bytes (necessarily <= 64 KB) - */ -int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, int dictSize); - -/* - * LZ4_compress_fast_continue - * Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve - * compression ratio. 
- * Important : Previous data blocks are assumed to still be present and unmodified ! - * 'dst' buffer must be already allocated. - * If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. - * If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a - * zero. - */ -int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src, char *dst, int srcSize, - int maxDstSize, int acceleration); - -/* - * LZ4_saveDict - * If previously compressed data block is not guaranteed to remain available at its memory location - * save it into a safer place (char* safeBuffer) - * Note : you don't need to call LZ4_loadDict() afterwards, - * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue() - * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error - */ -int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); - -/************************************************ - * Streaming Decompression Functions - ************************************************/ - -#define LZ4_STREAMDECODESIZE_U64 4 -#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) -// clang-format off - typedef struct { - unsigned long long table[LZ4_STREAMDECODESIZE_U64]; - } LZ4_streamDecode_t; -// clang-format on - -/* - * LZ4_streamDecode_t - * information structure to track an LZ4 stream. - * init this structure content using LZ4_setStreamDecode or memset() before first use ! - * - * In the context of a DLL (liblz4) please prefer usage of construction methods below. - * They are more future proof, in case of a change of LZ4_streamDecode_t size in the future. - * LZ4_createStreamDecode will allocate and initialize an LZ4_streamDecode_t structure - * LZ4_freeStreamDecode releases its memory. 
- */ -LZ4_streamDecode_t *LZ4_createStreamDecode(void); - -int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream); - -/* - * LZ4_setStreamDecode - * Use this function to instruct where to find the dictionary. - * Setting a size of 0 is allowed (same effect as reset). - * Return : 1 if OK, 0 if error - */ -int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize); - -/* -*_continue() : - These decoding functions allow decompression of multiple blocks in "streaming" mode. - Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 -KB) - In the case of a ring buffers, decoding buffer must be either : - - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions) - In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 -KB). - - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. - maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single -block. - In which case, encoding and decoding buffers do not need to be synchronized, - and encoding ring buffer can have any size, including small ones ( < 64 KB). - - _At least_ 64 KB + 8 bytes + maxBlockSize. - In which case, encoding and decoding buffers do not need to be synchronized, - and encoding ring buffer can have any size, including larger than decoding buffer. 
- Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer, - and indicate where it is saved using LZ4_setStreamDecode() -*/ -int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, - int compressedSize, int maxDecompressedSize); - -int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, - int originalSize); - -/* -Advanced decoding functions : -*_usingDict() : - These decoding functions work the same as - a combination of LZ4_setStreamDecode() followed by LZ4_decompress_x_continue() - They are stand-alone. They don't need nor update an LZ4_streamDecode_t structure. -*/ -int LZ4_decompress_safe_usingDict(const char *source, char *dest, int compressedSize, int maxDecompressedSize, - const char *dictStart, int dictSize); - -int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, - int dictSize); - -/************************************** - * Obsolete Functions - **************************************/ -/* Deprecate Warnings */ -/* Should these warnings messages be a problem, - it is generally possible to disable them, - with -Wno-deprecated-declarations for gcc - or _CRT_SECURE_NO_WARNINGS in Visual for example. - You can also define LZ4_DEPRECATE_WARNING_DEFBLOCK. 
*/ -#ifndef LZ4_DEPRECATE_WARNING_DEFBLOCK -#define LZ4_DEPRECATE_WARNING_DEFBLOCK -#define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -#if (LZ4_GCC_VERSION >= 405) || defined(__clang__) -#define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) -#elif (LZ4_GCC_VERSION >= 301) -#define LZ4_DEPRECATED(message) __attribute__((deprecated)) -#elif defined(_MSC_VER) -#define LZ4_DEPRECATED(message) __declspec(deprecated(message)) -#else -#pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler") -#define LZ4_DEPRECATED(message) -#endif -#endif /* LZ4_DEPRECATE_WARNING_DEFBLOCK */ - -/* Obsolete compression functions */ -/* These functions are planned to start generate warnings by r131 approximately */ -int LZ4_compress(const char *source, char *dest, int sourceSize); - -int LZ4_compress_limitedOutput(const char *source, char *dest, int sourceSize, int maxOutputSize); - -int LZ4_compress_withState(void *state, const char *source, char *dest, int inputSize); - -int LZ4_compress_limitedOutput_withState(void *state, const char *source, char *dest, int inputSize, - int maxOutputSize); - -int LZ4_compress_continue(LZ4_stream_t *LZ4_streamPtr, const char *source, char *dest, int inputSize); - -int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_streamPtr, const char *source, char *dest, - int inputSize, int maxOutputSize); - -/* Obsolete decompression functions */ -/* These function names are completely deprecated and must no longer be used. - They are only provided here for compatibility with older programs. - - LZ4_uncompress is the same as LZ4_decompress_fast - - LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe - These function prototypes are now disabled; uncomment them only if you really need them. 
- It is highly recommended to stop using these prototypes and migrate to maintained ones */ -/* int LZ4_uncompress (const char* source, char* dest, int outputSize); */ -/* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */ - -/* Obsolete streaming functions; use new streaming interface whenever possible */ -LZ4_DEPRECATED("use LZ4_createStream() instead") - -void *LZ4_create(char *inputBuffer); - -LZ4_DEPRECATED("use LZ4_createStream() instead") - -int LZ4_sizeofStreamState(void); - -LZ4_DEPRECATED("use LZ4_resetStream() instead") - -int LZ4_resetStreamState(void *state, char *inputBuffer); - -LZ4_DEPRECATED("use LZ4_saveDict() instead") - -char *LZ4_slideInputBuffer(void *state); - -/* Obsolete streaming decoding functions */ -LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") - -int LZ4_decompress_safe_withPrefix64k(const char *src, char *dst, int compressedSize, int maxDstSize); - -LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") - -int LZ4_decompress_fast_withPrefix64k(const char *src, char *dst, int originalSize); -} // namespace pulsar \ No newline at end of file diff --git a/pulsar-client-cpp/lib/stats/ConsumerStatsBase.h b/pulsar-client-cpp/lib/stats/ConsumerStatsBase.h deleted file mode 100644 index de7e07bf37f02..0000000000000 --- a/pulsar-client-cpp/lib/stats/ConsumerStatsBase.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef PULSAR_CONSUMER_STATS_BASE_HEADER -#define PULSAR_CONSUMER_STATS_BASE_HEADER -#include -#include -#include -#include - -namespace pulsar { -class ConsumerStatsBase { - public: - virtual void receivedMessage(Message&, Result) = 0; - virtual void messageAcknowledged(Result, proto::CommandAck_AckType) = 0; - virtual ~ConsumerStatsBase() {} -}; - -typedef std::shared_ptr ConsumerStatsBasePtr; -} // namespace pulsar - -#endif // PULSAR_CONSUMER_STATS_BASE_HEADER diff --git a/pulsar-client-cpp/lib/stats/ConsumerStatsDisabled.h b/pulsar-client-cpp/lib/stats/ConsumerStatsDisabled.h deleted file mode 100644 index e2233d55ccc43..0000000000000 --- a/pulsar-client-cpp/lib/stats/ConsumerStatsDisabled.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#ifndef PULSAR_CONSUMER_STATS_DISABLED_H_ -#define PULSAR_CONSUMER_STATS_DISABLED_H_ - -#include - -namespace pulsar { - -class ConsumerStatsDisabled : public ConsumerStatsBase { - public: - virtual void receivedMessage(Message&, Result) {} - virtual void messageAcknowledged(Result, proto::CommandAck_AckType) {} -}; - -} /* namespace pulsar */ - -#endif /* PULSAR_CONSUMER_STATS_DISABLED_H_ */ diff --git a/pulsar-client-cpp/lib/stats/ConsumerStatsImpl.cc b/pulsar-client-cpp/lib/stats/ConsumerStatsImpl.cc deleted file mode 100644 index 38534a60560d9..0000000000000 --- a/pulsar-client-cpp/lib/stats/ConsumerStatsImpl.cc +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include -#include - -#include - -namespace pulsar { -DECLARE_LOG_OBJECT(); - -ConsumerStatsImpl::ConsumerStatsImpl(std::string consumerStr, ExecutorServicePtr executor, - unsigned int statsIntervalInSeconds) - : consumerStr_(consumerStr), - executor_(executor), - timer_(executor_->createDeadlineTimer()), - statsIntervalInSeconds_(statsIntervalInSeconds) { - timer_->expires_from_now(boost::posix_time::seconds(statsIntervalInSeconds_)); - timer_->async_wait(std::bind(&pulsar::ConsumerStatsImpl::flushAndReset, this, std::placeholders::_1)); -} - -ConsumerStatsImpl::ConsumerStatsImpl(const ConsumerStatsImpl& stats) - : consumerStr_(stats.consumerStr_), - numBytesRecieved_(stats.numBytesRecieved_), - receivedMsgMap_(stats.receivedMsgMap_), - ackedMsgMap_(stats.ackedMsgMap_), - totalNumBytesRecieved_(stats.totalNumBytesRecieved_), - totalReceivedMsgMap_(stats.totalReceivedMsgMap_), - totalAckedMsgMap_(stats.totalAckedMsgMap_), - statsIntervalInSeconds_(stats.statsIntervalInSeconds_) {} - -void ConsumerStatsImpl::flushAndReset(const boost::system::error_code& ec) { - if (ec) { - LOG_DEBUG("Ignoring timer cancelled event, code[" << ec << "]"); - return; - } - - Lock lock(mutex_); - ConsumerStatsImpl tmp = *this; - numBytesRecieved_ = 0; - receivedMsgMap_.clear(); - ackedMsgMap_.clear(); - lock.unlock(); - - timer_->expires_from_now(boost::posix_time::seconds(statsIntervalInSeconds_)); - timer_->async_wait(std::bind(&pulsar::ConsumerStatsImpl::flushAndReset, this, std::placeholders::_1)); - LOG_INFO(tmp); -} - -ConsumerStatsImpl::~ConsumerStatsImpl() { - Lock lock(mutex_); - if (timer_) { - timer_->cancel(); - } -} - -void ConsumerStatsImpl::receivedMessage(Message& msg, Result res) { - Lock lock(mutex_); - if (res == ResultOk) { - totalNumBytesRecieved_ += msg.getLength(); - numBytesRecieved_ += msg.getLength(); - } - receivedMsgMap_[res] += 1; - totalReceivedMsgMap_[res] += 1; -} - -void ConsumerStatsImpl::messageAcknowledged(Result res, proto::CommandAck_AckType 
ackType) { - Lock lock(mutex_); - ackedMsgMap_[std::make_pair(res, ackType)] += 1; - totalAckedMsgMap_[std::make_pair(res, ackType)] += 1; -} - -std::ostream& operator<<(std::ostream& os, - const std::map, unsigned long>& m) { - os << "{"; - for (std::map, unsigned long>::const_iterator it = m.begin(); - it != m.end(); it++) { - os << "[Key: {" - << "Result: " << strResult((it->first).first) << ", ackType: " << (it->first).second - << "}, Value: " << it->second << "], "; - } - os << "}"; - return os; -} - -std::ostream& operator<<(std::ostream& os, const ConsumerStatsImpl& obj) { - os << "Consumer " << obj.consumerStr_ << ", ConsumerStatsImpl (" - << "numBytesRecieved_ = " << obj.numBytesRecieved_ - << ", totalNumBytesRecieved_ = " << obj.totalNumBytesRecieved_ - << ", receivedMsgMap_ = " << obj.receivedMsgMap_ << ", ackedMsgMap_ = " << obj.ackedMsgMap_ - << ", totalReceivedMsgMap_ = " << obj.totalReceivedMsgMap_ - << ", totalAckedMsgMap_ = " << obj.totalAckedMsgMap_ << ")"; - return os; -} -} /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/stats/ConsumerStatsImpl.h b/pulsar-client-cpp/lib/stats/ConsumerStatsImpl.h deleted file mode 100644 index 5607ccebb3fda..0000000000000 --- a/pulsar-client-cpp/lib/stats/ConsumerStatsImpl.h +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef PULSAR_CONSUMER_STATS_IMPL_H_ -#define PULSAR_CONSUMER_STATS_IMPL_H_ - -#include -#include -#include -#include -namespace pulsar { - -class ConsumerStatsImpl : public ConsumerStatsBase { - private: - std::string consumerStr_; - - unsigned long numBytesRecieved_ = 0; - std::map receivedMsgMap_; - std::map, unsigned long> ackedMsgMap_; - - unsigned long totalNumBytesRecieved_ = 0; - std::map totalReceivedMsgMap_; - std::map, unsigned long> totalAckedMsgMap_; - - ExecutorServicePtr executor_; - DeadlineTimerPtr timer_; - std::mutex mutex_; - unsigned int statsIntervalInSeconds_; - - friend std::ostream& operator<<(std::ostream&, const ConsumerStatsImpl&); - friend std::ostream& operator<<(std::ostream&, const std::map&); - friend class PulsarFriend; - - public: - ConsumerStatsImpl(std::string, ExecutorServicePtr, unsigned int); - ConsumerStatsImpl(const ConsumerStatsImpl& stats); - void flushAndReset(const boost::system::error_code&); - virtual void receivedMessage(Message&, Result); - virtual void messageAcknowledged(Result, proto::CommandAck_AckType); - virtual ~ConsumerStatsImpl(); - - const inline std::map, unsigned long>& getAckedMsgMap() - const { - return ackedMsgMap_; - } - - inline unsigned long getNumBytesRecieved() const { return numBytesRecieved_; } - - const inline std::map& getReceivedMsgMap() const { return receivedMsgMap_; } - - inline const std::map, unsigned long>& getTotalAckedMsgMap() - const { - return totalAckedMsgMap_; - } - - inline unsigned long getTotalNumBytesRecieved() const { return totalNumBytesRecieved_; } - - const inline std::map& getTotalReceivedMsgMap() const { - return totalReceivedMsgMap_; - } -}; -typedef std::shared_ptr ConsumerStatsImplPtr; -} /* namespace pulsar */ - -#endif /* PULSAR_CONSUMER_STATS_IMPL_H_ */ diff --git a/pulsar-client-cpp/lib/stats/ProducerStatsBase.h 
b/pulsar-client-cpp/lib/stats/ProducerStatsBase.h deleted file mode 100644 index 0ae16d1769c7c..0000000000000 --- a/pulsar-client-cpp/lib/stats/ProducerStatsBase.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef PULSAR_PRODUCER_STATS_BASE_HEADER -#define PULSAR_PRODUCER_STATS_BASE_HEADER -#include -#include -#include - -namespace pulsar { -class ProducerStatsBase { - public: - virtual void messageSent(const Message& msg) = 0; - virtual void messageReceived(Result, const boost::posix_time::ptime&) = 0; - virtual ~ProducerStatsBase(){}; -}; - -typedef std::shared_ptr ProducerStatsBasePtr; -} // namespace pulsar - -#endif // PULSAR_PRODUCER_STATS_BASE_HEADER diff --git a/pulsar-client-cpp/lib/stats/ProducerStatsDisabled.h b/pulsar-client-cpp/lib/stats/ProducerStatsDisabled.h deleted file mode 100644 index 6568c07487719..0000000000000 --- a/pulsar-client-cpp/lib/stats/ProducerStatsDisabled.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef PULSAR_PRODUCER_STATS_DISABLED_HEADER -#define PULSAR_PRODUCER_STATS_DISABLED_HEADER -#include - -namespace pulsar { -class ProducerStatsDisabled : public ProducerStatsBase { - public: - virtual void messageSent(const Message& msg){}; - virtual void messageReceived(Result, const boost::posix_time::ptime&){}; -}; -} // namespace pulsar -#endif // PULSAR_PRODUCER_STATS_DISABLED_HEADER diff --git a/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc b/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc deleted file mode 100644 index e6f0221c85803..0000000000000 --- a/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include - -#include - -namespace pulsar { -DECLARE_LOG_OBJECT(); - -static const std::array probs = {{0.5, 0.9, 0.99, 0.999}}; - -std::string ProducerStatsImpl::latencyToString(const LatencyAccumulator& obj) { - boost::accumulators::detail::extractor_result< - LatencyAccumulator, boost::accumulators::tag::extended_p_square>::type latencies = - boost::accumulators::extended_p_square(obj); - std::stringstream os; - os << "Latencies [ 50pct: " << latencies[0] / 1e3 << "ms" - << ", 90pct: " << latencies[1] / 1e3 << "ms" - << ", 99pct: " << latencies[2] / 1e3 << "ms" - << ", 99.9pct: " << latencies[3] / 1e3 << "ms" - << "]"; - return os.str(); -} - -ProducerStatsImpl::ProducerStatsImpl(std::string producerStr, ExecutorServicePtr executor, - unsigned int statsIntervalInSeconds) - : producerStr_(producerStr), - latencyAccumulator_(boost::accumulators::tag::extended_p_square::probabilities = probs), - totalLatencyAccumulator_(boost::accumulators::tag::extended_p_square::probabilities = probs), - executor_(executor), - timer_(executor->createDeadlineTimer()), - statsIntervalInSeconds_(statsIntervalInSeconds) { - timer_->expires_from_now(boost::posix_time::seconds(statsIntervalInSeconds_)); - timer_->async_wait(std::bind(&pulsar::ProducerStatsImpl::flushAndReset, this, std::placeholders::_1)); -} - -ProducerStatsImpl::ProducerStatsImpl(const ProducerStatsImpl& stats) - : producerStr_(stats.producerStr_), - numMsgsSent_(stats.numMsgsSent_), - numBytesSent_(stats.numBytesSent_), - sendMap_(stats.sendMap_), - latencyAccumulator_(stats.latencyAccumulator_), - totalMsgsSent_(stats.totalMsgsSent_), - totalBytesSent_(stats.totalBytesSent_), - totalSendMap_(stats.totalSendMap_), - totalLatencyAccumulator_(stats.totalLatencyAccumulator_), - statsIntervalInSeconds_(stats.statsIntervalInSeconds_) {} - -void ProducerStatsImpl::flushAndReset(const 
boost::system::error_code& ec) { - if (ec) { - LOG_DEBUG("Ignoring timer cancelled event, code[" << ec << "]"); - return; - } - - Lock lock(mutex_); - ProducerStatsImpl tmp = *this; - numMsgsSent_ = 0; - numBytesSent_ = 0; - sendMap_.clear(); - latencyAccumulator_ = - LatencyAccumulator(boost::accumulators::tag::extended_p_square::probabilities = probs); - lock.unlock(); - - timer_->expires_from_now(boost::posix_time::seconds(statsIntervalInSeconds_)); - timer_->async_wait(std::bind(&pulsar::ProducerStatsImpl::flushAndReset, this, std::placeholders::_1)); - LOG_INFO(tmp); -} - -void ProducerStatsImpl::messageSent(const Message& msg) { - Lock lock(mutex_); - numMsgsSent_++; - totalMsgsSent_++; - numBytesSent_ += msg.getLength(); - totalBytesSent_ += msg.getLength(); -} - -void ProducerStatsImpl::messageReceived(Result res, const boost::posix_time::ptime& publishTime) { - boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); - double diffInMicros = (currentTime - publishTime).total_microseconds(); - Lock lock(mutex_); - totalLatencyAccumulator_(diffInMicros); - latencyAccumulator_(diffInMicros); - sendMap_[res] += 1; // Value will automatically be initialized to 0 in the constructor - totalSendMap_[res] += 1; // Value will automatically be initialized to 0 in the constructor -} - -ProducerStatsImpl::~ProducerStatsImpl() { - Lock lock(mutex_); - if (timer_) { - timer_->cancel(); - } -} - -std::ostream& operator<<(std::ostream& os, const ProducerStatsImpl& obj) { - os << "Producer " << obj.producerStr_ << ", ProducerStatsImpl (" - << "numMsgsSent_ = " << obj.numMsgsSent_ << ", numBytesSent_ = " << obj.numBytesSent_ - << ", sendMap_ = " << obj.sendMap_ - << ", latencyAccumulator_ = " << ProducerStatsImpl::latencyToString(obj.latencyAccumulator_) - << ", totalMsgsSent_ = " << obj.totalMsgsSent_ << ", totalBytesSent_ = " << obj.totalBytesSent_ - << ", totalAcksReceived_ = " - << ", totalSendMap_ = " << obj.totalSendMap_ - << ", 
totalLatencyAccumulator_ = " << ProducerStatsImpl::latencyToString(obj.totalLatencyAccumulator_) - << ")"; - return os; -} -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/stats/ProducerStatsImpl.h b/pulsar-client-cpp/lib/stats/ProducerStatsImpl.h deleted file mode 100644 index 27ffacc81a519..0000000000000 --- a/pulsar-client-cpp/lib/stats/ProducerStatsImpl.h +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#ifndef PULSAR_PRODUCER_STATS_IMPL_HEADER -#define PULSAR_PRODUCER_STATS_IMPL_HEADER - -#include -#include -#include - -#if BOOST_VERSION >= 106400 -#include -#endif -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { -typedef boost::accumulators::accumulator_set< - double, - boost::accumulators::stats > - LatencyAccumulator; - -class ProducerStatsImpl : public std::enable_shared_from_this, public ProducerStatsBase { - private: - std::string producerStr_; - - unsigned long numMsgsSent_ = 0; - unsigned long numBytesSent_ = 0; - std::map sendMap_; - LatencyAccumulator latencyAccumulator_; - - unsigned long totalMsgsSent_ = 0; - unsigned long totalBytesSent_ = 0; - std::map totalSendMap_; - LatencyAccumulator totalLatencyAccumulator_; - - ExecutorServicePtr executor_; - DeadlineTimerPtr timer_; - std::mutex mutex_; - unsigned int statsIntervalInSeconds_; - - friend std::ostream& operator<<(std::ostream&, const ProducerStatsImpl&); - friend std::ostream& operator<<(std::ostream&, const std::map&); - friend class PulsarFriend; - - static std::string latencyToString(const LatencyAccumulator&); - - public: - ProducerStatsImpl(std::string, ExecutorServicePtr, unsigned int); - - ProducerStatsImpl(const ProducerStatsImpl& stats); - - void flushAndReset(const boost::system::error_code&); - - void messageSent(const Message&); - - void messageReceived(Result, const boost::posix_time::ptime&); - - ~ProducerStatsImpl(); - - inline unsigned long getNumMsgsSent() { return numMsgsSent_; } - - inline unsigned long getNumBytesSent() { return numBytesSent_; } - - inline std::map getSendMap() { return sendMap_; } - - inline unsigned long getTotalMsgsSent() { return totalMsgsSent_; } - - inline unsigned long getTotalBytesSent() { return totalBytesSent_; } - - inline std::map getTotalSendMap() { return totalSendMap_; } - - inline LatencyAccumulator getLatencyAccumulator() { return 
latencyAccumulator_; } - - inline LatencyAccumulator getTotalLatencyAccumulator() { return totalLatencyAccumulator_; } -}; -typedef std::shared_ptr ProducerStatsImplPtr; -} // namespace pulsar - -#endif // PULSAR_PRODUCER_STATS_IMPL_HEADER diff --git a/pulsar-client-cpp/log4cxx.conf b/pulsar-client-cpp/log4cxx.conf deleted file mode 100644 index 9d01a067641c4..0000000000000 --- a/pulsar-client-cpp/log4cxx.conf +++ /dev/null @@ -1,32 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Set root logger level to INFO and its only appender to A1. -log4j.rootLogger=INFO, A1 - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender - -# A1 uses PatternLayout. 
-log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yy-MM-dd HH:mm:ss.SSS} %X{pname}:%X{pid} %-5p %l- %m%n -log4j.appender.A1.serverFileAppender=org.apache.log4j.RollingFileAppender - -# Tweak the timestamp format so that it sorts easier -log4j.appender.A1.serverFileAppender.fileName=/tmp/pulsar_client_cpp.log diff --git a/pulsar-client-cpp/perf/CMakeLists.txt b/pulsar-client-cpp/perf/CMakeLists.txt deleted file mode 100644 index 586f9b0f169be..0000000000000 --- a/pulsar-client-cpp/perf/CMakeLists.txt +++ /dev/null @@ -1,37 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -# Test tools -add_definitions(-D_GLIBCXX_USE_NANOSLEEP) - -set(PERF_PRODUCER_SOURCES - PerfProducer.cc -) - -set(PERF_CONSUMER_SOURCES - PerfConsumer.cc -) - -add_executable(perfProducer ${PERF_PRODUCER_SOURCES}) -add_executable(perfConsumer ${PERF_CONSUMER_SOURCES}) - -set(TOOL_LIBS ${CLIENT_LIBS} ${Boost_PROGRAM_OPTIONS_LIBRARY} ${Boost_THREAD_LIBRARY}) - -target_link_libraries(perfProducer pulsarShared ${TOOL_LIBS}) -target_link_libraries(perfConsumer pulsarShared ${TOOL_LIBS}) diff --git a/pulsar-client-cpp/perf/PerfConsumer.cc b/pulsar-client-cpp/perf/PerfConsumer.cc deleted file mode 100644 index 6717fdeb8270d..0000000000000 --- a/pulsar-client-cpp/perf/PerfConsumer.cc +++ /dev/null @@ -1,354 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -DECLARE_LOG_OBJECT() - -#include -#include -#include -#include -#include -#include - -using namespace std::chrono; - -#include -#include -#include -#include -#include -#include -namespace po = boost::program_options; -using namespace boost::accumulators; - -#include - -#include -#include -using namespace pulsar; - -static int64_t currentTimeMillis() { - using namespace boost::posix_time; - using boost::posix_time::milliseconds; - using boost::posix_time::seconds; - static ptime time_t_epoch(boost::gregorian::date(1970, 1, 1)); - - time_duration diff = microsec_clock::universal_time() - time_t_epoch; - return diff.total_milliseconds(); -} - -struct Arguments { - std::string authParams; - std::string authPlugin; - bool isUseTls; - bool isTlsAllowInsecureConnection; - std::string tlsTrustCertsFilePath; - std::string topic; - int numTopics; - int numConsumers; - std::string subscriberName; - int waitTimeMs; - std::string serviceURL; - int receiverQueueSize; - int ioThreads; - int listenerThreads; - bool poolConnections; - std::string encKeyName; - std::string encKeyValueFile; -}; - -namespace pulsar { -class PulsarFriend { - public: - static Client getClient(const std::string& url, const ClientConfiguration conf, bool poolConnections) { - return Client(url, conf, poolConnections); - } -}; -} // namespace pulsar - -#if __GNUC__ == 4 && __GNUC_MINOR__ == 4 -// Used for gcc-4.4.7 with boost-1.41 -#include -#else -#include -#endif - -class EncKeyReader : public CryptoKeyReader { - private: - std::string privKeyContents; - - void readFile(std::string fileName, std::string& fileContents) const { - std::ifstream ifs(fileName); - std::stringstream fileStream; - fileStream << ifs.rdbuf(); - fileContents = fileStream.str(); - } - - public: - EncKeyReader(std::string keyFile) { - if (keyFile.empty()) { - return; - } - readFile(keyFile, privKeyContents); - } - - Result getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) 
const { - return ResultInvalidConfiguration; - } - - Result getPrivateKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - encKeyInfo.setKey(privKeyContents); - return ResultOk; - } -}; - -// Counters -std::atomic messagesReceived; -std::atomic bytesReceived; - -typedef std::chrono::high_resolution_clock Clock; - -void handleAckComplete(Result) {} - -std::mutex mutex; -typedef std::unique_lock Lock; -typedef accumulator_set > LatencyAccumulator; -LatencyAccumulator e2eLatencyAccumulator(quantile_probability = 0.99); - -void messageListener(Consumer consumer, const Message& msg) { - ++messagesReceived; - bytesReceived += msg.getLength(); - - int64_t e2eLatencyMsec = currentTimeMillis() - msg.getPublishTimestamp(); - Lock lock(mutex); - e2eLatencyAccumulator(e2eLatencyMsec); - lock.unlock(); - - consumer.acknowledgeAsync(msg, handleAckComplete); -} - -std::vector consumers; - -void handleSubscribe(Result result, Consumer consumer, Latch latch) { - if (result != ResultOk) { - LOG_ERROR("Error creating consumer: " << result); - exit(-1); - } - - Lock lock(mutex); - consumers.push_back(consumer); - - latch.countdown(); -} - -void startPerfConsumer(const Arguments& args) { - ClientConfiguration conf; - - conf.setUseTls(args.isUseTls); - conf.setTlsAllowInsecureConnection(args.isTlsAllowInsecureConnection); - if (!args.tlsTrustCertsFilePath.empty()) { - std::string tlsTrustCertsFilePath(args.tlsTrustCertsFilePath); - conf.setTlsTrustCertsFilePath(tlsTrustCertsFilePath); - } - conf.setIOThreads(args.ioThreads); - conf.setMessageListenerThreads(args.listenerThreads); - if (!args.authPlugin.empty()) { - AuthenticationPtr auth = AuthFactory::create(args.authPlugin, args.authParams); - conf.setAuth(auth); - } - - Client client(pulsar::PulsarFriend::getClient(args.serviceURL, conf, args.poolConnections)); - - ConsumerConfiguration consumerConf; - consumerConf.setMessageListener(messageListener); - 
consumerConf.setReceiverQueueSize(args.receiverQueueSize); - std::shared_ptr keyReader = std::make_shared(args.encKeyValueFile); - if (!args.encKeyName.empty()) { - consumerConf.setCryptoKeyReader(keyReader); - } - - Latch latch(args.numTopics * args.numConsumers); - - for (int i = 0; i < args.numTopics; i++) { - std::string topic = (args.numTopics == 1) ? args.topic : args.topic + "-" + std::to_string(i); - LOG_INFO("Adding " << args.numConsumers << " consumers on topic " << topic); - - for (int j = 0; j < args.numConsumers; j++) { - std::string subscriberName; - if (args.numConsumers > 1) { - subscriberName = args.subscriberName + "-" + std::to_string(j); - } else { - subscriberName = args.subscriberName; - } - - client.subscribeAsync( - topic, subscriberName, consumerConf, - std::bind(handleSubscribe, std::placeholders::_1, std::placeholders::_2, latch)); - } - } - - Clock::time_point oldTime = Clock::now(); - - latch.wait(); - LOG_INFO("Start receiving from " << args.numConsumers << " consumers on " << args.numTopics << " topics"); - - while (true) { - std::this_thread::sleep_for(seconds(10)); - - Clock::time_point now = Clock::now(); - double elapsed = duration_cast(now - oldTime).count() / 1e3; - - double rate = messagesReceived.exchange(0) / elapsed; - double throughput = bytesReceived.exchange(0) / elapsed / 1024 / 1024 * 8; - - Lock lock(mutex); - int64_t e2eLatencyAvgMs = rate > 0.0 ? 
mean(e2eLatencyAccumulator) : 0; - int64_t e2eLatency99pctMs = p_square_quantile(e2eLatencyAccumulator); - e2eLatencyAccumulator = LatencyAccumulator(quantile_probability = 0.99); - lock.unlock(); - - LOG_INFO("Throughput received: " << rate << " msg/s --- " << throughput << " Mbit/s ---" // - << " End-To-End latency: avg: " << e2eLatencyAvgMs - << " ms -- 99pct: " << e2eLatency99pctMs << " ms"); - - oldTime = now; - } -} - -int main(int argc, char** argv) { - // First try to read default values from config file if present - const std::string confFile = "conf/client.conf"; - std::string defaultServiceUrl; - - std::ifstream file(confFile.c_str()); - if (file) { - po::variables_map vm; - po::options_description confFileDesc; - confFileDesc.add_options() // - ("serviceURL", po::value()->default_value("pulsar://localhost:6650")); - - po::store(po::parse_config_file(file, confFileDesc, true), vm); - po::notify(vm); - - defaultServiceUrl = vm["serviceURL"].as(); - } - - Arguments args; - - // Declare the supported options. 
- po::positional_options_description positional; - positional.add("topic", 1); - - po::options_description desc("Allowed options"); - desc.add_options() // - - ("help,h", "Print this help message") // - - ("auth-params,v", po::value(&args.authParams)->default_value(""), - "Authentication parameters, e.g., \"key1:val1,key2:val2\"") // - - ("auth-plugin,a", po::value(&args.authPlugin)->default_value(""), - "Authentication plugin class library path") // - - ("use-tls,b", po::value(&args.isUseTls)->default_value(false), - "Whether tls connection is used") // - - ("allow-insecure,d", po::value(&args.isTlsAllowInsecureConnection)->default_value(true), - "Whether insecure tls connection is allowed") // - - ("trust-cert-file,c", po::value(&args.tlsTrustCertsFilePath)->default_value(""), - "TLS trust certification file path") // - - ("num-topics,t", po::value(&args.numTopics)->default_value(1), "Number of topics") // - - ("num-consumers,n", po::value(&args.numConsumers)->default_value(1), - "Number of consumers (per topic)") // - - ("subscriber-name,s", po::value(&args.subscriberName)->default_value("sub"), - "Subscriber name prefix") // - - ("wait-time,w", po::value(&args.waitTimeMs)->default_value(1), - "Simulate a slow message consumer (Delay in ms)") // - - ("service-url,u", po::value(&args.serviceURL)->default_value(defaultServiceUrl), - "Pulsar Service URL") // - - ("receiver-queue-size,p", po::value(&args.receiverQueueSize)->default_value(1000), - "Size of the receiver queue") // - - ("io-threads,i", po::value(&args.ioThreads)->default_value(1), - "Number of IO threads to use") // - - ("listener-threads,l", po::value(&args.listenerThreads)->default_value(1), - "Number of listener threads") // - - ("pool-connections", po::value(&args.poolConnections)->default_value(false), - "whether pool connections used") // - - ("encryption-key-name,k", po::value(&args.encKeyName)->default_value(""), - "The private key name to decrypt payload") // - - 
("encryption-key-value-file,f", po::value(&args.encKeyValueFile)->default_value(""), - "The file which contains the private key to decrypt payload"); // - - po::options_description hidden; - hidden.add_options()("topic", po::value(&args.topic), "Topic name"); - - po::options_description allOptions; - allOptions.add(desc).add(hidden); - - po::variables_map map; - try { - po::store(po::command_line_parser(argc, argv).options(allOptions).positional(positional).run(), map); - po::notify(map); - } catch (const std::exception& e) { - std::cerr << "Error parsing parameters -- " << e.what() << std::endl << std::endl; - std::cerr << desc << std::endl; - return -1; - } - - if (map.count("help")) { - std::cerr << desc << std::endl; - return -1; - } - - if (map.count("topic") != 1) { - std::cerr << "Need to specify a topic name. eg: persistent://prop/cluster/ns/my-topic" << std::endl - << std::endl; - std::cerr << desc << std::endl; - return -1; - } - - LOG_INFO("--- Consumer configuration ---"); - for (po::variables_map::iterator it = map.begin(); it != map.end(); ++it) { - if (it->second.value().type() == typeid(std::string)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(int)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(double)) { - LOG_INFO(it->first << ": " << it->second.as()); - } - } - - LOG_INFO("------------------------------"); - - startPerfConsumer(args); -} diff --git a/pulsar-client-cpp/perf/PerfProducer.cc b/pulsar-client-cpp/perf/PerfProducer.cc deleted file mode 100644 index f7d33612267ab..0000000000000 --- a/pulsar-client-cpp/perf/PerfProducer.cc +++ /dev/null @@ -1,430 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -DECLARE_LOG_OBJECT() - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -namespace po = boost::program_options; - -#include -#include -#include -#include -#include -#include "RateLimiter.h" -#include -#include -typedef std::shared_ptr RateLimiterPtr; - -struct Arguments { - std::string authParams; - std::string authPlugin; - bool isUseTls; - bool isTlsAllowInsecureConnection; - std::string tlsTrustCertsFilePath; - std::string topic; - int memoryLimitMb; - double rate; - int msgSize; - int numTopics; - int numProducers; - int numOfThreadsPerProducer; - std::string serviceURL; - int producerQueueSize; - int ioThreads; - int listenerThreads; - long samplingPeriod; - long numberOfSamples; - unsigned int batchingMaxMessages; - long batchingMaxAllowedSizeInBytes; - long batchingMaxPublishDelayMs; - bool poolConnections; - std::string encKeyName; - std::string encKeyValueFile; - std::string compression; -}; - -namespace pulsar { -class PulsarFriend { - public: - static Client getClient(const std::string& url, const ClientConfiguration conf, bool poolConnections) { - return Client(url, conf, poolConnections); - } -}; -} // namespace pulsar - -unsigned long messagesProduced; -unsigned long bytesProduced; -using namespace boost::accumulators; -using namespace pulsar; - -class EncKeyReader : public CryptoKeyReader { - private: - 
std::string pubKeyContents; - - void readFile(std::string fileName, std::string& fileContents) const { - std::ifstream ifs(fileName); - std::stringstream fileStream; - fileStream << ifs.rdbuf(); - fileContents = fileStream.str(); - } - - public: - EncKeyReader(std::string keyFile) { - if (keyFile.empty()) { - return; - } - readFile(keyFile, pubKeyContents); - } - - Result getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - encKeyInfo.setKey(pubKeyContents); - return ResultOk; - } - - Result getPrivateKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const { - return ResultInvalidConfiguration; - } -}; - -// Stats -typedef accumulator_set > LatencyAccumulator; -LatencyAccumulator e2eLatencyAccumulator(quantile_probability = 0.99); -std::vector producerList; -std::vector threadList; - -std::mutex mutex; -typedef std::unique_lock Lock; - -typedef std::chrono::high_resolution_clock Clock; - -void sendCallback(pulsar::Result result, const pulsar::MessageId& msgId, size_t msgLength, - Clock::time_point& publishTime) { - LOG_DEBUG("result = " << result); - assert(result == pulsar::ResultOk); - uint64_t latencyUsec = - std::chrono::duration_cast(Clock::now() - publishTime).count(); - Lock lock(mutex); - ++messagesProduced; - bytesProduced += msgLength; - e2eLatencyAccumulator(latencyUsec); -} - -// Start a pulsar producer on a topic and keep producing messages -void runProducer(const Arguments& args, std::string topicName, int threadIndex, RateLimiterPtr limiter, - pulsar::Producer& producer, const std::atomic& exitCondition) { - LOG_INFO("Producing messages for topic = " << topicName << ", threadIndex = " << threadIndex); - - std::unique_ptr payload(new char[args.msgSize]); - memset(payload.get(), 0, args.msgSize); - pulsar::MessageBuilder builder; - - while (true) { - if (args.rate != -1) { - limiter->acquire(); - } - pulsar::Message msg = 
builder.create().setAllocatedContent(payload.get(), args.msgSize).build(); - - producer.sendAsync(msg, std::bind(sendCallback, std::placeholders::_1, std::placeholders::_2, - msg.getLength(), Clock::now())); - if (exitCondition) { - LOG_INFO("Thread interrupted. Exiting producer thread."); - break; - } - } -} - -void startPerfProducer(const Arguments& args, pulsar::ProducerConfiguration& producerConf, - pulsar::Client& client, const std::atomic& exitCondition) { - RateLimiterPtr limiter; - if (args.rate != -1) { - limiter = std::make_shared(args.rate); - } - - producerList.resize(args.numTopics * args.numProducers); - for (int i = 0; i < args.numTopics; i++) { - std::string topic = (args.numTopics == 1) ? args.topic : args.topic + "-" + std::to_string(i); - LOG_INFO("Adding " << args.numProducers << " producers on topic " << topic); - - for (int j = 0; j < args.numProducers; j++) { - pulsar::Result result = - client.createProducer(topic, producerConf, producerList[i * args.numProducers + j]); - if (result != pulsar::ResultOk) { - LOG_ERROR("Couldn't create producer: " << result); - exit(-1); - } else { - LOG_DEBUG("Created Producer at index " << i * args.numProducers + j); - } - - for (int k = 0; k < args.numOfThreadsPerProducer; k++) { - threadList.push_back(std::thread(std::bind(runProducer, args, topic, k, limiter, - producerList[i * args.numProducers + j], - std::cref(exitCondition)))); - } - } - } -} - -int main(int argc, char** argv) { - std::string defaultServiceUrl; - - // First try to read default values from config file if present - const std::string confFile = "conf/client.conf"; - - std::ifstream file(confFile.c_str()); - if (file) { - po::variables_map vm; - po::options_description confFileDesc; - confFileDesc.add_options() // - ("serviceURL", po::value()->default_value("pulsar://localhost:6650")); - - po::store(po::parse_config_file(file, confFileDesc, true), vm); - po::notify(vm); - - defaultServiceUrl = vm["serviceURL"].as(); - } - - Arguments args; 
- - // Declare the supported options. - po::positional_options_description positional; - positional.add("topic", 1); - - po::options_description desc("Allowed options"); - desc.add_options() // - ("help,h", "Print this help message") // - - ("memory-limit,ml", po::value(&args.memoryLimitMb)->default_value(64), "Memory limit (MB)") // - - ("auth-params,v", po::value(&args.authParams)->default_value(""), - "Authentication parameters, e.g., \"key1:val1,key2:val2\"") // - - ("auth-plugin,a", po::value(&args.authPlugin)->default_value(""), - "Authentication plugin class library path") // - - ("use-tls,b", po::value(&args.isUseTls)->default_value(false), - "Whether tls connection is used") // - - ("allow-insecure,d", po::value(&args.isTlsAllowInsecureConnection)->default_value(true), - "Whether insecure tls connection is allowed") // - - ("trust-cert-file,c", po::value(&args.tlsTrustCertsFilePath)->default_value(""), - "TLS trust certification file path") // - - ("rate,r", po::value(&args.rate)->default_value(100.0), - "Publish rate msg/s across topics") // - ("size,s", po::value(&args.msgSize)->default_value(1024), "Message size") // - - ("num-topics,t", po::value(&args.numTopics)->default_value(1), "Number of topics") // - - ("num-producers,n", po::value(&args.numProducers)->default_value(1), - "Number of producers (per topic)") // - - ("num-threads-per-producers", po::value(&args.numOfThreadsPerProducer)->default_value(1), - "Number of threads (per producer)") // - - ("service-url,u", po::value(&args.serviceURL)->default_value(defaultServiceUrl), - "Pulsar Service URL") // - - ("producer-queue-size,p", po::value(&args.producerQueueSize)->default_value(1000), - "Max size of producer pending messages queue") // - - ("io-threads,i", po::value(&args.ioThreads)->default_value(1), - "Number of IO threads to use") // - - ("listener-threads,l", po::value(&args.listenerThreads)->default_value(1), - "Number of listener threads") // - - ("sampling-period", 
po::value(&args.samplingPeriod)->default_value(20), - "Time elapsed in seconds before reading are aggregated. Default: 20 sec") // - - ("num-of-samples", po::value(&args.numberOfSamples)->default_value(0), - "Number of samples to take. Default: 0 (run forever)") // - - ("batch-size", po::value(&args.batchingMaxMessages)->default_value(1), - "If batch size == 1 then batching is disabled. Default batch size == 1") // - - ("compression", po::value(&args.compression)->default_value(""), - "Compression can be either 'zlib' or 'lz4'. Default is no compression") // - - ("max-batch-size-in-bytes", - po::value(&args.batchingMaxAllowedSizeInBytes)->default_value(128 * 1024), - "Use only is batch-size > 1, Default is 128 KB") // - - ("max-batch-publish-delay-in-ms", - po::value(&args.batchingMaxPublishDelayMs)->default_value(3000), - "Use only is batch-size > 1, Default is 3 seconds") // - - ("pool-connections", po::value(&args.poolConnections)->default_value(false), - "whether pool connections used") // - - ("encryption-key-name,k", po::value(&args.encKeyName)->default_value(""), - "The public key name to encrypt payload") // - - ("encryption-key-value-file,f", po::value(&args.encKeyValueFile)->default_value(""), - "The file which contains the public key to encrypt payload"); // - - po::options_description hidden; - hidden.add_options()("topic", po::value(&args.topic), "Topic name"); - - po::options_description allOptions; - allOptions.add(desc).add(hidden); - - po::variables_map map; - try { - po::store(po::command_line_parser(argc, argv).options(allOptions).positional(positional).run(), map); - po::notify(map); - } catch (const std::exception& e) { - std::cerr << "Error parsing parameters -- " << e.what() << std::endl << std::endl; - std::cerr << desc << std::endl; - return -1; - } - - if (map.count("help")) { - std::cerr << desc << std::endl; - return -1; - } - - if (map.count("topic") != 1) { - std::cerr << "Need to specify a topic name. 
eg: persistent://prop/cluster/ns/my-topic" << std::endl - << std::endl; - std::cerr << desc << std::endl; - return -1; - } - - LOG_INFO("--- Producer configuration ---"); - for (po::variables_map::iterator it = map.begin(); it != map.end(); ++it) { - if (it->second.value().type() == typeid(std::string)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(bool)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(int)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(double)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(long)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else if (it->second.value().type() == typeid(unsigned int)) { - LOG_INFO(it->first << ": " << it->second.as()); - } else { - LOG_INFO(it->first << ": " - << "new data type used, please create an else condition in the code"); - } - } - - LOG_INFO("------------------------------"); - pulsar::ProducerConfiguration producerConf; - producerConf.setMaxPendingMessages(args.producerQueueSize); - if (args.batchingMaxMessages > 1) { - producerConf.setBatchingEnabled(true); - producerConf.setBatchingMaxMessages(args.batchingMaxMessages); - producerConf.setBatchingMaxAllowedSizeInBytes(args.batchingMaxAllowedSizeInBytes); - producerConf.setBatchingMaxPublishDelayMs(args.batchingMaxPublishDelayMs); - } - - if (args.compression == "zlib") { - producerConf.setCompressionType(CompressionZLib); - } else if (args.compression == "lz4") { - producerConf.setCompressionType(CompressionLZ4); - } else if (!args.compression.empty()) { - LOG_WARN("Invalid compression type: " << args.compression); - return -1; - } - - // Block if queue is full else we will start seeing errors in sendAsync - producerConf.setBlockIfQueueFull(true); - std::shared_ptr keyReader = std::make_shared(args.encKeyValueFile); - if 
(!args.encKeyName.empty()) { - producerConf.addEncryptionKey(args.encKeyName); - producerConf.setCryptoKeyReader(keyReader); - } - - // Enable round robin message routing if it is a partitioned topic - producerConf.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - - pulsar::ClientConfiguration conf; - conf.setMemoryLimit(args.memoryLimitMb * 1024 * 1024); - conf.setUseTls(args.isUseTls); - conf.setTlsAllowInsecureConnection(args.isTlsAllowInsecureConnection); - if (!args.tlsTrustCertsFilePath.empty()) { - std::string tlsTrustCertsFilePath(args.tlsTrustCertsFilePath); - conf.setTlsTrustCertsFilePath(tlsTrustCertsFilePath); - } - conf.setIOThreads(args.ioThreads); - conf.setMessageListenerThreads(args.listenerThreads); - if (!args.authPlugin.empty()) { - pulsar::AuthenticationPtr auth = pulsar::AuthFactory::create(args.authPlugin, args.authParams); - conf.setAuth(auth); - } - - pulsar::Client client(pulsar::PulsarFriend::getClient(args.serviceURL, conf, args.poolConnections)); - - std::atomic exitCondition(false); - startPerfProducer(args, producerConf, client, exitCondition); - - Clock::time_point oldTime = Clock::now(); - unsigned long totalMessagesProduced = 0; - long messagesToSend = args.numberOfSamples; - while (args.numberOfSamples == 0 || --messagesToSend > 0) { - std::this_thread::sleep_for(std::chrono::seconds(args.samplingPeriod)); - - Clock::time_point now = Clock::now(); - double elapsed = std::chrono::duration_cast(now - oldTime).count() / 1e3; - - Lock lock(mutex); - double rate = messagesProduced / elapsed; - double throughput = bytesProduced / elapsed / 1024 / 1024 * 8; - totalMessagesProduced += messagesProduced; - messagesProduced = 0; - bytesProduced = 0; - - double latencyAvgMs = mean(e2eLatencyAccumulator) / 1000.0; - double latency99pctMs = p_square_quantile(e2eLatencyAccumulator) / 1000.0; - e2eLatencyAccumulator = LatencyAccumulator(quantile_probability = 0.99); - lock.unlock(); - - LOG_INFO("Throughput produced: " << 
rate << " msg/s --- " << throughput << " Mbit/s --- " // - << "Lat avg: " << latencyAvgMs - << " ms -- Lat 99pct: " << latency99pctMs << " ms"); - oldTime = now; - } - LOG_INFO("Total messagesProduced = " << totalMessagesProduced + messagesProduced); - exitCondition = true; - for (auto& thread : threadList) { - thread.join(); - } - // Waiting for the sendCallbacks To Complete - std::this_thread::sleep_for(std::chrono::seconds(2)); - for (int i = 0; i < producerList.size(); i++) { - producerList[i].close(); - } - // Waiting for 2 seconds - std::this_thread::sleep_for(std::chrono::seconds(2)); -} diff --git a/pulsar-client-cpp/perf/RateLimiter.h b/pulsar-client-cpp/perf/RateLimiter.h deleted file mode 100644 index eea76ec868fb6..0000000000000 --- a/pulsar-client-cpp/perf/RateLimiter.h +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#ifndef PERF_RATELIMITER_H_ -#define PERF_RATELIMITER_H_ - -#include -#include -#include - -namespace pulsar { - -class RateLimiter { - public: - RateLimiter(double rate); - - void acquire(); - - void acquire(int permits); - - private: - RateLimiter(const RateLimiter&); - RateLimiter& operator=(const RateLimiter&); - typedef std::chrono::high_resolution_clock Clock; - Clock::duration interval_; - - long storedPermits_; - double maxPermits_; - Clock::time_point nextFree_; - std::mutex mutex_; - typedef std::unique_lock Lock; -}; - -RateLimiter::RateLimiter(double rate) - : interval_(std::chrono::microseconds((long)(1e6 / rate))), - storedPermits_(0.0), - maxPermits_(rate), - nextFree_(Clock::now()) { - assert(rate < 1e6 && "Exceeded maximum rate"); -} - -void RateLimiter::acquire() { acquire(1); } - -void RateLimiter::acquire(int permits) { - Clock::time_point now = Clock::now(); - - Lock lock(mutex_); - - if (now > nextFree_) { - storedPermits_ = std::min(maxPermits_, storedPermits_ + (now - nextFree_) / interval_); - nextFree_ = now; - } - - Clock::duration wait = nextFree_ - now; - - // Determine how many stored and fresh permits to consume - long stored = std::min(permits, storedPermits_); - long fresh = permits - stored; - - // In the general RateLimiter, stored permits have no wait time, - // and thus we only have to wait for however many fresh permits we consume - Clock::duration next = fresh * interval_; - nextFree_ += next; - storedPermits_ -= stored; - - lock.unlock(); - - if (wait != Clock::duration::zero()) { - std::this_thread::sleep_for(wait); - } -} - -} // namespace pulsar - -#endif /* PERF_RATELIMITER_H_ */ diff --git a/pulsar-client-cpp/pkg/apk/.gitignore b/pulsar-client-cpp/pkg/apk/.gitignore deleted file mode 100644 index 7012f3a735480..0000000000000 --- a/pulsar-client-cpp/pkg/apk/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -pkg -perf -examples -lib diff --git a/pulsar-client-cpp/pkg/apk/APKBUILD b/pulsar-client-cpp/pkg/apk/APKBUILD deleted 
file mode 100644 index 10bdd72002bd4..0000000000000 --- a/pulsar-client-cpp/pkg/apk/APKBUILD +++ /dev/null @@ -1,57 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Maintainer: "Apache Pulsar " - -pkgname="apache-pulsar-client" -pkgver="$POM_VERSION" -pkgrel=0 -pkgdesc="Apache Pulsar - distributed pub-sub messaging system" -url="https://pulsar.apache.org" -arch="all" -license="https://www.apache.org/licenses/LICENSE-2.0.txt" -depends="" -makedepends="cmake" -install="" -subpackages="$pkgname-dev" -source="" -builddir="$srcdir/" - -build() { - set -x -e - if [ "$CBUILD" != "$CHOST" ]; then - CMAKE_CROSSOPTS="-DCMAKE_SYSTEM_NAME=Linux -DCMAKE_HOST_SYSTEM_NAME=Linux" - fi - cmake \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DCMAKE_INSTALL_LIBDIR=lib \ - -DBUILD_SHARED_LIBS=True \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_FLAGS="$CXXFLAGS" \ - -DCMAKE_C_FLAGS="$CFLAGS" \ - -DBUILD_TESTS=OFF \ - -DBUILD_PYTHON_WRAPPER=OFF \ - -DLINK_STATIC=ON \ - ${CMAKE_CROSSOPTS} ${SRC_ROOT_DIR}/pulsar-client-cpp - make -} - -package() { - make DESTDIR="$pkgdir" install -} diff --git a/pulsar-client-cpp/pkg/apk/build-apk.sh b/pulsar-client-cpp/pkg/apk/build-apk.sh deleted file mode 100755 index c316d18b24203..0000000000000 --- 
a/pulsar-client-cpp/pkg/apk/build-apk.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e -x - -export SRC_ROOT_DIR=$(git rev-parse --show-toplevel) -cd $SRC_ROOT_DIR/pulsar-client-cpp/pkg/apk - -VERSION=`python3 $SRC_ROOT_DIR/src/get-project-version.py` -# Sanitize the version string -export POM_VERSION=`echo $VERSION | sed -E 's/-[a-z]+-/./' | sed -E 's/.[A-Z]+././'` - -echo "POM_VERSION: $POM_VERSION" - -abuild-keygen -a -i -n -chmod 755 ~ -chmod 755 ~/.abuild -chmod 644 ~/.abuild/* - -mkdir -p /root/packages -chmod 777 /root/packages - -sudo -E -u pulsar abuild -r - -mv /root/packages/pkg . diff --git a/pulsar-client-cpp/pkg/apk/docker-build-apk.sh b/pulsar-client-cpp/pkg/apk/docker-build-apk.sh deleted file mode 100755 index e458d4ad44cd3..0000000000000 --- a/pulsar-client-cpp/pkg/apk/docker-build-apk.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e - -ROOT_DIR=$(git rev-parse --show-toplevel) -IMAGE=apachepulsar/pulsar-build:alpine-3.11 - -docker pull $IMAGE - -docker run -i -v $ROOT_DIR:/pulsar $IMAGE \ - /pulsar/pulsar-client-cpp/pkg/apk/build-apk.sh diff --git a/pulsar-client-cpp/pkg/deb/.gitignore b/pulsar-client-cpp/pkg/deb/.gitignore deleted file mode 100644 index 89620f31052a2..0000000000000 --- a/pulsar-client-cpp/pkg/deb/.gitignore +++ /dev/null @@ -1 +0,0 @@ -BUILD diff --git a/pulsar-client-cpp/pkg/deb/Dockerfile b/pulsar-client-cpp/pkg/deb/Dockerfile deleted file mode 100644 index 171c829266125..0000000000000 --- a/pulsar-client-cpp/pkg/deb/Dockerfile +++ /dev/null @@ -1,91 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Build pulsar client library in Centos with tools to - -FROM --platform=linux/amd64 debian:9 - -# perl is required to install OpenSSL -RUN apt-get update -y && \ - apt-get install -y curl g++ make perl dpkg-dev python3 - -# Download and compile boost -RUN curl -O -L https://boostorg.jfrog.io/artifactory/main/release/1.79.0/source/boost_1_79_0.tar.gz && \ - tar xfz boost_1_79_0.tar.gz && \ - cd boost_1_79_0 && \ - ./bootstrap.sh && \ - ./b2 address-model=64 cxxflags=-fPIC link=static threading=multi variant=release install && \ - rm -rf /boost_1_79_0.tar.gz /boost_1_79_0 - -RUN curl -O -L https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-linux-x86_64.tar.gz && \ - tar xfz cmake-3.24.0-linux-x86_64.tar.gz && \ - cp cmake-3.24.0-linux-x86_64/bin/* /usr/bin/ && \ - cp -r cmake-3.24.0-linux-x86_64/share/cmake-3.24 /usr/share/ && \ - rm -rf cmake-3.24.0-linux-x86_64 cmake-3.24.0-linux-x86_64.tar.gz - -# Download and copile protoubf -RUN curl -O -L https://github.com/google/protobuf/releases/download/v3.20.0/protobuf-cpp-3.20.0.tar.gz && \ - tar xfz protobuf-cpp-3.20.0.tar.gz && \ - cd protobuf-3.20.0/ && \ - CXXFLAGS=-fPIC ./configure && \ - make -j8 && make install && ldconfig && \ - rm -rf /protobuf-cpp-3.20.0.tar.gz /protobuf-3.20.0 - -# ZLib -RUN curl -O -L https://github.com/madler/zlib/archive/v1.2.12.tar.gz && \ - tar xfz v1.2.12.tar.gz && \ - cd zlib-1.2.12 && \ - CFLAGS="-fPIC -O3" ./configure && \ - make && make install && \ - rm -rf /v1.2.12.tar.gz /zlib-1.2.12 - -# Zstandard -RUN curl -O -L https://github.com/facebook/zstd/releases/download/v1.3.7/zstd-1.3.7.tar.gz && \ - tar xfz zstd-1.3.7.tar.gz && \ - cd zstd-1.3.7 && \ - CFLAGS="-fPIC -O3" make -j8 && \ - make install && \ - rm -rf /zstd-1.3.7 /zstd-1.3.7.tar.gz - -# Snappy -RUN curl -O -L https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz && \ - 
tar xfz snappy-1.1.3.tar.gz && \ - cd snappy-1.1.3 && \ - CXXFLAGS="-fPIC -O3" ./configure && \ - make && make install && \ - rm -rf /snappy-1.1.3 /snappy-1.1.3.tar.gz - -RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_1n.tar.gz && \ - tar xfz OpenSSL_1_1_1n.tar.gz && \ - cd openssl-OpenSSL_1_1_1n/ && \ - ./Configure -fPIC --prefix=/usr/local/ssl/ linux-x86_64 && \ - make -j8 && make install && \ - rm -rf /OpenSSL_1_1_1n.tar.gz /openssl-OpenSSL_1_1_1n - -ENV LD_LIBRARY_PATH /usr/local/ssl/lib/: - -# LibCurl -RUN curl -O -L https://github.com/curl/curl/releases/download/curl-7_61_0/curl-7.61.0.tar.gz && \ - tar xfz curl-7.61.0.tar.gz && \ - cd curl-7.61.0 && \ - CFLAGS=-fPIC ./configure --with-ssl=/usr/local/ssl/ && \ - make -j8 && make install && \ - rm -rf /curl-7.61.0.tar.gz /curl-7.61.0 - -ENV OPENSSL_ROOT_DIR /usr/local/ssl/ diff --git a/pulsar-client-cpp/pkg/deb/build-deb.sh b/pulsar-client-cpp/pkg/deb/build-deb.sh deleted file mode 100755 index b7409de535a5d..0000000000000 --- a/pulsar-client-cpp/pkg/deb/build-deb.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -set -e -x - -cd /pulsar -SRC_ROOT_DIR=$(pwd) -cd $SRC_ROOT_DIR/pulsar-client-cpp/pkg/deb - -POM_VERSION=`$SRC_ROOT_DIR/src/get-project-version.py` -# Sanitize VERSION by removing `SNAPSHOT` if any since it's not legal in DEB -VERSION=`echo $POM_VERSION | awk -F- '{print $1}'` - -ROOT_DIR=apache-pulsar-$POM_VERSION-src -CPP_DIR=$ROOT_DIR/pulsar-client-cpp - -rm -rf BUILD -mkdir BUILD -cd BUILD -tar xfz $SRC_ROOT_DIR/target/apache-pulsar-$POM_VERSION-src.tar.gz -pushd $CPP_DIR - -# link libraries for protoc -export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH - -chmod +x $(find . -name "*.sh") -cmake . -DBUILD_TESTS=OFF -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_PERF_TOOLS=OFF -DLINK_STATIC=ON -make pulsarShared pulsarSharedNossl pulsarStatic pulsarStaticWithDeps -j 3 -popd - -DEST_DIR=apache-pulsar-client -mkdir -p $DEST_DIR/DEBIAN -cat < $DEST_DIR/DEBIAN/control -Package: apache-pulsar-client -Version: ${VERSION} -Maintainer: Apache Pulsar -Architecture: amd64 -Description: The Apache Pulsar client contains a C++ and C APIs to interact with Apache Pulsar brokers. -EOF - -DEVEL_DEST_DIR=apache-pulsar-client-dev -mkdir -p $DEVEL_DEST_DIR/DEBIAN -cat < $DEVEL_DEST_DIR/DEBIAN/control -Package: apache-pulsar-client-dev -Version: ${VERSION} -Maintainer: Apache Pulsar -Architecture: amd64 -Depends: apache-pulsar-client -Description: The Apache Pulsar client contains a C++ and C APIs to interact with Apache Pulsar brokers. 
-EOF - -mkdir -p $DEST_DIR/usr/lib -mkdir -p $DEVEL_DEST_DIR/usr/lib -mkdir -p $DEVEL_DEST_DIR/usr/include -mkdir -p $DEST_DIR/usr/share/doc/pulsar-client-$VERSION -mkdir -p $DEVEL_DEST_DIR/usr/share/doc/pulsar-client-dev-$VERSION - -ls $CPP_DIR/lib/libpulsar* - -cp -ar $CPP_DIR/include/pulsar $DEVEL_DEST_DIR/usr/include/ -cp $CPP_DIR/lib/libpulsar.a $DEVEL_DEST_DIR/usr/lib -cp $CPP_DIR/lib/libpulsarwithdeps.a $DEVEL_DEST_DIR/usr/lib -cp $CPP_DIR/lib/libpulsar.so.$POM_VERSION $DEST_DIR/usr/lib -cp $CPP_DIR/lib/libpulsarnossl.so.$POM_VERSION $DEST_DIR/usr/lib - -pushd $DEST_DIR/usr/lib -ln -s libpulsar.so.$POM_VERSION libpulsar.so -ln -s libpulsarnossl.so.$POM_VERSION libpulsarnossl.so -popd - -cp $ROOT_DIR/NOTICE $DEST_DIR/usr/share/doc/pulsar-client-$VERSION -cp $CPP_DIR/pkg/licenses/* $DEST_DIR/usr/share/doc/pulsar-client-$VERSION -cp $CPP_DIR/pkg/licenses/LICENSE.txt $DEST_DIR/usr/share/doc/pulsar-client-$VERSION/copyright -cp $CPP_DIR/pkg/licenses/LICENSE.txt $DEST_DIR/DEBIAN/copyright -cp $CPP_DIR/pkg/licenses/LICENSE.txt $DEVEL_DEST_DIR/DEBIAN/copyright - -cp $DEST_DIR/usr/share/doc/pulsar-client-$VERSION/* $DEVEL_DEST_DIR/usr/share/doc/pulsar-client-dev-$VERSION - - -## Build actual debian packages -dpkg-deb --build $DEST_DIR -dpkg-deb --build $DEVEL_DEST_DIR - -mkdir DEB -mv *.deb DEB -cd DEB -dpkg-scanpackages . /dev/null | gzip -9c > Packages.gz diff --git a/pulsar-client-cpp/pkg/deb/docker-build-deb.sh b/pulsar-client-cpp/pkg/deb/docker-build-deb.sh deleted file mode 100755 index bc7f42234b343..0000000000000 --- a/pulsar-client-cpp/pkg/deb/docker-build-deb.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -ex - -ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../.. &> /dev/null && pwd )" -IMAGE_NAME=apachepulsar/pulsar-build:debian-9-2.11 - -if [[ -z $BUILD_IMAGE ]]; then - # pull the image from DockerHub by default - docker pull $IMAGE_NAME -else - docker build --platform linux/amd64 -t $IMAGE_NAME $ROOT_DIR/pulsar-client-cpp/pkg/deb -fi - -docker run --platform linux/amd64 -v $ROOT_DIR:/pulsar $IMAGE_NAME \ - /pulsar/pulsar-client-cpp/pkg/deb/build-deb.sh diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE-boost.txt b/pulsar-client-cpp/pkg/licenses/LICENSE-boost.txt deleted file mode 100644 index 36b7cd93cdfba..0000000000000 --- a/pulsar-client-cpp/pkg/licenses/LICENSE-boost.txt +++ /dev/null @@ -1,23 +0,0 @@ -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole 
or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE-jsoncpp.txt b/pulsar-client-cpp/pkg/licenses/LICENSE-jsoncpp.txt deleted file mode 100644 index c41a1d1c77489..0000000000000 --- a/pulsar-client-cpp/pkg/licenses/LICENSE-jsoncpp.txt +++ /dev/null @@ -1,55 +0,0 @@ -The JsonCpp library's source code, including accompanying documentation, -tests and demonstration applications, are licensed under the following -conditions... - -Baptiste Lepilleur and The JsonCpp Authors explicitly disclaim copyright in all -jurisdictions which recognize such a disclaimer. In such jurisdictions, -this software is released into the Public Domain. - -In jurisdictions which do not recognize Public Domain property (e.g. Germany as of -2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur and -The JsonCpp Authors, and is released under the terms of the MIT License (see below). - -In jurisdictions which recognize Public Domain property, the user of this -software may choose to accept it either as 1) Public Domain, 2) under the -conditions of the MIT License (see below), or 3) under the terms of dual -Public Domain/MIT License conditions described here, as they choose. 
- -The MIT License is about as close to Public Domain as a license can get, and is -described in clear, concise terms at: - - http://en.wikipedia.org/wiki/MIT_License - -The full text of the MIT License follows: - -======================================================================== -Copyright (c) 2007-2010 Baptiste Lepilleur and The JsonCpp Authors - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -======================================================================== -(END LICENSE TEXT) - -The MIT license is compatible with both the GPL and commercial -software, affording one all of the rights of Public Domain with the -minor nuisance of being required to keep the above copyright notice -and license text in the source code. Note also that by accepting the -Public Domain "license" you can re-license your copy using whatever -license you like. 
diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE-libcurl.txt b/pulsar-client-cpp/pkg/licenses/LICENSE-libcurl.txt deleted file mode 100644 index 560a49dcee011..0000000000000 --- a/pulsar-client-cpp/pkg/licenses/LICENSE-libcurl.txt +++ /dev/null @@ -1,22 +0,0 @@ -COPYRIGHT AND PERMISSION NOTICE - -Copyright (c) 1996 - 2018, Daniel Stenberg, , and many -contributors, see the THANKS file. - -All rights reserved. - -Permission to use, copy, modify, and distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright -notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN -NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE -OR OTHER DEALINGS IN THE SOFTWARE. - -Except as contained in this notice, the name of a copyright holder shall not -be used in advertising or otherwise to promote the sale, use or other dealings -in this Software without prior written authorization of the copyright holder. diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE-protobuf.txt b/pulsar-client-cpp/pkg/licenses/LICENSE-protobuf.txt deleted file mode 100644 index 19b305b00060a..0000000000000 --- a/pulsar-client-cpp/pkg/licenses/LICENSE-protobuf.txt +++ /dev/null @@ -1,32 +0,0 @@ -Copyright 2008 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt b/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt deleted file mode 100644 index f1f93cd6a6af5..0000000000000 --- a/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt +++ /dev/null @@ -1,23 +0,0 @@ -zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.12, March 27th, 2022 - - Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. 
In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE.txt b/pulsar-client-cpp/pkg/licenses/LICENSE.txt deleted file mode 100644 index bc2e47af67d3b..0000000000000 --- a/pulsar-client-cpp/pkg/licenses/LICENSE.txt +++ /dev/null @@ -1,214 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ----------------------------------------------------------------------------------------------------- - -This package is statically linked with libraries with the following licenses: - -MIT: - * Boost https://www.boost.org -- LICENSE-boost.txt - * Libcurl -- https://curl.haxx.se -- LICENSE-libcurl.txt - -ZLib -- ZLib license -- LICENSE-zlib.txt - -Protocol buffers -- Protocol Buffers license -- LICENSE-protobuf.txt diff --git a/pulsar-client-cpp/pkg/rpm/.gitignore b/pulsar-client-cpp/pkg/rpm/.gitignore deleted file mode 100644 index b77cada7b2abc..0000000000000 --- a/pulsar-client-cpp/pkg/rpm/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -BUILD -BUILDROOT -SOURCES -RPMS -SRPMS \ No newline at end of file diff --git a/pulsar-client-cpp/pkg/rpm/Dockerfile b/pulsar-client-cpp/pkg/rpm/Dockerfile deleted file mode 100644 index 9e4a057003e03..0000000000000 --- a/pulsar-client-cpp/pkg/rpm/Dockerfile +++ /dev/null @@ -1,92 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Build pulsar client library in Centos with tools to build static RPM - -FROM --platform=linux/amd64 centos:7 - -RUN yum update -y && \ - yum install -y gcc-c++ make rpm-build which \ - createrepo libstdc++-static.x86_64 python3 - -# Download and compile boost -# GCC 4.8.2 implementation of std::regex is buggy, so we install boost::regex here -RUN curl -O -L https://boostorg.jfrog.io/artifactory/main/release/1.79.0/source/boost_1_79_0.tar.gz && \ - tar xfz boost_1_79_0.tar.gz && \ - cd boost_1_79_0 && \ - ./bootstrap.sh --with-libraries=regex && \ - ./b2 address-model=64 cxxflags=-fPIC link=static threading=multi variant=release install && \ - rm -rf /boost_1_79_0.tar.gz /boost_1_79_0 - -RUN curl -O -L https://github.com/Kitware/CMake/releases/download/v3.24.0/cmake-3.24.0-linux-x86_64.tar.gz && \ - tar xfz cmake-3.24.0-linux-x86_64.tar.gz && \ - cp cmake-3.24.0-linux-x86_64/bin/* /usr/bin/ && \ - cp -r cmake-3.24.0-linux-x86_64/share/cmake-3.24 /usr/share/ && \ - rm -rf cmake-3.24.0-linux-x86_64 cmake-3.24.0-linux-x86_64.tar.gz - -# Download and copile protoubf -RUN curl -O -L https://github.com/google/protobuf/releases/download/v3.20.0/protobuf-cpp-3.20.0.tar.gz && \ - tar xfz protobuf-cpp-3.20.0.tar.gz && \ - cd protobuf-3.20.0/ && \ - CXXFLAGS=-fPIC ./configure && \ - make -j8 && make install && ldconfig && \ - rm -rf /protobuf-cpp-3.20.0.tar.gz /protobuf-3.20.0 - -# ZLib -RUN curl -O -L https://github.com/madler/zlib/archive/v1.2.12.tar.gz && \ - tar xfz v1.2.12.tar.gz && \ - cd zlib-1.2.12 && \ - CFLAGS="-fPIC -O3" ./configure && \ - make && 
make install && \ - rm -rf /v1.2.12.tar.gz /zlib-1.2.12 - -# Zstandard -RUN curl -O -L https://github.com/facebook/zstd/releases/download/v1.3.7/zstd-1.3.7.tar.gz && \ - tar xfz zstd-1.3.7.tar.gz && \ - cd zstd-1.3.7 && \ - CFLAGS="-fPIC -O3" make -j8 && \ - make install && \ - rm -rf /zstd-1.3.7 /zstd-1.3.7.tar.gz - -# Snappy -RUN curl -O -L https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz && \ - tar xfz snappy-1.1.3.tar.gz && \ - cd snappy-1.1.3 && \ - CXXFLAGS="-fPIC -O3" ./configure && \ - make && make install && \ - rm -rf /snappy-1.1.3 /snappy-1.1.3.tar.gz - -RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_1n.tar.gz && \ - tar xfz OpenSSL_1_1_1n.tar.gz && \ - cd openssl-OpenSSL_1_1_1n/ && \ - ./Configure -fPIC --prefix=/usr/local/ssl/ linux-x86_64 && \ - make -j8 && make install && \ - rm -rf /OpenSSL_1_1_1n.tar.gz /openssl-OpenSSL_1_1_1n - -ENV LD_LIBRARY_PATH /usr/local/ssl/lib/: - -# LibCurl -RUN curl -O -L https://github.com/curl/curl/releases/download/curl-7_61_0/curl-7.61.0.tar.gz && \ - tar xfz curl-7.61.0.tar.gz && \ - cd curl-7.61.0 && \ - CFLAGS=-fPIC ./configure --with-ssl=/usr/local/ssl/ && \ - make && make install && \ - rm -rf /curl-7.61.0.tar.gz /curl-7.61.0 - -ENV OPENSSL_ROOT_DIR /usr/local/ssl/ diff --git a/pulsar-client-cpp/pkg/rpm/SPECS/pulsar-client.spec b/pulsar-client-cpp/pkg/rpm/SPECS/pulsar-client.spec deleted file mode 100644 index b23fd41719e83..0000000000000 --- a/pulsar-client-cpp/pkg/rpm/SPECS/pulsar-client.spec +++ /dev/null @@ -1,97 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -%define name apache-pulsar-client -%define release 1 -%define buildroot %{_topdir}/%{name}-%{version}-root - -BuildRoot: %{buildroot} -Summary: Apache Pulsar client library -URL: https://pulsar.apache.org/ -License: Apache License v2 -Name: %{name} -Version: %{version} -Release: %{release} -Source: apache-pulsar-%{pom_version}-src.tar.gz -Prefix: /usr -AutoReq: no - -%package devel -Summary: Apache Pulsar client library -Provides: apache-pulsar-client-devel -Requires: apache-pulsar-client - -%description -The Apache Pulsar client contains a C++ and C APIs to interact -with Apache Pulsar brokers. - -%description devel -The Apache Pulsar client contains a C++ and C APIs to interact -with Apache Pulsar brokers. - -The devel package contains C++ and C API headers and `libpulsar.a` -static library. - -%prep -%setup -q -n apache-pulsar-%{pom_version}-src - -%build -cd pulsar-client-cpp -chmod +x $(find . -name "*.sh") -cmake . 
-DBUILD_TESTS=OFF -DLINK_STATIC=ON -DBUILD_PYTHON_WRAPPER=OFF -make pulsarShared pulsarSharedNossl pulsarStatic pulsarStaticWithDeps -j 3 - -%install -cd pulsar-client-cpp -INCLUDE_DIR=$RPM_BUILD_ROOT/usr/include -LIB_DIR=$RPM_BUILD_ROOT/usr/lib -DOC_DIR=$RPM_BUILD_ROOT/usr/share/doc/pulsar-client-%{version} -DOC_DEVEL_DIR=$RPM_BUILD_ROOT/usr/share/doc/pulsar-client-devel-%{version} -mkdir -p $INCLUDE_DIR $LIB_DIR $DOC_DIR $DOC_DEVEL_DIR - -cp -ar include/pulsar $INCLUDE_DIR -cp lib/libpulsar.a $LIB_DIR -cp lib/libpulsarwithdeps.a $LIB_DIR -cp lib/libpulsar.so.%{pom_version} $LIB_DIR -cp lib/libpulsarnossl.so.%{pom_version} $LIB_DIR - -# Copy LICENSE files -cp ../NOTICE $DOC_DIR -cp pkg/licenses/* $DOC_DIR - -cp $DOC_DIR/* $DOC_DEVEL_DIR/ - -cd $LIB_DIR -ln -s libpulsar.so.%{pom_version} libpulsar.so -ln -s libpulsarnossl.so.%{pom_version} libpulsarnossl.so - -%files -%defattr(-,root,root) -/usr/lib/libpulsar.so -/usr/lib/libpulsar.so.%{pom_version} -/usr/lib/libpulsarnossl.so -/usr/lib/libpulsarnossl.so.%{pom_version} -/usr/share/doc/pulsar-client-%{version} - -%files devel -%defattr(-,root,root) -/usr/lib/libpulsar.a -/usr/lib/libpulsarwithdeps.a -/usr/include/pulsar -/usr/share/doc/pulsar-client-devel-%{version} diff --git a/pulsar-client-cpp/pkg/rpm/build-rpm.sh b/pulsar-client-cpp/pkg/rpm/build-rpm.sh deleted file mode 100755 index 97647a15d48f7..0000000000000 --- a/pulsar-client-cpp/pkg/rpm/build-rpm.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e - -cd /pulsar -ROOT_DIR=$(pwd) -cd $ROOT_DIR/pulsar-client-cpp/pkg/rpm - -POM_VERSION=`$ROOT_DIR/src/get-project-version.py` - -# Sanitize VERSION by removing `-incubating` since it's not legal in RPM -VERSION=`echo $POM_VERSION | awk -F- '{print $1}'` - -mkdir -p BUILD RPMS SOURCES SPECS SRPMS - -cp $ROOT_DIR/target/apache-pulsar-$POM_VERSION-src.tar.gz SOURCES - -rpmbuild -v -bb --clean \ - --define "version $VERSION" \ - --define "pom_version $POM_VERSION" \ - --define "_topdir $PWD" \ - SPECS/pulsar-client.spec - -cd RPMS/x86_64 -createrepo . diff --git a/pulsar-client-cpp/pkg/rpm/docker-build-rpm.sh b/pulsar-client-cpp/pkg/rpm/docker-build-rpm.sh deleted file mode 100755 index 4ba02afc3dceb..0000000000000 --- a/pulsar-client-cpp/pkg/rpm/docker-build-rpm.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -ex - -ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../.. &> /dev/null && pwd )" -IMAGE_NAME=apachepulsar/pulsar-build:centos-7-2.11 - -if [[ -z $BUILD_IMAGE ]]; then - # pull the image from DockerHub by default - docker pull $IMAGE_NAME -else - docker build --platform linux/amd64 -t $IMAGE_NAME $ROOT_DIR/pulsar-client-cpp/pkg/rpm -fi - -docker run --platform linux/amd64 -v $ROOT_DIR:/pulsar $IMAGE_NAME \ - /pulsar/pulsar-client-cpp/pkg/rpm/build-rpm.sh diff --git a/pulsar-client-cpp/pulsar-test-service-start.sh b/pulsar-client-cpp/pulsar-test-service-start.sh deleted file mode 100755 index 63915cfc31101..0000000000000 --- a/pulsar-client-cpp/pulsar-test-service-start.sh +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e - -SRC_DIR=$(git rev-parse --show-toplevel) -cd $SRC_DIR - -if [ -f /.dockerenv ]; then - # When running tests inside docker. 
Unpack the pulsar tgz - # because otherwise the classpath might not be correct - # in picking up the jars from local maven repo - export PULSAR_DIR=/tmp/pulsar-test-dist - rm -rf $PULSAR_DIR - mkdir $PULSAR_DIR - TGZ=$(ls -1 $SRC_DIR/distribution/server/target/apache-pulsar*bin.tar.gz | head -1) - tar -xzf $TGZ -C $PULSAR_DIR --strip-components 1 -else - export PULSAR_DIR=$SRC_DIR -fi - -DATA_DIR=/tmp/pulsar-test-data -rm -rf $DATA_DIR -mkdir -p $DATA_DIR - -# Set up basic authentication -cp $SRC_DIR/pulsar-client-cpp/test-conf/.htpasswd $DATA_DIR/.htpasswd -export PULSAR_EXTRA_OPTS=-Dpulsar.auth.basic.conf=$DATA_DIR/.htpasswd - -# Copy TLS test certificates -mkdir -p $DATA_DIR/certs -cp $SRC_DIR/pulsar-broker/src/test/resources/authentication/tls/*.pem $DATA_DIR/certs - -# Generate secret key and token -mkdir -p $DATA_DIR/tokens -$PULSAR_DIR/bin/pulsar tokens create-secret-key --output $DATA_DIR/tokens/secret.key - -$PULSAR_DIR/bin/pulsar tokens create \ - --subject token-principal \ - --secret-key file:///$DATA_DIR/tokens/secret.key \ - > $DATA_DIR/tokens/token.txt - -export PULSAR_STANDALONE_CONF=$SRC_DIR/pulsar-client-cpp/test-conf/standalone-ssl.conf -$PULSAR_DIR/bin/pulsar-daemon start standalone \ - --no-functions-worker --no-stream-storage \ - --bookkeeper-dir $DATA_DIR/bookkeeper - -echo "-- Wait for Pulsar service to be ready" -until curl http://localhost:8080/metrics > /dev/null 2>&1 ; do sleep 1; done - -echo "-- Pulsar service is ready -- Configure permissions" - -export PULSAR_CLIENT_CONF=$SRC_DIR/pulsar-client-cpp/test-conf/client-ssl.conf - -# Create "standalone" cluster if it does not exist -$PULSAR_DIR/bin/pulsar-admin clusters list | grep -q '^standalone$' || - $PULSAR_DIR/bin/pulsar-admin clusters create \ - standalone \ - --url http://localhost:8080/ \ - --url-secure https://localhost:8443/ \ - --broker-url pulsar://localhost:6650/ \ - --broker-url-secure pulsar+ssl://localhost:6651/ - -# Update "public" tenant -$PULSAR_DIR/bin/pulsar-admin 
tenants update public -r "anonymous" -c "standalone" - -# Update "public/default" with no auth required -$PULSAR_DIR/bin/pulsar-admin namespaces grant-permission public/default \ - --actions produce,consume \ - --role "anonymous" - -# Create "public/default-2" with no auth required -$PULSAR_DIR/bin/pulsar-admin namespaces create public/default-2 \ - --clusters standalone -$PULSAR_DIR/bin/pulsar-admin namespaces grant-permission public/default-2 \ - --actions produce,consume \ - --role "anonymous" - -# Create "public/default-3" with no auth required -$PULSAR_DIR/bin/pulsar-admin namespaces create public/default-3 \ - --clusters standalone -$PULSAR_DIR/bin/pulsar-admin namespaces grant-permission public/default-3 \ - --actions produce,consume \ - --role "anonymous" - -# Create "public/default-4" with encryption required -$PULSAR_DIR/bin/pulsar-admin namespaces create public/default-4 \ - --clusters standalone -$PULSAR_DIR/bin/pulsar-admin namespaces grant-permission public/default-4 \ - --actions produce,consume \ - --role "anonymous" -$PULSAR_DIR/bin/pulsar-admin namespaces set-encryption-required public/default-4 -e - -# Create "public/test-backlog-quotas" to test backlog quotas policy -$PULSAR_DIR/bin/pulsar-admin namespaces create public/test-backlog-quotas \ - --clusters standalone - -# Create "private" tenant -$PULSAR_DIR/bin/pulsar-admin tenants create private -r "" -c "standalone" - -# Create "private/auth" with required authentication -$PULSAR_DIR/bin/pulsar-admin namespaces create private/auth --clusters standalone - -$PULSAR_DIR/bin/pulsar-admin namespaces grant-permission private/auth \ - --actions produce,consume \ - --role "token-principal" - -echo "-- Ready to start tests" diff --git a/pulsar-client-cpp/pulsar-test-service-stop.sh b/pulsar-client-cpp/pulsar-test-service-stop.sh deleted file mode 100755 index 774e9b45bb2ac..0000000000000 --- a/pulsar-client-cpp/pulsar-test-service-stop.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# -# 
Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e - -ROOT_DIR=$(git rev-parse --show-toplevel) -cd $ROOT_DIR - -bin/pulsar-daemon stop standalone diff --git a/pulsar-client-cpp/python/.gitignore b/pulsar-client-cpp/python/.gitignore deleted file mode 100644 index 5cb909fbbd0bc..0000000000000 --- a/pulsar-client-cpp/python/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -MANIFEST -build -dist -*.egg-info diff --git a/pulsar-client-cpp/python/CMakeLists.txt b/pulsar-client-cpp/python/CMakeLists.txt deleted file mode 100644 index 63cf163adbc43..0000000000000 --- a/pulsar-client-cpp/python/CMakeLists.txt +++ /dev/null @@ -1,103 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -INCLUDE_DIRECTORIES("${Boost_INCLUDE_DIRS}" "${PYTHON_INCLUDE_DIRS}") - -ADD_LIBRARY(_pulsar SHARED src/pulsar.cc - src/producer.cc - src/consumer.cc - src/config.cc - src/enums.cc - src/client.cc - src/message.cc - src/authentication.cc - src/reader.cc - src/schema.cc - src/cryptoKeyReader.cc - src/exceptions.cc - src/utils.cc - ) - -SET(CMAKE_SHARED_LIBRARY_PREFIX ) -SET(CMAKE_SHARED_LIBRARY_SUFFIX .so) - -if (NOT APPLE AND NOT MSVC) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_PYTHON}") -endif() - -if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - set(CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS "${CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS} -Qunused-arguments -undefined dynamic_lookup") -endif() - -# Newer boost versions don't use the -mt suffix -if (NOT DEFINED ${Boost_PYTHON37-MT_LIBRARY}) - set(Boost_PYTHON37-MT_LIBRARY ${Boost_PYTHON37_LIBRARY}) -endif() - -if (NOT DEFINED ${Boost_PYTHON38-MT_LIBRARY}) - set(Boost_PYTHON38-MT_LIBRARY ${Boost_PYTHON38_LIBRARY}) -endif() - -if (NOT DEFINED ${Boost_PYTHON39-MT_LIBRARY}) - set(Boost_PYTHON39-MT_LIBRARY ${Boost_PYTHON39_LIBRARY}) -endif() - -if (NOT DEFINED ${Boost_PYTHON310-MT_LIBRARY}) - set(Boost_PYTHON310-MT_LIBRARY ${Boost_PYTHON310_LIBRARY}) -endif() - -# Try all possible boost-python variable namings -set(PYTHON_WRAPPER_LIBS ${Boost_PYTHON_LIBRARY} - ${Boost_PYTHON3_LIBRARY} - ${Boost_PYTHON37-MT_LIBRARY} - ${Boost_PYTHON38_LIBRARY} - ${Boost_PYTHON39_LIBRARY} - ${Boost_PYTHON310_LIBRARY} - ) - -if (APPLE) - if (Boost_PYTHON37-MT_LIBRARY_RELEASE) - set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} 
${Boost_PYTHON37-MT_LIBRARY_RELEASE}) - endif () - if (Boost_PYTHON38-MT_LIBRARY_RELEASE) - set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON38-MT_LIBRARY_RELEASE}) - endif () - if (Boost_PYTHON39-MT_LIBRARY_RELEASE) - set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON39-MT_LIBRARY_RELEASE}) - endif () - if (Boost_PYTHON310-MT_LIBRARY_RELEASE) - set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON310-MT_LIBRARY_RELEASE}) - endif () -endif() - -message(STATUS "Using Boost Python libs: ${PYTHON_WRAPPER_LIBS}") - -if (NOT PYTHON_WRAPPER_LIBS) - MESSAGE(FATAL_ERROR "Could not find Boost Python library") -endif () - -if (APPLE) - set(CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS "${CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS} -undefined dynamic_lookup") - target_link_libraries(_pulsar -Wl,-all_load pulsarStatic ${PYTHON_WRAPPER_LIBS} ${COMMON_LIBS} ${ICU_LIBS}) -else () - if (NOT MSVC) - set (CMAKE_SHARED_LINKER_FLAGS " -static-libgcc -static-libstdc++") - endif() - target_link_libraries(_pulsar pulsarStatic ${PYTHON_WRAPPER_LIBS} ${COMMON_LIBS}) -endif () diff --git a/pulsar-client-cpp/python/build-mac-wheels.sh b/pulsar-client-cpp/python/build-mac-wheels.sh deleted file mode 100755 index 6a4dae7117e7d..0000000000000 --- a/pulsar-client-cpp/python/build-mac-wheels.sh +++ /dev/null @@ -1,300 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e - -PYTHON_VERSIONS=( - '3.7 3.7.13' - '3.8 3.8.13' - '3.9 3.9.10' - '3.10 3.10.2' -) - -export MACOSX_DEPLOYMENT_TARGET=10.15 -MACOSX_DEPLOYMENT_TARGET_MAJOR=${MACOSX_DEPLOYMENT_TARGET%%.*} - -ZLIB_VERSION=1.2.12 -OPENSSL_VERSION=1_1_1n -BOOST_VERSION=1.78.0 -PROTOBUF_VERSION=3.20.0 -ZSTD_VERSION=1.5.2 -SNAPPY_VERSION=1.1.3 -CURL_VERSION=7.61.0 - -ROOT_DIR=$(git rev-parse --show-toplevel) -cd "${ROOT_DIR}/pulsar-client-cpp" - - -# Compile and cache dependencies -CACHE_DIR=~/.pulsar-mac-wheels-cache -mkdir -p $CACHE_DIR - -cd $CACHE_DIR - -PREFIX=$CACHE_DIR/install - -############################################################################### -for line in "${PYTHON_VERSIONS[@]}"; do - read -r -a PY <<< "$line" - PYTHON_VERSION=${PY[0]} - PYTHON_VERSION_LONG=${PY[1]} - - if [ ! 
-f Python-${PYTHON_VERSION_LONG}/.done ]; then - echo "Building Python $PYTHON_VERSION_LONG" - curl -O -L https://www.python.org/ftp/python/${PYTHON_VERSION_LONG}/Python-${PYTHON_VERSION_LONG}.tgz - tar xfz Python-${PYTHON_VERSION_LONG}.tgz - - PY_PREFIX=$CACHE_DIR/py-$PYTHON_VERSION - pushd Python-${PYTHON_VERSION_LONG} - if [ $PYTHON_VERSION = '3.7' ]; then - UNIVERSAL_ARCHS='intel-64' - PY_CFLAGS=" -arch x86_64" - else - UNIVERSAL_ARCHS='universal2' - fi - - CFLAGS="-fPIC -O3 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET} -I${PREFIX}/include ${PY_CFLAGS}" \ - LDFLAGS=" ${PY_CFLAGS} -L${PREFIX}/lib" \ - ./configure --prefix=$PY_PREFIX --enable-shared --enable-universalsdk --with-universal-archs=${UNIVERSAL_ARCHS} - make -j16 - make install - - curl -O -L https://files.pythonhosted.org/packages/27/d6/003e593296a85fd6ed616ed962795b2f87709c3eee2bca4f6d0fe55c6d00/wheel-0.37.1-py2.py3-none-any.whl - $PY_PREFIX/bin/pip3 install wheel-*.whl - - touch .done - popd - else - echo "Using cached Python $PYTHON_VERSION_LONG" - fi -done - - -############################################################################### -if [ ! -f zlib-${ZLIB_VERSION}/.done ]; then - echo "Building ZLib" - curl -O -L https://zlib.net/zlib-${ZLIB_VERSION}.tar.gz - tar xvfz zlib-$ZLIB_VERSION.tar.gz - pushd zlib-$ZLIB_VERSION - CFLAGS="-fPIC -O3 -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" ./configure --prefix=$PREFIX - make -j16 - make install - touch .done - popd -else - echo "Using cached ZLib" -fi - -############################################################################### -if [ ! 
-f openssl-OpenSSL_${OPENSSL_VERSION}.done ]; then - echo "Building OpenSSL" - curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_${OPENSSL_VERSION}.tar.gz - # -arch arm64 -arch x86_64 - tar xvfz OpenSSL_${OPENSSL_VERSION}.tar.gz - mv openssl-OpenSSL_${OPENSSL_VERSION} openssl-OpenSSL_${OPENSSL_VERSION}-arm64 - pushd openssl-OpenSSL_${OPENSSL_VERSION}-arm64 - CFLAGS="-fPIC -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ - ./Configure --prefix=$PREFIX no-shared darwin64-arm64-cc - make -j8 - make install - popd - - tar xvfz OpenSSL_${OPENSSL_VERSION}.tar.gz - mv openssl-OpenSSL_${OPENSSL_VERSION} openssl-OpenSSL_${OPENSSL_VERSION}-x86_64 - pushd openssl-OpenSSL_${OPENSSL_VERSION}-x86_64 - CFLAGS="-fPIC -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ - ./Configure --prefix=$PREFIX no-shared darwin64-x86_64-cc - make -j8 - make install - popd - - # Create universal binaries - lipo -create openssl-OpenSSL_${OPENSSL_VERSION}-arm64/libssl.a openssl-OpenSSL_${OPENSSL_VERSION}-x86_64/libssl.a \ - -output $PREFIX/lib/libssl.a - lipo -create openssl-OpenSSL_${OPENSSL_VERSION}-arm64/libcrypto.a openssl-OpenSSL_${OPENSSL_VERSION}-x86_64/libcrypto.a \ - -output $PREFIX/lib/libcrypto.a - - touch openssl-OpenSSL_${OPENSSL_VERSION}.done -else - echo "Using cached OpenSSL" -fi - -############################################################################### -BOOST_VERSION_=${BOOST_VERSION//./_} -for line in "${PYTHON_VERSIONS[@]}"; do - read -r -a PY <<< "$line" - PYTHON_VERSION=${PY[0]} - PYTHON_VERSION_LONG=${PY[1]} - - DIR=boost-src-${BOOST_VERSION}-python-${PYTHON_VERSION} - if [ ! 
-f $DIR/.done ]; then - echo "Building Boost for Py $PYTHON_VERSION" - curl -O -L https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz - tar xfz boost_${BOOST_VERSION_}.tar.gz - mv boost_${BOOST_VERSION_} $DIR - - PY_PREFIX=$CACHE_DIR/py-$PYTHON_VERSION - PY_INCLUDE_DIR=${PY_PREFIX}/include/python${PYTHON_VERSION} - if [ $PYTHON_VERSION = '3.7' ]; then - PY_INCLUDE_DIR=${PY_INCLUDE_DIR}m - fi - - pushd $DIR - cat < user-config.jam - using python : $PYTHON_VERSION - : python3 - : ${PY_INCLUDE_DIR} - : ${PY_PREFIX}/lib - ; -EOF - ./bootstrap.sh --with-libraries=python --with-python=python3 --with-python-root=$PY_PREFIX \ - --prefix=$CACHE_DIR/boost-py-$PYTHON_VERSION - ./b2 address-model=64 cxxflags="-fPIC -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ - link=static threading=multi \ - --user-config=./user-config.jam \ - variant=release python=${PYTHON_VERSION} \ - -j16 \ - install - touch .done - popd - else - echo "Using cached Boost for Py $PYTHON_VERSION" - fi - -done - - - -############################################################################### -if [ ! -f protobuf-${PROTOBUF_VERSION}/.done ]; then - echo "Building Protobuf" - curl -O -L https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/protobuf-cpp-${PROTOBUF_VERSION}.tar.gz - tar xvfz protobuf-cpp-${PROTOBUF_VERSION}.tar.gz - pushd protobuf-${PROTOBUF_VERSION} - CXXFLAGS="-fPIC -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ - ./configure --prefix=$PREFIX - make -j16 - make install - touch .done - popd -else - echo "Using cached Protobuf" -fi - -############################################################################### -if [ ! 
-f zstd-${ZSTD_VERSION}/.done ]; then - echo "Building ZStd" - curl -O -L https://github.com/facebook/zstd/releases/download/v${ZSTD_VERSION}/zstd-${ZSTD_VERSION}.tar.gz - tar xvfz zstd-${ZSTD_VERSION}.tar.gz - pushd zstd-${ZSTD_VERSION} - CFLAGS="-fPIC -O3 -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" PREFIX=$PREFIX \ - make -j16 install - touch .done - popd -else - echo "Using cached ZStd" -fi - -############################################################################### -if [ ! -f snappy-${SNAPPY_VERSION}/.done ]; then - echo "Building Snappy" - curl -O -L https://github.com/google/snappy/releases/download/${SNAPPY_VERSION}/snappy-${SNAPPY_VERSION}.tar.gz - tar xvfz snappy-${SNAPPY_VERSION}.tar.gz - pushd snappy-${SNAPPY_VERSION} - CXXFLAGS="-fPIC -O3 -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ - ./configure --prefix=$PREFIX - make -j16 - make install - touch .done - popd -else - echo "Using cached Snappy" -fi - -############################################################################### -if [ ! 
-f curl-${CURL_VERSION}/.done ]; then - echo "Building LibCurl" - CURL_VERSION_=${CURL_VERSION//./_} - curl -O -L https://github.com/curl/curl/releases/download/curl-${CURL_VERSION_}/curl-${CURL_VERSION}.tar.gz - tar xfz curl-${CURL_VERSION}.tar.gz - pushd curl-${CURL_VERSION} - CFLAGS="-fPIC -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ - ./configure --with-ssl=$PREFIX \ - --without-nghttp2 --without-libidn2 --disable-ldap \ - --prefix=$PREFIX - make -j16 install - touch .done - popd -else - echo "Using cached LibCurl" -fi - -############################################################################### -############################################################################### -############################################################################### -############################################################################### - -for line in "${PYTHON_VERSIONS[@]}"; do - read -r -a PY <<< "$line" - PYTHON_VERSION=${PY[0]} - PYTHON_VERSION_LONG=${PY[1]} - echo '----------------------------------------------------------------------------' - echo '----------------------------------------------------------------------------' - echo '----------------------------------------------------------------------------' - echo "Build wheel for Python $PYTHON_VERSION" - - cd "${ROOT_DIR}/pulsar-client-cpp" - - find . -name CMakeCache.txt | xargs -r rm - find . -name CMakeFiles | xargs -r rm -rf - - PY_PREFIX=$CACHE_DIR/py-$PYTHON_VERSION - PY_EXE=$PY_PREFIX/bin/python3 - - PY_INCLUDE_DIR=${PY_PREFIX}/include/python${PYTHON_VERSION} - ARCHS='arm64;x86_64' - if [ $PYTHON_VERSION = '3.7' ]; then - PY_INCLUDE_DIR=${PY_INCLUDE_DIR}m - ARCHS='x86_64' - fi - - set -x - cmake . 
\ - -DCMAKE_OSX_ARCHITECTURES=${ARCHS} \ - -DCMAKE_OSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET} \ - -DCMAKE_INSTALL_PREFIX=$PREFIX \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_PREFIX_PATH=$PREFIX \ - -DCMAKE_CXX_FLAGS=-I$PREFIX/include \ - -DBoost_INCLUDE_DIR=$CACHE_DIR/boost-py-$PYTHON_VERSION/include \ - -DBoost_LIBRARY_DIR=$CACHE_DIR/boost-py-$PYTHON_VERSION/lib \ - -DPYTHON_INCLUDE_DIR=$PY_INCLUDE_DIR \ - -DPYTHON_LIBRARY=$PY_PREFIX/lib/libpython${PYTHON_VERSION}.dylib \ - -DLINK_STATIC=ON \ - -DBUILD_TESTS=OFF \ - -DBUILD_WIRESHARK=OFF \ - -DPROTOC_PATH=$PREFIX/bin/protoc - - make clean - make _pulsar -j16 - - cd python - $PY_EXE setup.py bdist_wheel -done diff --git a/pulsar-client-cpp/python/custom_logger_test.py b/pulsar-client-cpp/python/custom_logger_test.py deleted file mode 100755 index 60f331592e36c..0000000000000 --- a/pulsar-client-cpp/python/custom_logger_test.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -from unittest import TestCase, main -import asyncio -import logging -from pulsar import Client - -class CustomLoggingTest(TestCase): - - serviceUrl = 'pulsar://localhost:6650' - - def test_async_func_with_custom_logger(self): - # boost::python::call may fail in C++ destructors, even worse, calls - # to PyErr_Print could corrupt the Python interpreter. - # See https://github.com/boostorg/python/issues/374 for details. - # This test is to verify these functions won't be called in C++ destructors - # so that Python's async function works well. - client = Client( - self.serviceUrl, - logger=logging.getLogger('custom-logger') - ) - - async def async_get(value): - consumer = client.subscribe('test_async_get', 'sub') - consumer.close() - return value - - value = 'foo' - result = asyncio.run(async_get(value)) - self.assertEqual(value, result) - - client.close() - -if __name__ == '__main__': - logging.basicConfig(level=logging.DEBUG) - main() diff --git a/pulsar-client-cpp/python/examples/company.avsc b/pulsar-client-cpp/python/examples/company.avsc deleted file mode 100644 index 5fb186092182b..0000000000000 --- a/pulsar-client-cpp/python/examples/company.avsc +++ /dev/null @@ -1,21 +0,0 @@ -{ - "doc": "this is doc", - "namespace": "example.avro", - "type": "record", - "name": "Company", - "fields": [ - {"name": "name", "type": ["null", "string"]}, - {"name": "address", "type": ["null", "string"]}, - {"name": "employees", "type": ["null", {"type": "array", "items": { - "type": "record", - "name": "Employee", - "fields": [ - {"name": "name", "type": ["null", "string"]}, - {"name": "age", "type": ["null", "int"]} - ] - }}]}, - {"name": "labels", "type": ["null", {"type": "map", "values": "string"}]}, - {"name": "companyType", "type": ["null", {"type": "enum", "name": "CompanyType", "symbols": - ["companyType1", "companyType2", "companyType3"]}]} - ] -} \ No newline at end of file diff --git a/pulsar-client-cpp/python/examples/rpc_client.py 
b/pulsar-client-cpp/python/examples/rpc_client.py deleted file mode 100755 index fceac5480f412..0000000000000 --- a/pulsar-client-cpp/python/examples/rpc_client.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - - -import pulsar -import threading -import uuid - - -DEFAULT_CLIENT_TOPIC = 'rpc-client-topic' -DEFAULT_SERVER_TOPIC = 'rpc-server-topic' -UUID = str(uuid.uuid4()) -NUM_CLIENT = 0 -LOCK = threading.Lock() - - -class RPCClient(object): - - def __init__(self, - client_topic=DEFAULT_CLIENT_TOPIC, - server_topic=DEFAULT_SERVER_TOPIC): - self.client_topic = client_topic - self.server_topic = server_topic - - global NUM_CLIENT - with LOCK: - self.client_no = NUM_CLIENT - NUM_CLIENT += 1 - - self.response = None - self.partition_key = '{0}_{1}'.format(UUID, self.client_no) - self.client = pulsar.Client('pulsar://localhost:6650') - self.producer = self.client.create_producer(server_topic) - self.consumer = \ - self.client.subscribe(client_topic, - 'rpc-client-{}'.format(self.partition_key), - message_listener=self.on_response) - - self.consumer.resume_message_listener() - - def on_response(self, consumer, message): - if message.partition_key() == self.partition_key \ - and consumer.topic() == self.client_topic: - msg = message.data().decode('utf-8') - print('Received: {0}'.format(msg)) - self.response = msg - consumer.acknowledge(message) - - def call(self, message): - self.response = None - self.producer.send(message.encode('utf-8'), partition_key=self.partition_key) - - while self.response is None: - pass - - return self.response - - -msg = 'foo' -rpc_client = RPCClient() -ret = rpc_client.call(msg) - -print('RPCClient message sent: {0}, result: {1}'.format(msg, ret)) diff --git a/pulsar-client-cpp/python/examples/rpc_server.py b/pulsar-client-cpp/python/examples/rpc_server.py deleted file mode 100755 index d5c445f929b29..0000000000000 --- a/pulsar-client-cpp/python/examples/rpc_server.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -import pulsar - - -DEFAULT_CLIENT_TOPIC = 'rpc-client-topic' -DEFAULT_SERVER_TOPIC = 'rpc-server-topic' - - -class RPCServer(object): - def __init__(self, - client_topic=DEFAULT_CLIENT_TOPIC, - server_topic=DEFAULT_SERVER_TOPIC): - self.client_topic = client_topic - self.server_topic = server_topic - - self.client = pulsar.Client('pulsar://localhost:6650') - self.producer = self.client.create_producer(client_topic) - self.consumer = \ - self.client.subscribe(server_topic, - 'rpc-server', - pulsar.ConsumerType.Shared, - message_listener=self.on_response) - - def on_response(self, consumer, message): - print('Received from {0}: {1}'.format(message.partition_key(), - message.data().decode('utf-8'))) - - self.producer.send('{} bar'.format(message.data().decode('utf-8')), - partition_key=message.partition_key()) - consumer.acknowledge(message) - - def start(self): - self.consumer.resume_message_listener() - - -rpc_server = RPCServer() -rpc_server.start() - -try: - while True: - pass -except KeyboardInterrupt: - print('Interrupted.') diff --git a/pulsar-client-cpp/python/pulsar/__init__.py b/pulsar-client-cpp/python/pulsar/__init__.py deleted file mode 100644 index 942ec8ffc0944..0000000000000 --- a/pulsar-client-cpp/python/pulsar/__init__.py +++ /dev/null @@ -1,1428 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -""" -The Pulsar Python client library is based on the existing C++ client library. -All the same features are exposed through the Python interface. - -Currently, the supported Python versions are 3.7, 3.8, 3.9 and 3.10. - -## Install from PyPI - -Download Python wheel binary files for MacOS and Linux -directly from the PyPI archive. - - #!shell - $ sudo pip install pulsar-client - -## Install from sources - -Follow the instructions to compile the Pulsar C++ client library. This method -will also build the Python binding for the library. 
- -To install the Python bindings: - - #!shell - $ cd pulsar-client-cpp/python - $ sudo python setup.py install - -## Examples - -### [Producer](#pulsar.Producer) example - - #!python - import pulsar - - client = pulsar.Client('pulsar://localhost:6650') - - producer = client.create_producer('my-topic') - - for i in range(10): - producer.send(('Hello-%d' % i).encode('utf-8')) - - client.close() - -#### [Consumer](#pulsar.Consumer) Example - - #!python - import pulsar - - client = pulsar.Client('pulsar://localhost:6650') - - consumer = client.subscribe('my-topic', 'my-subscription') - - while True: - msg = consumer.receive() - try: - print("Received message '{}' id='{}'".format(msg.data(), msg.message_id())) - consumer.acknowledge(msg) - except Exception: - consumer.negative_acknowledge(msg) - - client.close() - -### [Async producer](#pulsar.Producer.send_async) example - - #!python - import pulsar - - client = pulsar.Client('pulsar://localhost:6650') - - producer = client.create_producer( - 'my-topic', - block_if_queue_full=True, - batching_enabled=True, - batching_max_publish_delay_ms=10 - ) - - def send_callback(res, msg_id): - print('Message published res=%s', res) - - while True: - producer.send_async(('Hello-%d' % i).encode('utf-8'), send_callback) - - client.close() -""" - -import logging -import _pulsar - -from _pulsar import Result, CompressionType, ConsumerType, InitialPosition, PartitionsRoutingMode, BatchingType # noqa: F401 - -from pulsar.exceptions import * - -from pulsar.functions.function import Function -from pulsar.functions.context import Context -from pulsar.functions.serde import SerDe, IdentitySerDe, PickleSerDe -from pulsar import schema -_schema = schema - -import re -_retype = type(re.compile('x')) - -import certifi -from datetime import timedelta - - -class MessageId: - """ - Represents a message id - """ - - def __init__(self, partition=-1, ledger_id=-1, entry_id=-1, batch_index=-1): - self._msg_id = _pulsar.MessageId(partition, ledger_id, 
entry_id, batch_index) - - 'Represents the earliest message stored in a topic' - earliest = _pulsar.MessageId.earliest - - 'Represents the latest message published on a topic' - latest = _pulsar.MessageId.latest - - def ledger_id(self): - return self._msg_id.ledger_id() - - def entry_id(self): - return self._msg_id.entry_id() - - def batch_index(self): - return self._msg_id.batch_index() - - def partition(self): - return self._msg_id.partition() - - def serialize(self): - """ - Returns a bytes representation of the message id. - This bytes sequence can be stored and later deserialized. - """ - return self._msg_id.serialize() - - @staticmethod - def deserialize(message_id_bytes): - """ - Deserialize a message id object from a previously - serialized bytes sequence. - """ - return _pulsar.MessageId.deserialize(message_id_bytes) - - -class Message: - """ - Message objects are returned by a consumer, either by calling `receive` or - through a listener. - """ - - def data(self): - """ - Returns object typed bytes with the payload of the message. - """ - return self._message.data() - - def value(self): - """ - Returns object with the de-serialized version of the message content - """ - return self._schema.decode(self._message.data()) - - def properties(self): - """ - Return the properties attached to the message. Properties are - application-defined key/value pairs that will be attached to the - message. - """ - return self._message.properties() - - def partition_key(self): - """ - Get the partitioning key for the message. - """ - return self._message.partition_key() - - def publish_timestamp(self): - """ - Get the timestamp in milliseconds with the message publish time. - """ - return self._message.publish_timestamp() - - def event_timestamp(self): - """ - Get the timestamp in milliseconds with the message event time. - """ - return self._message.event_timestamp() - - def message_id(self): - """ - The message ID that can be used to refere to this particular message. 
- """ - return self._message.message_id() - - def topic_name(self): - """ - Get the topic Name from which this message originated from - """ - return self._message.topic_name() - - def redelivery_count(self): - """ - Get the redelivery count for this message - """ - return self._message.redelivery_count() - - def schema_version(self): - """ - Get the schema version for this message - """ - return self._message.schema_version() - - @staticmethod - def _wrap(_message): - self = Message() - self._message = _message - return self - - -class MessageBatch: - - def __init__(self): - self._msg_batch = _pulsar.MessageBatch() - - def with_message_id(self, msg_id): - if not isinstance(msg_id, _pulsar.MessageId): - if isinstance(msg_id, MessageId): - msg_id = msg_id._msg_id - else: - raise TypeError("unknown message id type") - self._msg_batch.with_message_id(msg_id) - return self - - def parse_from(self, data, size): - self._msg_batch.parse_from(data, size) - _msgs = self._msg_batch.messages() - return list(map(Message._wrap, _msgs)) - - -class Authentication: - """ - Authentication provider object. Used to load authentication from an external - shared library. - """ - def __init__(self, dynamicLibPath, authParamsString): - """ - Create the authentication provider instance. - - **Args** - - * `dynamicLibPath`: Path to the authentication provider shared library - (such as `tls.so`) - * `authParamsString`: Comma-separated list of provider-specific - configuration params - """ - _check_type(str, dynamicLibPath, 'dynamicLibPath') - _check_type(str, authParamsString, 'authParamsString') - self.auth = _pulsar.Authentication(dynamicLibPath, authParamsString) - - -class AuthenticationTLS(Authentication): - """ - TLS Authentication implementation - """ - def __init__(self, certificate_path, private_key_path): - """ - Create the TLS authentication provider instance. 
- - **Args** - - * `certificatePath`: Path to the public certificate - * `privateKeyPath`: Path to private TLS key - """ - _check_type(str, certificate_path, 'certificate_path') - _check_type(str, private_key_path, 'private_key_path') - self.auth = _pulsar.AuthenticationTLS(certificate_path, private_key_path) - - -class AuthenticationToken(Authentication): - """ - Token based authentication implementation - """ - def __init__(self, token): - """ - Create the token authentication provider instance. - - **Args** - - * `token`: A string containing the token or a functions that provides a - string with the token - """ - if not (isinstance(token, str) or callable(token)): - raise ValueError("Argument token is expected to be of type 'str' or a function returning 'str'") - self.auth = _pulsar.AuthenticationToken(token) - - -class AuthenticationAthenz(Authentication): - """ - Athenz Authentication implementation - """ - def __init__(self, auth_params_string): - """ - Create the Athenz authentication provider instance. - - **Args** - - * `auth_params_string`: JSON encoded configuration for Athenz client - """ - _check_type(str, auth_params_string, 'auth_params_string') - self.auth = _pulsar.AuthenticationAthenz(auth_params_string) - -class AuthenticationOauth2(Authentication): - """ - Oauth2 Authentication implementation - """ - def __init__(self, auth_params_string): - """ - Create the Oauth2 authentication provider instance. - - **Args** - - * `auth_params_string`: JSON encoded configuration for Oauth2 client - """ - _check_type(str, auth_params_string, 'auth_params_string') - self.auth = _pulsar.AuthenticationOauth2(auth_params_string) - -class AuthenticationBasic(Authentication): - """ - Basic Authentication implementation - """ - def __init__(self, username, password): - """ - Create the Basic authentication provider instance. 
- - **Args** - - * `username`: Used to authentication as username - * `password`: Used to authentication as password - """ - _check_type(str, username, 'username') - _check_type(str, password, 'password') - self.auth = _pulsar.AuthenticationBasic(username, password) - -class Client: - """ - The Pulsar client. A single client instance can be used to create producers - and consumers on multiple topics. - - The client will share the same connection pool and threads across all - producers and consumers. - """ - - def __init__(self, service_url, - authentication=None, - operation_timeout_seconds=30, - io_threads=1, - message_listener_threads=1, - concurrent_lookup_requests=50000, - log_conf_file_path=None, - use_tls=False, - tls_trust_certs_file_path=None, - tls_allow_insecure_connection=False, - tls_validate_hostname=False, - logger=None, - connection_timeout_ms=10000, - listener_name=None - ): - """ - Create a new Pulsar client instance. - - **Args** - - * `service_url`: The Pulsar service url eg: pulsar://my-broker.com:6650/ - - **Options** - - * `authentication`: - Set the authentication provider to be used with the broker. For example: - `AuthenticationTls`, `AuthenticationToken`, `AuthenticationAthenz` or `AuthenticationOauth2` - * `operation_timeout_seconds`: - Set timeout on client operations (subscribe, create producer, close, - unsubscribe). - * `io_threads`: - Set the number of IO threads to be used by the Pulsar client. - * `message_listener_threads`: - Set the number of threads to be used by the Pulsar client when - delivering messages through message listener. The default is 1 thread - per Pulsar client. If using more than 1 thread, messages for distinct - `message_listener`s will be delivered in different threads, however a - single `MessageListener` will always be assigned to the same thread. - * `concurrent_lookup_requests`: - Number of concurrent lookup-requests allowed on each broker connection - to prevent overload on the broker. 
- * `log_conf_file_path`: - Initialize log4cxx from a configuration file. - * `use_tls`: - Configure whether to use TLS encryption on the connection. This setting - is deprecated. TLS will be automatically enabled if the `serviceUrl` is - set to `pulsar+ssl://` or `https://` - * `tls_trust_certs_file_path`: - Set the path to the trusted TLS certificate file. If empty defaults to - certifi. - * `tls_allow_insecure_connection`: - Configure whether the Pulsar client accepts untrusted TLS certificates - from the broker. - * `tls_validate_hostname`: - Configure whether the Pulsar client validates that the hostname of the - endpoint, matches the common name on the TLS certificate presented by - the endpoint. - * `logger`: - Set a Python logger for this Pulsar client. Should be an instance of `logging.Logger`. - * `connection_timeout_ms`: - Set timeout in milliseconds on TCP connections. - * `listener_name`: - Listener name for lookup. Clients can use listenerName to choose one of the listeners - as the service URL to create a connection to the broker as long as the network is accessible. - advertisedListeners must enabled in broker side. 
- """ - _check_type(str, service_url, 'service_url') - _check_type_or_none(Authentication, authentication, 'authentication') - _check_type(int, operation_timeout_seconds, 'operation_timeout_seconds') - _check_type(int, connection_timeout_ms, 'connection_timeout_ms') - _check_type(int, io_threads, 'io_threads') - _check_type(int, message_listener_threads, 'message_listener_threads') - _check_type(int, concurrent_lookup_requests, 'concurrent_lookup_requests') - _check_type_or_none(str, log_conf_file_path, 'log_conf_file_path') - _check_type(bool, use_tls, 'use_tls') - _check_type_or_none(str, tls_trust_certs_file_path, 'tls_trust_certs_file_path') - _check_type(bool, tls_allow_insecure_connection, 'tls_allow_insecure_connection') - _check_type(bool, tls_validate_hostname, 'tls_validate_hostname') - _check_type_or_none(logging.Logger, logger, 'logger') - _check_type_or_none(str, listener_name, 'listener_name') - - conf = _pulsar.ClientConfiguration() - if authentication: - conf.authentication(authentication.auth) - conf.operation_timeout_seconds(operation_timeout_seconds) - conf.connection_timeout(connection_timeout_ms) - conf.io_threads(io_threads) - conf.message_listener_threads(message_listener_threads) - conf.concurrent_lookup_requests(concurrent_lookup_requests) - if log_conf_file_path: - conf.log_conf_file_path(log_conf_file_path) - conf.set_logger(self._prepare_logger(logger) if logger else None) - if listener_name: - conf.listener_name(listener_name) - if use_tls or service_url.startswith('pulsar+ssl://') or service_url.startswith('https://'): - conf.use_tls(True) - if tls_trust_certs_file_path: - conf.tls_trust_certs_file_path(tls_trust_certs_file_path) - else: - conf.tls_trust_certs_file_path(certifi.where()) - conf.tls_allow_insecure_connection(tls_allow_insecure_connection) - conf.tls_validate_hostname(tls_validate_hostname) - self._client = _pulsar.Client(service_url, conf) - self._consumers = [] - - @staticmethod - def _prepare_logger(logger): - import 
logging - def log(level, message): - old_threads = logging.logThreads - logging.logThreads = False - logger.log(logging.getLevelName(level), message) - logging.logThreads = old_threads - return log - - def create_producer(self, topic, - producer_name=None, - schema=schema.BytesSchema(), - initial_sequence_id=None, - send_timeout_millis=30000, - compression_type=CompressionType.NONE, - max_pending_messages=1000, - max_pending_messages_across_partitions=50000, - block_if_queue_full=False, - batching_enabled=False, - batching_max_messages=1000, - batching_max_allowed_size_in_bytes=128*1024, - batching_max_publish_delay_ms=10, - chunking_enabled=False, - message_routing_mode=PartitionsRoutingMode.RoundRobinDistribution, - lazy_start_partitioned_producers=False, - properties=None, - batching_type=BatchingType.Default, - encryption_key=None, - crypto_key_reader=None - ): - """ - Create a new producer on a given topic. - - **Args** - - * `topic`: - The topic name - - **Options** - - * `producer_name`: - Specify a name for the producer. If not assigned, - the system will generate a globally unique name which can be accessed - with `Producer.producer_name()`. When specifying a name, it is app to - the user to ensure that, for a given topic, the producer name is unique - across all Pulsar's clusters. - * `schema`: - Define the schema of the data that will be published by this producer. - The schema will be used for two purposes: - - Validate the data format against the topic defined schema - - Perform serialization/deserialization between data and objects - An example for this parameter would be to pass `schema=JsonSchema(MyRecordClass)`. - * `initial_sequence_id`: - Set the baseline for the sequence ids for messages - published by the producer. First message will be using - `(initialSequenceId + 1)`` as its sequence id and subsequent messages will - be assigned incremental sequence ids, if not otherwise specified. 
- * `send_timeout_millis`: - If a message is not acknowledged by the server before the - `send_timeout` expires, an error will be reported. - * `compression_type`: - Set the compression type for the producer. By default, message - payloads are not compressed. Supported compression types are - `CompressionType.LZ4`, `CompressionType.ZLib`, `CompressionType.ZSTD` and `CompressionType.SNAPPY`. - ZSTD is supported since Pulsar 2.3. Consumers will need to be at least at that - release in order to be able to receive messages compressed with ZSTD. - SNAPPY is supported since Pulsar 2.4. Consumers will need to be at least at that - release in order to be able to receive messages compressed with SNAPPY. - * `max_pending_messages`: - Set the max size of the queue holding the messages pending to receive - an acknowledgment from the broker. - * `max_pending_messages_across_partitions`: - Set the max size of the queue holding the messages pending to receive - an acknowledgment across partitions from the broker. - * `block_if_queue_full`: Set whether `send_async` operations should - block when the outgoing message queue is full. - * `message_routing_mode`: - Set the message routing mode for the partitioned producer. Default is `PartitionsRoutingMode.RoundRobinDistribution`, - other option is `PartitionsRoutingMode.UseSinglePartition` - * `lazy_start_partitioned_producers`: - This config affects producers of partitioned topics only. It controls whether - producers register and connect immediately to the owner broker of each partition - or start lazily on demand. The internal producer of one partition is always - started eagerly, chosen by the routing policy, but the internal producers of - any additional partitions are started on demand, upon receiving their first - message. - Using this mode can reduce the strain on brokers for topics with large numbers of - partitions and when the SinglePartition routing policy is used without keyed messages. 
- Because producer connection can be on demand, this can produce extra send latency - for the first messages of a given partition. - * `properties`: - Sets the properties for the producer. The properties associated with a producer - can be used for identify a producer at broker side. - * `batching_type`: - Sets the batching type for the producer. - There are two batching type: DefaultBatching and KeyBasedBatching. - - Default batching - incoming single messages: - (k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3) - batched into single batch message: - [(k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3)] - - - KeyBasedBatching - incoming single messages: - (k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3) - batched into single batch message: - [(k1, v1), (k1, v2), (k1, v3)], [(k2, v1), (k2, v2), (k2, v3)], [(k3, v1), (k3, v2), (k3, v3)] - * `chunking_enabled`: - If message size is higher than allowed max publish-payload size by broker then chunking_enabled - helps producer to split message into multiple chunks and publish them to broker separately and in - order. So, it allows client to successfully publish large size of messages in pulsar. 
- * encryption_key: - The key used for symmetric encryption, configured on the producer side - * crypto_key_reader: - Symmetric encryption class implementation, configuring public key encryption messages for the producer - and private key decryption messages for the consumer - """ - _check_type(str, topic, 'topic') - _check_type_or_none(str, producer_name, 'producer_name') - _check_type(_schema.Schema, schema, 'schema') - _check_type_or_none(int, initial_sequence_id, 'initial_sequence_id') - _check_type(int, send_timeout_millis, 'send_timeout_millis') - _check_type(CompressionType, compression_type, 'compression_type') - _check_type(int, max_pending_messages, 'max_pending_messages') - _check_type(int, max_pending_messages_across_partitions, 'max_pending_messages_across_partitions') - _check_type(bool, block_if_queue_full, 'block_if_queue_full') - _check_type(bool, batching_enabled, 'batching_enabled') - _check_type(int, batching_max_messages, 'batching_max_messages') - _check_type(int, batching_max_allowed_size_in_bytes, 'batching_max_allowed_size_in_bytes') - _check_type(int, batching_max_publish_delay_ms, 'batching_max_publish_delay_ms') - _check_type(bool, chunking_enabled, 'chunking_enabled') - _check_type_or_none(dict, properties, 'properties') - _check_type(BatchingType, batching_type, 'batching_type') - _check_type_or_none(str, encryption_key, 'encryption_key') - _check_type_or_none(CryptoKeyReader, crypto_key_reader, 'crypto_key_reader') - _check_type(bool, lazy_start_partitioned_producers, 'lazy_start_partitioned_producers') - - conf = _pulsar.ProducerConfiguration() - conf.send_timeout_millis(send_timeout_millis) - conf.compression_type(compression_type) - conf.max_pending_messages(max_pending_messages) - conf.max_pending_messages_across_partitions(max_pending_messages_across_partitions) - conf.block_if_queue_full(block_if_queue_full) - conf.batching_enabled(batching_enabled) - conf.batching_max_messages(batching_max_messages) - 
conf.batching_max_allowed_size_in_bytes(batching_max_allowed_size_in_bytes) - conf.batching_max_publish_delay_ms(batching_max_publish_delay_ms) - conf.partitions_routing_mode(message_routing_mode) - conf.batching_type(batching_type) - conf.chunking_enabled(chunking_enabled) - conf.lazy_start_partitioned_producers(lazy_start_partitioned_producers) - if producer_name: - conf.producer_name(producer_name) - if initial_sequence_id: - conf.initial_sequence_id(initial_sequence_id) - if properties: - for k, v in properties.items(): - conf.property(k, v) - - conf.schema(schema.schema_info()) - if encryption_key: - conf.encryption_key(encryption_key) - if crypto_key_reader: - conf.crypto_key_reader(crypto_key_reader.cryptoKeyReader) - - if batching_enabled and chunking_enabled: - raise ValueError("Batching and chunking of messages can't be enabled together.") - - p = Producer() - p._producer = self._client.create_producer(topic, conf) - p._schema = schema - p._client = self._client - return p - - def subscribe(self, topic, subscription_name, - consumer_type=ConsumerType.Exclusive, - schema=schema.BytesSchema(), - message_listener=None, - receiver_queue_size=1000, - max_total_receiver_queue_size_across_partitions=50000, - consumer_name=None, - unacked_messages_timeout_ms=None, - broker_consumer_stats_cache_time_ms=30000, - negative_ack_redelivery_delay_ms=60000, - is_read_compacted=False, - properties=None, - pattern_auto_discovery_period=60, - initial_position=InitialPosition.Latest, - crypto_key_reader=None, - replicate_subscription_state_enabled=False, - max_pending_chunked_message=10, - auto_ack_oldest_chunked_message_on_queue_full=False - ): - """ - Subscribe to the given topic and subscription combination. - - **Args** - - * `topic`: The name of the topic, list of topics or regex pattern. 
- This method will accept these forms: - - `topic='my-topic'` - - `topic=['topic-1', 'topic-2', 'topic-3']` - - `topic=re.compile('persistent://public/default/topic-*')` - * `subscription`: The name of the subscription. - - **Options** - - * `consumer_type`: - Select the subscription type to be used when subscribing to the topic. - * `schema`: - Define the schema of the data that will be received by this consumer. - * `message_listener`: - Sets a message listener for the consumer. When the listener is set, - the application will receive messages through it. Calls to - `consumer.receive()` will not be allowed. The listener function needs - to accept (consumer, message), for example: - - #!python - def my_listener(consumer, message): - # process message - consumer.acknowledge(message) - - * `receiver_queue_size`: - Sets the size of the consumer receive queue. The consumer receive - queue controls how many messages can be accumulated by the consumer - before the application calls `receive()`. Using a higher value could - potentially increase the consumer throughput at the expense of higher - memory utilization. Setting the consumer queue size to zero decreases - the throughput of the consumer by disabling pre-fetching of messages. - This approach improves the message distribution on shared subscription - by pushing messages only to those consumers that are ready to process - them. Neither receive with timeout nor partitioned topics can be used - if the consumer queue size is zero. The `receive()` function call - should not be interrupted when the consumer queue size is zero. The - default value is 1000 messages and should work well for most use - cases. - * `max_total_receiver_queue_size_across_partitions` - Set the max total receiver queue size across partitions. - This setting will be used to reduce the receiver queue size for individual partitions - * `consumer_name`: - Sets the consumer name. 
- * `unacked_messages_timeout_ms`: - Sets the timeout in milliseconds for unacknowledged messages. The - timeout needs to be greater than 10 seconds. An exception is thrown if - the given value is less than 10 seconds. If a successful - acknowledgement is not sent within the timeout, all the unacknowledged - messages are redelivered. - * `negative_ack_redelivery_delay_ms`: - The delay after which to redeliver the messages that failed to be - processed (with the `consumer.negative_acknowledge()`) - * `broker_consumer_stats_cache_time_ms`: - Sets the time duration for which the broker-side consumer stats will - be cached in the client. - * `is_read_compacted`: - Selects whether to read the compacted version of the topic - * `properties`: - Sets the properties for the consumer. The properties associated with a consumer - can be used for identify a consumer at broker side. - * `pattern_auto_discovery_period`: - Periods of seconds for consumer to auto discover match topics. - * `initial_position`: - Set the initial position of a consumer when subscribing to the topic. - It could be either: `InitialPosition.Earliest` or `InitialPosition.Latest`. - Default: `Latest`. - * crypto_key_reader: - Symmetric encryption class implementation, configuring public key encryption messages for the producer - and private key decryption messages for the consumer - * replicate_subscription_state_enabled: - Set whether the subscription status should be replicated. - Default: `False`. - * max_pending_chunked_message: - Consumer buffers chunk messages into memory until it receives all the chunks of the original message. - While consuming chunk-messages, chunks from same message might not be contiguous in the stream and they - might be mixed with other messages' chunks. so, consumer has to maintain multiple buffers to manage - chunks coming from different messages. 
This mainly happens when multiple publishers are publishing - messages on the topic concurrently or publisher failed to publish all chunks of the messages. - - If it's zero, the pending chunked messages will not be limited. - - Default: `10`. - * auto_ack_oldest_chunked_message_on_queue_full: - Buffering large number of outstanding uncompleted chunked messages can create memory pressure and it - can be guarded by providing the maxPendingChunkedMessage threshold. See setMaxPendingChunkedMessage. - Once, consumer reaches this threshold, it drops the outstanding unchunked-messages by silently acking - if autoAckOldestChunkedMessageOnQueueFull is true else it marks them for redelivery. - Default: `False`. - """ - _check_type(str, subscription_name, 'subscription_name') - _check_type(ConsumerType, consumer_type, 'consumer_type') - _check_type(_schema.Schema, schema, 'schema') - _check_type(int, receiver_queue_size, 'receiver_queue_size') - _check_type(int, max_total_receiver_queue_size_across_partitions, - 'max_total_receiver_queue_size_across_partitions') - _check_type_or_none(str, consumer_name, 'consumer_name') - _check_type_or_none(int, unacked_messages_timeout_ms, 'unacked_messages_timeout_ms') - _check_type(int, broker_consumer_stats_cache_time_ms, 'broker_consumer_stats_cache_time_ms') - _check_type(int, negative_ack_redelivery_delay_ms, 'negative_ack_redelivery_delay_ms') - _check_type(int, pattern_auto_discovery_period, 'pattern_auto_discovery_period') - _check_type(bool, is_read_compacted, 'is_read_compacted') - _check_type_or_none(dict, properties, 'properties') - _check_type(InitialPosition, initial_position, 'initial_position') - _check_type_or_none(CryptoKeyReader, crypto_key_reader, 'crypto_key_reader') - _check_type(int, max_pending_chunked_message, 'max_pending_chunked_message') - _check_type(bool, auto_ack_oldest_chunked_message_on_queue_full, 'auto_ack_oldest_chunked_message_on_queue_full') - - conf = _pulsar.ConsumerConfiguration() - 
conf.consumer_type(consumer_type) - conf.read_compacted(is_read_compacted) - if message_listener: - conf.message_listener(_listener_wrapper(message_listener, schema)) - conf.receiver_queue_size(receiver_queue_size) - conf.max_total_receiver_queue_size_across_partitions(max_total_receiver_queue_size_across_partitions) - if consumer_name: - conf.consumer_name(consumer_name) - if unacked_messages_timeout_ms: - conf.unacked_messages_timeout_ms(unacked_messages_timeout_ms) - - conf.negative_ack_redelivery_delay_ms(negative_ack_redelivery_delay_ms) - conf.broker_consumer_stats_cache_time_ms(broker_consumer_stats_cache_time_ms) - if properties: - for k, v in properties.items(): - conf.property(k, v) - conf.subscription_initial_position(initial_position) - - conf.schema(schema.schema_info()) - - if crypto_key_reader: - conf.crypto_key_reader(crypto_key_reader.cryptoKeyReader) - - conf.replicate_subscription_state_enabled(replicate_subscription_state_enabled) - conf.max_pending_chunked_message(max_pending_chunked_message) - conf.auto_ack_oldest_chunked_message_on_queue_full(auto_ack_oldest_chunked_message_on_queue_full) - - c = Consumer() - if isinstance(topic, str): - # Single topic - c._consumer = self._client.subscribe(topic, subscription_name, conf) - elif isinstance(topic, list): - # List of topics - c._consumer = self._client.subscribe_topics(topic, subscription_name, conf) - elif isinstance(topic, _retype): - # Regex pattern - c._consumer = self._client.subscribe_pattern(topic.pattern, subscription_name, conf) - else: - raise ValueError("Argument 'topic' is expected to be of a type between (str, list, re.pattern)") - - c._client = self - c._schema = schema - self._consumers.append(c) - return c - - def create_reader(self, topic, start_message_id, - schema=schema.BytesSchema(), - reader_listener=None, - receiver_queue_size=1000, - reader_name=None, - subscription_role_prefix=None, - is_read_compacted=False, - crypto_key_reader=None - ): - """ - Create a reader on a 
particular topic - - **Args** - - * `topic`: The name of the topic. - * `start_message_id`: The initial reader positioning is done by specifying a message id. - The options are: - * `MessageId.earliest`: Start reading from the earliest message available in the topic - * `MessageId.latest`: Start reading from the end topic, only getting messages published - after the reader was created - * `MessageId`: When passing a particular message id, the reader will position itself on - that specific position. The first message to be read will be the message next to the - specified messageId. Message id can be serialized into a string and deserialized - back into a `MessageId` object: - - # Serialize to string - s = msg.message_id().serialize() - - # Deserialize from string - msg_id = MessageId.deserialize(s) - - **Options** - - * `schema`: - Define the schema of the data that will be received by this reader. - * `reader_listener`: - Sets a message listener for the reader. When the listener is set, - the application will receive messages through it. Calls to - `reader.read_next()` will not be allowed. The listener function needs - to accept (reader, message), for example: - - def my_listener(reader, message): - # process message - pass - - * `receiver_queue_size`: - Sets the size of the reader receive queue. The reader receive - queue controls how many messages can be accumulated by the reader - before the application calls `read_next()`. Using a higher value could - potentially increase the reader throughput at the expense of higher - memory utilization. - * `reader_name`: - Sets the reader name. - * `subscription_role_prefix`: - Sets the subscription role prefix. 
- * `is_read_compacted`: - Selects whether to read the compacted version of the topic - * crypto_key_reader: - Symmetric encryption class implementation, configuring public key encryption messages for the producer - and private key decryption messages for the consumer - """ - _check_type(str, topic, 'topic') - _check_type(_pulsar.MessageId, start_message_id, 'start_message_id') - _check_type(_schema.Schema, schema, 'schema') - _check_type(int, receiver_queue_size, 'receiver_queue_size') - _check_type_or_none(str, reader_name, 'reader_name') - _check_type_or_none(str, subscription_role_prefix, 'subscription_role_prefix') - _check_type(bool, is_read_compacted, 'is_read_compacted') - _check_type_or_none(CryptoKeyReader, crypto_key_reader, 'crypto_key_reader') - - conf = _pulsar.ReaderConfiguration() - if reader_listener: - conf.reader_listener(_listener_wrapper(reader_listener, schema)) - conf.receiver_queue_size(receiver_queue_size) - if reader_name: - conf.reader_name(reader_name) - if subscription_role_prefix: - conf.subscription_role_prefix(subscription_role_prefix) - conf.schema(schema.schema_info()) - conf.read_compacted(is_read_compacted) - if crypto_key_reader: - conf.crypto_key_reader(crypto_key_reader.cryptoKeyReader) - - c = Reader() - c._reader = self._client.create_reader(topic, start_message_id, conf) - c._client = self - c._schema = schema - self._consumers.append(c) - return c - - def get_topic_partitions(self, topic): - """ - Get the list of partitions for a given topic. - - If the topic is partitioned, this will return a list of partition names. If the topic is not - partitioned, the returned list will contain the topic name itself. - - This can be used to discover the partitions and create Reader, Consumer or Producer - instances directly on a particular partition. 
- :param topic: the topic name to lookup - :return: a list of partition name - """ - _check_type(str, topic, 'topic') - return self._client.get_topic_partitions(topic) - - def shutdown(self): - """ - Perform immediate shutdown of Pulsar client. - - Release all resources and close all producer, consumer, and readers without waiting - for ongoing operations to complete. - """ - self._client.shutdown() - - def close(self): - """ - Close the client and all the associated producers and consumers - """ - self._client.close() - - -class Producer: - """ - The Pulsar message producer, used to publish messages on a topic. - """ - - def topic(self): - """ - Return the topic which producer is publishing to - """ - return self._producer.topic() - - def producer_name(self): - """ - Return the producer name which could have been assigned by the - system or specified by the client - """ - return self._producer.producer_name() - - def last_sequence_id(self): - """ - Get the last sequence id that was published by this producer. - - This represent either the automatically assigned or custom sequence id - (set on the `MessageBuilder`) that was published and acknowledged by the broker. - - After recreating a producer with the same producer name, this will return the - last message that was published in the previous producer session, or -1 if - there no message was ever published. - """ - return self._producer.last_sequence_id() - - def send(self, content, - properties=None, - partition_key=None, - sequence_id=None, - replication_clusters=None, - disable_replication=False, - event_timestamp=None, - deliver_at=None, - deliver_after=None, - ): - """ - Publish a message on the topic. Blocks until the message is acknowledged - - Returns a `MessageId` object that represents where the message is persisted. - - **Args** - - * `content`: - A `bytes` object with the message payload. - - **Options** - - * `properties`: - A dict of application-defined string properties. 
- * `partition_key`: - Sets the partition key for message routing. A hash of this key is used - to determine the message's topic partition. - * `sequence_id`: - Specify a custom sequence id for the message being published. - * `replication_clusters`: - Override namespace replication clusters. Note that it is the caller's - responsibility to provide valid cluster names and that all clusters - have been previously configured as topics. Given an empty list, - the message will replicate according to the namespace configuration. - * `disable_replication`: - Do not replicate this message. - * `event_timestamp`: - Timestamp in millis of the timestamp of event creation - * `deliver_at`: - Specify the this message should not be delivered earlier than the - specified timestamp. - The timestamp is milliseconds and based on UTC - * `deliver_after`: - Specify a delay in timedelta for the delivery of the messages. - - """ - msg = self._build_msg(content, properties, partition_key, sequence_id, - replication_clusters, disable_replication, event_timestamp, - deliver_at, deliver_after) - return MessageId.deserialize(self._producer.send(msg)) - - def send_async(self, content, callback, - properties=None, - partition_key=None, - sequence_id=None, - replication_clusters=None, - disable_replication=False, - event_timestamp=None, - deliver_at=None, - deliver_after=None, - ): - """ - Send a message asynchronously. - - The `callback` will be invoked once the message has been acknowledged - by the broker. - - Example: - - #!python - def callback(res, msg_id): - print('Message published: %s' % res) - - producer.send_async(msg, callback) - - When the producer queue is full, by default the message will be rejected - and the callback invoked with an error code. - - **Args** - - * `content`: - A `bytes` object with the message payload. - - **Options** - - * `properties`: - A dict of application0-defined string properties. - * `partition_key`: - Sets the partition key for the message routing. 
A hash of this key is - used to determine the message's topic partition. - * `sequence_id`: - Specify a custom sequence id for the message being published. - * `replication_clusters`: Override namespace replication clusters. Note - that it is the caller's responsibility to provide valid cluster names - and that all clusters have been previously configured as topics. - Given an empty list, the message will replicate per the namespace - configuration. - * `disable_replication`: - Do not replicate this message. - * `event_timestamp`: - Timestamp in millis of the timestamp of event creation - * `deliver_at`: - Specify the this message should not be delivered earlier than the - specified timestamp. - The timestamp is milliseconds and based on UTC - * `deliver_after`: - Specify a delay in timedelta for the delivery of the messages. - """ - msg = self._build_msg(content, properties, partition_key, sequence_id, - replication_clusters, disable_replication, event_timestamp, - deliver_at, deliver_after) - self._producer.send_async(msg, callback) - - - def flush(self): - """ - Flush all the messages buffered in the client and wait until all messages have been - successfully persisted - """ - self._producer.flush() - - - def close(self): - """ - Close the producer. 
- """ - self._producer.close() - - def _build_msg(self, content, properties, partition_key, sequence_id, - replication_clusters, disable_replication, event_timestamp, - deliver_at, deliver_after): - data = self._schema.encode(content) - - _check_type(bytes, data, 'data') - _check_type_or_none(dict, properties, 'properties') - _check_type_or_none(str, partition_key, 'partition_key') - _check_type_or_none(int, sequence_id, 'sequence_id') - _check_type_or_none(list, replication_clusters, 'replication_clusters') - _check_type(bool, disable_replication, 'disable_replication') - _check_type_or_none(int, event_timestamp, 'event_timestamp') - _check_type_or_none(int, deliver_at, 'deliver_at') - _check_type_or_none(timedelta, deliver_after, 'deliver_after') - - mb = _pulsar.MessageBuilder() - mb.content(data) - if properties: - for k, v in properties.items(): - mb.property(k, v) - if partition_key: - mb.partition_key(partition_key) - if sequence_id: - mb.sequence_id(sequence_id) - if replication_clusters: - mb.replication_clusters(replication_clusters) - if disable_replication: - mb.disable_replication(disable_replication) - if event_timestamp: - mb.event_timestamp(event_timestamp) - if deliver_at: - mb.deliver_at(deliver_at) - if deliver_after: - mb.deliver_after(deliver_after) - - return mb.build() - - def is_connected(self): - """ - Check if the producer is connected or not. - """ - return self._producer.is_connected() - - -class Consumer: - """ - Pulsar consumer. - """ - - def topic(self): - """ - Return the topic this consumer is subscribed to. - """ - return self._consumer.topic() - - def subscription_name(self): - """ - Return the subscription name. - """ - return self._consumer.subscription_name() - - def unsubscribe(self): - """ - Unsubscribe the current consumer from the topic. - - This method will block until the operation is completed. 
Once the - consumer is unsubscribed, no more messages will be received and - subsequent new messages will not be retained for this consumer. - - This consumer object cannot be reused. - """ - return self._consumer.unsubscribe() - - def receive(self, timeout_millis=None): - """ - Receive a single message. - - If a message is not immediately available, this method will block until - a new message is available. - - **Options** - - * `timeout_millis`: - If specified, the receive will raise an exception if a message is not - available within the timeout. - """ - if timeout_millis is None: - msg = self._consumer.receive() - else: - _check_type(int, timeout_millis, 'timeout_millis') - msg = self._consumer.receive(timeout_millis) - - m = Message() - m._message = msg - m._schema = self._schema - return m - - def acknowledge(self, message): - """ - Acknowledge the reception of a single message. - - This method will block until an acknowledgement is sent to the broker. - After that, the message will not be re-delivered to this consumer. - - **Args** - - * `message`: - The received message or message id. - """ - if isinstance(message, Message): - self._consumer.acknowledge(message._message) - else: - self._consumer.acknowledge(message) - - def acknowledge_cumulative(self, message): - """ - Acknowledge the reception of all the messages in the stream up to (and - including) the provided message. - - This method will block until an acknowledgement is sent to the broker. - After that, the messages will not be re-delivered to this consumer. - - **Args** - - * `message`: - The received message or message id. - """ - if isinstance(message, Message): - self._consumer.acknowledge_cumulative(message._message) - else: - self._consumer.acknowledge_cumulative(message) - - def negative_acknowledge(self, message): - """ - Acknowledge the failure to process a single message. - - When a message is "negatively acked" it will be marked for redelivery after - some fixed delay. 
The delay is configurable when constructing the consumer - with {@link ConsumerConfiguration#setNegativeAckRedeliveryDelayMs}. - - This call is not blocking. - - **Args** - - * `message`: - The received message or message id. - """ - if isinstance(message, Message): - self._consumer.negative_acknowledge(message._message) - else: - self._consumer.negative_acknowledge(message) - - def pause_message_listener(self): - """ - Pause receiving messages via the `message_listener` until - `resume_message_listener()` is called. - """ - self._consumer.pause_message_listener() - - def resume_message_listener(self): - """ - Resume receiving the messages via the message listener. - Asynchronously receive all the messages enqueued from the time - `pause_message_listener()` was called. - """ - self._consumer.resume_message_listener() - - def redeliver_unacknowledged_messages(self): - """ - Redelivers all the unacknowledged messages. In failover mode, the - request is ignored if the consumer is not active for the given topic. In - shared mode, the consumer's messages to be redelivered are distributed - across all the connected consumers. This is a non-blocking call and - doesn't throw an exception. In case the connection breaks, the messages - are redelivered after reconnect. - """ - self._consumer.redeliver_unacknowledged_messages() - - def seek(self, messageid): - """ - Reset the subscription associated with this consumer to a specific message id or publish timestamp. - The message id can either be a specific message or represent the first or last messages in the topic. - Note: this operation can only be done on non-partitioned topics. For these, one can rather perform the - seek() on the individual partitions. - - **Args** - - * `message`: - The message id for seek, OR an integer event time to seek to - """ - self._consumer.seek(messageid) - - def close(self): - """ - Close the consumer. 
- """ - self._consumer.close() - self._client._consumers.remove(self) - - def is_connected(self): - """ - Check if the consumer is connected or not. - """ - return self._consumer.is_connected() - - def get_last_message_id(self): - """ - Get the last message id. - """ - return self._consumer.get_last_message_id() - - -class Reader: - """ - Pulsar topic reader. - """ - - def topic(self): - """ - Return the topic this reader is reading from. - """ - return self._reader.topic() - - def read_next(self, timeout_millis=None): - """ - Read a single message. - - If a message is not immediately available, this method will block until - a new message is available. - - **Options** - - * `timeout_millis`: - If specified, the receive will raise an exception if a message is not - available within the timeout. - """ - if timeout_millis is None: - msg = self._reader.read_next() - else: - _check_type(int, timeout_millis, 'timeout_millis') - msg = self._reader.read_next(timeout_millis) - - m = Message() - m._message = msg - m._schema = self._schema - return m - - def has_message_available(self): - """ - Check if there is any message available to read from the current position. - """ - return self._reader.has_message_available(); - - def seek(self, messageid): - """ - Reset this reader to a specific message id or publish timestamp. - The message id can either be a specific message or represent the first or last messages in the topic. - Note: this operation can only be done on non-partitioned topics. For these, one can rather perform the - seek() on the individual partitions. - - **Args** - - * `message`: - The message id for seek, OR an integer event time to seek to - """ - self._reader.seek(messageid) - - def close(self): - """ - Close the reader. - """ - self._reader.close() - self._client._consumers.remove(self) - - def is_connected(self): - """ - Check if the reader is connected or not. 
- """ - return self._reader.is_connected() - - -class CryptoKeyReader: - """ - Default crypto key reader implementation - """ - def __init__(self, public_key_path, private_key_path): - """ - Create crypto key reader. - - **Args** - - * `public_key_path`: Path to the public key - * `private_key_path`: Path to private key - """ - _check_type(str, public_key_path, 'public_key_path') - _check_type(str, private_key_path, 'private_key_path') - self.cryptoKeyReader = _pulsar.CryptoKeyReader(public_key_path, private_key_path) - -def _check_type(var_type, var, name): - if not isinstance(var, var_type): - raise ValueError("Argument %s is expected to be of type '%s' and not '%s'" - % (name, var_type.__name__, type(var).__name__)) - - -def _check_type_or_none(var_type, var, name): - if var is not None and not isinstance(var, var_type): - raise ValueError("Argument %s is expected to be either None or of type '%s'" - % (name, var_type.__name__)) - - -def _listener_wrapper(listener, schema): - def wrapper(consumer, msg): - c = Consumer() - c._consumer = consumer - m = Message() - m._message = msg - m._schema = schema - listener(c, m) - return wrapper diff --git a/pulsar-client-cpp/python/pulsar/exceptions.py b/pulsar-client-cpp/python/pulsar/exceptions.py deleted file mode 100644 index d151564f90128..0000000000000 --- a/pulsar-client-cpp/python/pulsar/exceptions.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from _pulsar import PulsarException, UnknownError, InvalidConfiguration, Timeout, LookupError, ConnectError, \ - ReadError, AuthenticationError, AuthorizationError, ErrorGettingAuthenticationData, BrokerMetadataError, \ - BrokerPersistenceError, ChecksumError, ConsumerBusy, NotConnected, AlreadyClosed, InvalidMessage, \ - ConsumerNotInitialized, ProducerNotInitialized, ProducerBusy, TooManyLookupRequestException, InvalidTopicName, \ - InvalidUrl, ServiceUnitNotReady, OperationNotSupported, ProducerBlockedQuotaExceededError, \ - ProducerBlockedQuotaExceededException, ProducerQueueIsFull, MessageTooBig, TopicNotFound, SubscriptionNotFound, \ - ConsumerNotFound, UnsupportedVersionError, TopicTerminated, CryptoError, IncompatibleSchema, ConsumerAssignError, \ - CumulativeAcknowledgementNotAllowedError, TransactionCoordinatorNotFoundError, InvalidTxnStatusError, \ - NotAllowedError, TransactionConflict, TransactionNotFound, ProducerFenced, MemoryBufferIsFull diff --git a/pulsar-client-cpp/python/pulsar/functions/__init__.py b/pulsar-client-cpp/python/pulsar/functions/__init__.py deleted file mode 100644 index 47c179a51ede9..0000000000000 --- a/pulsar-client-cpp/python/pulsar/functions/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# -*- encoding: utf-8 -*- diff --git a/pulsar-client-cpp/python/pulsar/functions/context.py b/pulsar-client-cpp/python/pulsar/functions/context.py deleted file mode 100644 index c1f6801c72274..0000000000000 --- a/pulsar-client-cpp/python/pulsar/functions/context.py +++ /dev/null @@ -1,191 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# -*- encoding: utf-8 -*- - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -"""context.py: Context defines context information available during -# processing of a request. -""" -from abc import abstractmethod - -class Context(object): - """Interface defining information available at process time""" - @abstractmethod - def get_message_id(self): - """Return the messageid of the current message that we are processing""" - pass - - @abstractmethod - def get_message_key(self): - """Return the key of the current message that we are processing""" - pass - - @abstractmethod - def get_message_eventtime(self): - """Return the event time of the current message that we are processing""" - pass - - @abstractmethod - def get_message_properties(self): - """Return the message properties kv map of the current message that we are processing""" - pass - - @abstractmethod - def get_current_message_topic_name(self): - """Returns the topic name of the message that we are processing""" - pass - - @abstractmethod - def get_function_tenant(self): - """Returns the tenant of the message that's being processed""" - pass - - @abstractmethod - def get_function_namespace(self): - """Returns the namespace of the message that's being processed""" - - @abstractmethod - def get_function_name(self): - """Returns the function name that we are a part of""" - pass - - @abstractmethod - def get_function_id(self): - """Returns the function id that we are a part of""" - pass - - @abstractmethod - def 
get_instance_id(self): - """Returns the instance id that is executing the function""" - pass - - @abstractmethod - def get_function_version(self): - """Returns the version of function that we are executing""" - pass - - @abstractmethod - def get_logger(self): - """Returns the logger object that can be used to do logging""" - pass - - @abstractmethod - def get_user_config_value(self, key): - """Returns the value of the user-defined config. If the key doesn't exist, None is returned""" - pass - - @abstractmethod - def get_user_config_map(self): - """Returns the entire user-defined config as a dict (the dict will be empty if no user-defined config is supplied)""" - pass - - @abstractmethod - def get_secret(self, secret_name): - """Returns the secret value associated with the name. None if nothing was found""" - pass - - @abstractmethod - def get_partition_key(self): - """Returns partition key of the input message is one exists""" - pass - - - @abstractmethod - def record_metric(self, metric_name, metric_value): - """Records the metric_value. 
metric_value has to satisfy isinstance(metric_value, numbers.Number)""" - pass - - @abstractmethod - def publish(self, topic_name, message, serde_class_name="serde.IdentitySerDe", properties=None, compression_type=None, callback=None, message_conf=None): - """Publishes message to topic_name by first serializing the message using serde_class_name serde - The message will have properties specified if any - - The available options for message_conf: - - properties, - partition_key, - sequence_id, - replication_clusters, - disable_replication, - event_timestamp - - """ - pass - - @abstractmethod - def get_input_topics(self): - """Returns the input topics of function""" - pass - - @abstractmethod - def get_output_topic(self): - """Returns the output topic of function""" - pass - - @abstractmethod - def get_output_serde_class_name(self): - """return output Serde class""" - pass - - @abstractmethod - def ack(self, msgid, topic): - """ack this message id""" - pass - - @abstractmethod - def incr_counter(self, key, amount): - """incr the counter of a given key in the managed state""" - pass - - @abstractmethod - def get_counter(self, key): - """get the counter of a given key in the managed state""" - pass - - @abstractmethod - def del_counter(self, key): - """delete the counter of a given key in the managed state""" - pass - - @abstractmethod - def put_state(self, key, value): - """update the value of a given key in the managed state""" - pass - - @abstractmethod - def get_state(self, key): - """get the value of a given key in the managed state""" - pass diff --git a/pulsar-client-cpp/python/pulsar/functions/function.py b/pulsar-client-cpp/python/pulsar/functions/function.py deleted file mode 100644 index ce2919d08ca7d..0000000000000 --- a/pulsar-client-cpp/python/pulsar/functions/function.py +++ /dev/null @@ -1,51 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# -*- encoding: utf-8 -*- - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -"""function.py: This is the core interface of the function api. -# The process method is called for every message of the input topic of the -# function. The incoming input bytes are deserialized using the serde. 
-# The process function can optionally emit an output -""" -from abc import abstractmethod - -class Function(object): - """Interface for Pulsar Function""" - @abstractmethod - def process(self, input, context): - """Process input message""" - pass \ No newline at end of file diff --git a/pulsar-client-cpp/python/pulsar/functions/serde.py b/pulsar-client-cpp/python/pulsar/functions/serde.py deleted file mode 100644 index 7b07673a77013..0000000000000 --- a/pulsar-client-cpp/python/pulsar/functions/serde.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# -*- encoding: utf-8 -*- - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -"""serde.py: SerDe defines the interface for serialization/deserialization. -# Everytime a message is read from pulsar topic, the serde is invoked to -# serialize the bytes into an object before invoking the process method. -# Anytime a python object needs to be written back to pulsar, it is -# serialized into bytes before writing. -""" -from abc import abstractmethod - -import pickle - -class SerDe(object): - """Interface for Serialization/Deserialization""" - @abstractmethod - def serialize(self, input): - """Serialize input message into bytes""" - pass - - @abstractmethod - def deserialize(self, input_bytes): - """Serialize input_bytes into an object""" - pass - -class PickleSerDe(SerDe): - """Pickle based serializer""" - def serialize(self, input): - return pickle.dumps(input) - - def deserialize(self, input_bytes): - return pickle.loads(input_bytes) - -class IdentitySerDe(SerDe): - """Simple Serde that just conversion to string and back""" - def __init__(self): - self._types = [int, float, complex, str] - - def serialize(self, input): - if type(input) in self._types: - return str(input).encode('utf-8') - if type(input) == bytes: - return input - raise TypeError("IdentitySerde cannot serialize object of type %s" % type(input)) - - def deserialize(self, input_bytes): - for typ in self._types: - try: - return typ(input_bytes.decode('utf-8')) - except: - pass - return input_bytes diff --git a/pulsar-client-cpp/python/pulsar/schema/__init__.py b/pulsar-client-cpp/python/pulsar/schema/__init__.py deleted file mode 100644 index 
efa680666a729..0000000000000 --- a/pulsar-client-cpp/python/pulsar/schema/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from .definition import Record, Field, Null, Boolean, Integer, Long, \ - Float, Double, Bytes, String, Array, Map, CustomEnum - -from .schema import Schema, BytesSchema, StringSchema, JsonSchema -from .schema_avro import AvroSchema diff --git a/pulsar-client-cpp/python/pulsar/schema/definition.py b/pulsar-client-cpp/python/pulsar/schema/definition.py deleted file mode 100644 index 60ab7ccbe102f..0000000000000 --- a/pulsar-client-cpp/python/pulsar/schema/definition.py +++ /dev/null @@ -1,515 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import copy -from abc import abstractmethod -from collections import OrderedDict -from enum import Enum, EnumMeta - - -def _check_record_or_field(x): - if (type(x) is type and not issubclass(x, Record)) \ - and not isinstance(x, Field): - raise Exception('Argument ' + x + ' is not a Record or a Field') - - -class RecordMeta(type): - def __new__(metacls, name, parents, dct): - if name != 'Record': - # Do not apply this logic to the base class itself - dct['_fields'] = RecordMeta._get_fields(dct) - dct['_required'] = False - return type.__new__(metacls, name, parents, dct) - - @classmethod - def _get_fields(cls, dct): - # Build a set of valid fields for this record - fields = OrderedDict() - for name, value in dct.items(): - if issubclass(type(value), EnumMeta): - value = CustomEnum(value) - elif type(value) == RecordMeta: - # We expect an instance of a record rather than the class itself - value = value() - - if isinstance(value, Record) or isinstance(value, Field): - fields[name] = value - return fields - - -class Record(metaclass=RecordMeta): - - # This field is used to set namespace for Avro Record schema. 
- _avro_namespace = None - - # Generate a schema where fields are sorted alphabetically - _sorted_fields = False - - def __init__(self, default=None, required_default=False, required=False, *args, **kwargs): - self._required_default = required_default - self._default = default - self._required = required - - for k, value in self._fields.items(): - if k in kwargs: - if isinstance(value, Record) and isinstance(kwargs[k], dict): - # Use dict init Record object - copied = copy.copy(value) - copied.__init__(**kwargs[k]) - self.__setattr__(k, copied) - elif isinstance(value, Array) and isinstance(kwargs[k], list) and len(kwargs[k]) > 0 \ - and isinstance(value.array_type, Record) and isinstance(kwargs[k][0], dict): - arr = [] - for item in kwargs[k]: - copied = copy.copy(value.array_type) - copied.__init__(**item) - arr.append(copied) - self.__setattr__(k, arr) - elif isinstance(value, Map) and isinstance(kwargs[k], dict) and len(kwargs[k]) > 0 \ - and isinstance(value.value_type, Record) and isinstance(list(kwargs[k].values())[0], dict): - dic = {} - for mapKey, mapValue in kwargs[k].items(): - copied = copy.copy(value.value_type) - copied.__init__(**mapValue) - dic[mapKey] = copied - self.__setattr__(k, dic) - else: - # Value was overridden at constructor - self.__setattr__(k, kwargs[k]) - elif isinstance(value, Record): - # Value is a subrecord - self.__setattr__(k, value) - else: - # Set field to default value, without revalidating the default value type - super(Record, self).__setattr__(k, value.default()) - - @classmethod - def schema(cls): - return cls.schema_info(set()) - - @classmethod - def schema_info(cls, defined_names): - namespace_prefix = '' - if cls._avro_namespace is not None: - namespace_prefix = cls._avro_namespace + '.' 
- namespace_name = namespace_prefix + cls.__name__ - - if namespace_name in defined_names: - return namespace_name - - defined_names.add(namespace_name) - - schema = { - 'type': 'record', - 'name': str(cls.__name__) - } - if cls._avro_namespace is not None: - schema['namespace'] = cls._avro_namespace - schema['fields'] = [] - - def get_filed_default_value(value): - if isinstance(value, Enum): - return value.name - else: - return value - - if cls._sorted_fields: - fields = sorted(cls._fields.keys()) - else: - fields = cls._fields.keys() - for name in fields: - field = cls._fields[name] - field_type = field.schema_info(defined_names) \ - if field._required else ['null', field.schema_info(defined_names)] - schema['fields'].append({ - 'name': name, - 'default': get_filed_default_value(field.default()), - 'type': field_type - }) if field.required_default() else schema['fields'].append({ - 'name': name, - 'type': field_type, - }) - - return schema - - def __setattr__(self, key, value): - if key == '_default': - super(Record, self).__setattr__(key, value) - elif key == '_required_default': - super(Record, self).__setattr__(key, value) - elif key == '_required': - super(Record, self).__setattr__(key, value) - else: - if key not in self._fields: - raise AttributeError('Cannot set undeclared field ' + key + ' on record') - - # Check that type of value matches the field type - field = self._fields[key] - value = field.validate_type(key, value) - super(Record, self).__setattr__(key, value) - - def __eq__(self, other): - for field in self._fields: - if self.__getattribute__(field) != other.__getattribute__(field): - return False - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return str(self.__dict__) - - def type(self): - return str(self.__class__.__name__) - - def python_type(self): - return self.__class__ - - def validate_type(self, name, val): - if val is None and not self._required: - return self.default() - - if not 
isinstance(val, self.__class__): - raise TypeError("Invalid type '%s' for sub-record field '%s'. Expected: %s" % ( - type(val), name, self.__class__)) - return val - - def default(self): - if self._default is not None: - return self._default - else: - return None - - def required_default(self): - return self._required_default - - -class Field(object): - def __init__(self, default=None, required=False, required_default=False): - if default is not None: - default = self.validate_type('default', default) - self._default = default - self._required_default = required_default - self._required = required - - @abstractmethod - def type(self): - pass - - @abstractmethod - def python_type(self): - pass - - def validate_type(self, name, val): - if val is None and not self._required: - return self.default() - - if type(val) != self.python_type(): - raise TypeError("Invalid type '%s' for field '%s'. Expected: %s" % (type(val), name, self.python_type())) - return val - - def schema(self): - # For primitive types, the schema would just be the type itself - return self.type() - - def schema_info(self, defined_names): - return self.type() - - def default(self): - return self._default - - def required_default(self): - return self._required_default - - -# All types - - -class Null(Field): - def type(self): - return 'null' - - def python_type(self): - return type(None) - - def validate_type(self, name, val): - if val is not None: - raise TypeError('Field ' + name + ' is set to be None') - return val - - -class Boolean(Field): - def type(self): - return 'boolean' - - def python_type(self): - return bool - - def default(self): - if self._default is not None: - return self._default - else: - return False - - -class Integer(Field): - def type(self): - return 'int' - - def python_type(self): - return int - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class Long(Field): - def type(self): - return 'long' - - def python_type(self): - 
return int - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class Float(Field): - def type(self): - return 'float' - - def python_type(self): - return float - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class Double(Field): - def type(self): - return 'double' - - def python_type(self): - return float - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class Bytes(Field): - def type(self): - return 'bytes' - - def python_type(self): - return bytes - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class String(Field): - def type(self): - return 'string' - - def python_type(self): - return str - - def validate_type(self, name, val): - t = type(val) - - if val is None and not self._required: - return self.default() - - if not (t is str or t.__name__ == 'unicode'): - raise TypeError("Invalid type '%s' for field '%s'. 
Expected a string" % (t, name)) - return val - - def default(self): - if self._default is not None: - return self._default - else: - return None - -# Complex types - - -class CustomEnum(Field): - def __init__(self, enum_type, default=None, required=False, required_default=False): - if not issubclass(enum_type, Enum): - raise Exception(enum_type + " is not a valid Enum type") - self.enum_type = enum_type - self.values = {} - for x in enum_type.__members__.values(): - self.values[x.value] = x - super(CustomEnum, self).__init__(default, required, required_default) - - def type(self): - return 'enum' - - def python_type(self): - return self.enum_type - - def validate_type(self, name, val): - if val is None: - return None - - if type(val) is str: - # The enum was passed as a string, we need to check it against the possible values - if val in self.enum_type.__members__: - return self.enum_type.__members__[val] - else: - raise TypeError( - "Invalid enum value '%s' for field '%s'. Expected: %s" % (val, name, self.enum_type.__members__.keys())) - elif type(val) is int: - # The enum was passed as an int, we need to check it against the possible values - if val in self.values: - return self.values[val] - else: - raise TypeError( - "Invalid enum value '%s' for field '%s'. Expected: %s" % (val, name, self.values.keys())) - elif type(val) != self.python_type(): - raise TypeError("Invalid type '%s' for field '%s'. 
Expected: %s" % (type(val), name, self.python_type())) - else: - return val - - def schema(self): - return self.schema_info(set()) - - def schema_info(self, defined_names): - if self.enum_type.__name__ in defined_names: - return self.enum_type.__name__ - defined_names.add(self.enum_type.__name__) - return { - 'type': self.type(), - 'name': self.enum_type.__name__, - 'symbols': [x.name for x in self.enum_type] - } - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class Array(Field): - def __init__(self, array_type, default=None, required=False, required_default=False): - _check_record_or_field(array_type) - self.array_type = array_type - super(Array, self).__init__(default=default, required=required, required_default=required_default) - - def type(self): - return 'array' - - def python_type(self): - return list - - def validate_type(self, name, val): - if val is None: - return None - - super(Array, self).validate_type(name, val) - - for x in val: - if type(x) != self.array_type.python_type(): - raise TypeError('Array field ' + name + ' items should all be of type ' + - self.array_type.type()) - return val - - def schema(self): - return self.schema_info(set()) - - def schema_info(self, defined_names): - return { - 'type': self.type(), - 'items': self.array_type.schema_info(defined_names) if isinstance(self.array_type, (Array, Map, Record)) - else self.array_type.type() - } - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -class Map(Field): - def __init__(self, value_type, default=None, required=False, required_default=False): - _check_record_or_field(value_type) - self.value_type = value_type - super(Map, self).__init__(default=default, required=required, required_default=required_default) - - def type(self): - return 'map' - - def python_type(self): - return dict - - def validate_type(self, name, val): - if val is None: - return None - - super(Map, 
self).validate_type(name, val) - - for k, v in val.items(): - if type(k) != str and not is_unicode(k): - raise TypeError('Map keys for field ' + name + ' should all be strings') - if type(v) != self.value_type.python_type(): - raise TypeError('Map values for field ' + name + ' should all be of type ' - + self.value_type.python_type()) - - return val - - def schema(self): - return self.schema_info(set()) - - def schema_info(self, defined_names): - return { - 'type': self.type(), - 'values': self.value_type.schema_info(defined_names) if isinstance(self.value_type, (Array, Map, Record)) - else self.value_type.type() - } - - def default(self): - if self._default is not None: - return self._default - else: - return None - - -# Python3 has no `unicode` type, so here we use a tricky way to check if the type of `x` is `unicode` in Python2 -# and also make it work well with Python3. -def is_unicode(x): - return 'encode' in dir(x) and type(x.encode()) == str diff --git a/pulsar-client-cpp/python/pulsar/schema/schema.py b/pulsar-client-cpp/python/pulsar/schema/schema.py deleted file mode 100644 index f062c2e5e5e03..0000000000000 --- a/pulsar-client-cpp/python/pulsar/schema/schema.py +++ /dev/null @@ -1,111 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -from abc import abstractmethod -import json -import _pulsar -import enum - - -class Schema(object): - def __init__(self, record_cls, schema_type, schema_definition, schema_name): - self._record_cls = record_cls - self._schema_info = _pulsar.SchemaInfo(schema_type, schema_name, - json.dumps(schema_definition, indent=True)) - - @abstractmethod - def encode(self, obj): - pass - - @abstractmethod - def decode(self, data): - pass - - def schema_info(self): - return self._schema_info - - def _validate_object_type(self, obj): - if not isinstance(obj, self._record_cls): - raise TypeError('Invalid record obj of type ' + str(type(obj)) - + ' - expected type is ' + str(self._record_cls)) - - -class BytesSchema(Schema): - def __init__(self): - super(BytesSchema, self).__init__(bytes, _pulsar.SchemaType.BYTES, None, 'BYTES') - - def encode(self, data): - self._validate_object_type(data) - return data - - def decode(self, data): - return data - - def __str__(self): - return 'BytesSchema' - - -class StringSchema(Schema): - def __init__(self): - super(StringSchema, self).__init__(str, _pulsar.SchemaType.STRING, None, 'STRING') - - def encode(self, obj): - self._validate_object_type(obj) - return obj.encode('utf-8') - - def decode(self, data): - return data.decode('utf-8') - - def __str__(self): - return 'StringSchema' - - -def remove_reserved_key(data): - if '_default' in data: - del data['_default'] - if '_required' in data: - del data['_required'] - if '_required_default' in data: - del data['_required_default'] - - -class JsonSchema(Schema): - - def __init__(self, record_cls): - super(JsonSchema, self).__init__(record_cls, _pulsar.SchemaType.JSON, - record_cls.schema(), 'JSON') - - def _get_serialized_value(self, o): - if isinstance(o, enum.Enum): - return o.value - else: - data = o.__dict__.copy() - remove_reserved_key(data) - return data - - def encode(self, obj): 
- self._validate_object_type(obj) - # Copy the dict of the object as to not modify the provided object via the reference provided - data = obj.__dict__.copy() - remove_reserved_key(data) - return json.dumps(data, default=self._get_serialized_value, indent=True).encode('utf-8') - - def decode(self, data): - return self._record_cls(**json.loads(data)) diff --git a/pulsar-client-cpp/python/pulsar/schema/schema_avro.py b/pulsar-client-cpp/python/pulsar/schema/schema_avro.py deleted file mode 100644 index 05ceb8e20eff3..0000000000000 --- a/pulsar-client-cpp/python/pulsar/schema/schema_avro.py +++ /dev/null @@ -1,96 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import _pulsar -import io -import enum - -from . 
import Record -from .schema import Schema - -try: - import fastavro - HAS_AVRO = True -except ImportError: - HAS_AVRO = False - -if HAS_AVRO: - class AvroSchema(Schema): - def __init__(self, record_cls, schema_definition=None): - if record_cls is None and schema_definition is None: - raise AssertionError("The param record_cls and schema_definition shouldn't be both None.") - - if record_cls is not None: - self._schema = record_cls.schema() - else: - self._schema = schema_definition - super(AvroSchema, self).__init__(record_cls, _pulsar.SchemaType.AVRO, self._schema, 'AVRO') - - def _get_serialized_value(self, x): - if isinstance(x, enum.Enum): - return x.name - elif isinstance(x, Record): - return self.encode_dict(x.__dict__) - elif isinstance(x, list): - arr = [] - for item in x: - arr.append(self._get_serialized_value(item)) - return arr - elif isinstance(x, dict): - return self.encode_dict(x) - else: - return x - - def encode(self, obj): - buffer = io.BytesIO() - m = obj - if self._record_cls is not None: - self._validate_object_type(obj) - m = self.encode_dict(obj.__dict__) - elif not isinstance(obj, dict): - raise ValueError('If using the custom schema, the record data should be dict type.') - - fastavro.schemaless_writer(buffer, self._schema, m) - return buffer.getvalue() - - def encode_dict(self, d): - obj = {} - for k, v in d.items(): - obj[k] = self._get_serialized_value(v) - return obj - - def decode(self, data): - buffer = io.BytesIO(data) - d = fastavro.schemaless_reader(buffer, self._schema) - if self._record_cls is not None: - return self._record_cls(**d) - else: - return d - -else: - class AvroSchema(Schema): - def __init__(self, _record_cls, _schema_definition): - raise Exception("Avro library support was not found. 
Make sure to install Pulsar client " + - "with Avro support: pip3 install 'pulsar-client[avro]'") - - def encode(self, obj): - pass - - def decode(self, data): - pass diff --git a/pulsar-client-cpp/python/pulsar_test.py b/pulsar-client-cpp/python/pulsar_test.py deleted file mode 100755 index 375afe43adb61..0000000000000 --- a/pulsar-client-cpp/python/pulsar_test.py +++ /dev/null @@ -1,1341 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - - -import threading -import logging -from unittest import TestCase, main -import time -import os -import pulsar -import uuid -from datetime import timedelta -from pulsar import ( - Client, - MessageId, - CompressionType, - ConsumerType, - PartitionsRoutingMode, - AuthenticationBasic, - AuthenticationTLS, - Authentication, - AuthenticationToken, - InitialPosition, - CryptoKeyReader, -) -from pulsar.schema import JsonSchema, Record, Integer - -from _pulsar import ProducerConfiguration, ConsumerConfiguration - -from schema_test import * - -try: - # For Python 3.0 and later - from urllib.request import urlopen, Request -except ImportError: - # Fall back to Python 2's urllib2 - from urllib2 import urlopen, Request - -TM = 10000 # Do not wait forever in tests - - -def doHttpPost(url, data): - req = Request(url, data.encode()) - req.add_header("Content-Type", "application/json") - urlopen(req) - - -def doHttpPut(url, data): - try: - req = Request(url, data.encode()) - req.add_header("Content-Type", "application/json") - req.get_method = lambda: "PUT" - urlopen(req) - except Exception as ex: - # ignore conflicts exception to have test idempotency - if "409" in str(ex): - pass - else: - raise ex - - -def doHttpGet(url): - req = Request(url) - req.add_header("Accept", "application/json") - return urlopen(req).read() - - -class TestRecord(Record): - a = Integer() - b = Integer() - - -class PulsarTest(TestCase): - - serviceUrl = "pulsar://localhost:6650" - adminUrl = "http://localhost:8080" - - serviceUrlTls = "pulsar+ssl://localhost:6651" - - def test_producer_config(self): - conf = ProducerConfiguration() - conf.send_timeout_millis(12) - self.assertEqual(conf.send_timeout_millis(), 12) - - self.assertEqual(conf.compression_type(), CompressionType.NONE) - conf.compression_type(CompressionType.LZ4) - self.assertEqual(conf.compression_type(), CompressionType.LZ4) - - conf.max_pending_messages(120) - self.assertEqual(conf.max_pending_messages(), 120) - - def 
test_consumer_config(self): - conf = ConsumerConfiguration() - self.assertEqual(conf.consumer_type(), ConsumerType.Exclusive) - conf.consumer_type(ConsumerType.Shared) - self.assertEqual(conf.consumer_type(), ConsumerType.Shared) - - self.assertEqual(conf.consumer_name(), "") - conf.consumer_name("my-name") - self.assertEqual(conf.consumer_name(), "my-name") - - self.assertEqual(conf.replicate_subscription_state_enabled(), False) - conf.replicate_subscription_state_enabled(True) - self.assertEqual(conf.replicate_subscription_state_enabled(), True) - - def test_connect_error(self): - with self.assertRaises(ValueError): - Client("fakeServiceUrl") - - def test_exception_inheritance(self): - assert issubclass(pulsar.ConnectError, pulsar.PulsarException) - assert issubclass(pulsar.PulsarException, Exception) - - def test_simple_producer(self): - client = Client(self.serviceUrl) - producer = client.create_producer("my-python-topic") - producer.send(b"hello") - producer.close() - client.close() - - def test_producer_send_async(self): - client = Client(self.serviceUrl) - producer = client.create_producer("my-python-topic") - - sent_messages = [] - - def send_callback(producer, msg): - sent_messages.append(msg) - - producer.send_async(b"hello", send_callback) - producer.send_async(b"hello", send_callback) - producer.send_async(b"hello", send_callback) - - i = 0 - while len(sent_messages) < 3 and i < 100: - time.sleep(0.1) - i += 1 - self.assertEqual(len(sent_messages), 3) - client.close() - - def test_producer_send(self): - client = Client(self.serviceUrl) - topic = "test_producer_send" - producer = client.create_producer(topic) - consumer = client.subscribe(topic, "sub-name") - msg_id = producer.send(b"hello") - print("send to {}".format(msg_id)) - msg = consumer.receive(TM) - consumer.acknowledge(msg) - print("receive from {}".format(msg.message_id())) - self.assertEqual(msg_id, msg.message_id()) - client.close() - - def test_producer_is_connected(self): - client = 
Client(self.serviceUrl) - topic = "test_producer_is_connected" - producer = client.create_producer(topic) - self.assertTrue(producer.is_connected()) - producer.close() - self.assertFalse(producer.is_connected()) - client.close() - - def test_producer_consumer(self): - client = Client(self.serviceUrl) - consumer = client.subscribe("my-python-topic-producer-consumer", "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer("my-python-topic-producer-consumer") - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - consumer.unsubscribe() - client.close() - - def test_redelivery_count(self): - client = Client(self.serviceUrl) - consumer = client.subscribe( - "my-python-topic-redelivery-count", - "my-sub", - consumer_type=ConsumerType.Shared, - negative_ack_redelivery_delay_ms=500, - ) - producer = client.create_producer("my-python-topic-redelivery-count") - producer.send(b"hello") - - redelivery_count = 0 - for i in range(4): - msg = consumer.receive(TM) - print("Received message %s" % msg.data()) - consumer.negative_acknowledge(msg) - redelivery_count = msg.redelivery_count() - - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - self.assertEqual(3, redelivery_count) - consumer.unsubscribe() - producer.close() - client.close() - - def test_deliver_at(self): - client = Client(self.serviceUrl) - consumer = client.subscribe("my-python-topic-deliver-at", "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer("my-python-topic-deliver-at") - # Delay message in 1.1s - producer.send(b"hello", deliver_at=int(round(time.time() * 1000)) + 1100) - - # Message should not be available in the next second - with self.assertRaises(pulsar.Timeout): - consumer.receive(1000) - - # Message should be published now - msg = consumer.receive(TM) - self.assertTrue(msg) - 
self.assertEqual(msg.data(), b"hello") - consumer.unsubscribe() - producer.close() - client.close() - - def test_deliver_after(self): - client = Client(self.serviceUrl) - consumer = client.subscribe("my-python-topic-deliver-after", "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer("my-python-topic-deliver-after") - # Delay message in 1.1s - producer.send(b"hello", deliver_after=timedelta(milliseconds=1100)) - - # Message should not be available in the next second - with self.assertRaises(pulsar.Timeout): - consumer.receive(1000) - - # Message should be published in the next 500ms - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - consumer.unsubscribe() - producer.close() - client.close() - - def test_consumer_initial_position(self): - client = Client(self.serviceUrl) - producer = client.create_producer("consumer-initial-position") - - # Sending 5 messages before consumer creation. - # These should be received with initial_position set to Earliest but not with Latest. - for i in range(5): - producer.send(b"hello-%d" % i) - - consumer = client.subscribe( - "consumer-initial-position", - "my-sub", - consumer_type=ConsumerType.Shared, - initial_position=InitialPosition.Earliest, - ) - - # Sending 5 other messages that should be received regardless of the initial_position. 
- for i in range(5, 10): - producer.send(b"hello-%d" % i) - - for i in range(10): - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello-%d" % i) - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - consumer.unsubscribe() - client.close() - - def test_consumer_queue_size_is_zero(self): - client = Client(self.serviceUrl) - consumer = client.subscribe( - "my-python-topic-consumer-init-queue-size-is-zero", - "my-sub", - consumer_type=ConsumerType.Shared, - receiver_queue_size=0, - initial_position=InitialPosition.Earliest, - ) - producer = client.create_producer("my-python-topic-consumer-init-queue-size-is-zero") - producer.send(b"hello") - time.sleep(0.1) - msg = consumer.receive() - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - consumer.unsubscribe() - client.close() - - def test_message_properties(self): - client = Client(self.serviceUrl) - topic = "my-python-test-message-properties" - consumer = client.subscribe( - topic=topic, subscription_name="my-subscription", schema=pulsar.schema.StringSchema() - ) - producer = client.create_producer(topic=topic, schema=pulsar.schema.StringSchema()) - producer.send("hello", properties={"a": "1", "b": "2"}) - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.value(), "hello") - self.assertEqual(msg.properties(), {"a": "1", "b": "2"}) - - consumer.unsubscribe() - client.close() - - def test_tls_auth(self): - certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" - if not os.path.exists(certs_dir): - certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" - client = Client( - self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + "cacert.pem", - tls_allow_insecure_connection=False, - authentication=AuthenticationTLS(certs_dir + "client-cert.pem", certs_dir + "client-key.pem"), - ) - - topic = "my-python-topic-tls-auth-" + str(time.time()) - consumer = client.subscribe(topic, "my-sub", 
consumer_type=ConsumerType.Shared) - producer = client.create_producer(topic) - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - client.close() - - def test_tls_auth2(self): - certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" - if not os.path.exists(certs_dir): - certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" - authPlugin = "org.apache.pulsar.client.impl.auth.AuthenticationTls" - authParams = "tlsCertFile:%s/client-cert.pem,tlsKeyFile:%s/client-key.pem" % (certs_dir, certs_dir) - - client = Client( - self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + "cacert.pem", - tls_allow_insecure_connection=False, - authentication=Authentication(authPlugin, authParams), - ) - - topic = "my-python-topic-tls-auth-2-" + str(time.time()) - consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer(topic) - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - client.close() - - def test_encryption(self): - publicKeyPath = "/pulsar//pulsar-broker/src/test/resources/certificate/public-key.client-rsa.pem" - privateKeyPath = "/pulsar/pulsar-broker/src/test/resources/certificate/private-key.client-rsa.pem" - crypto_key_reader = CryptoKeyReader(publicKeyPath, privateKeyPath) - client = Client(self.serviceUrl) - topic = "my-python-test-end-to-end-encryption" - consumer = client.subscribe( - topic=topic, subscription_name="my-subscription", crypto_key_reader=crypto_key_reader - ) - producer = client.create_producer( - topic=topic, encryption_key="client-rsa.pem", crypto_key_reader=crypto_key_reader - ) - reader = client.create_reader( - topic=topic, start_message_id=MessageId.earliest, 
crypto_key_reader=crypto_key_reader - ) - producer.send(b"hello") - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.value(), b"hello") - consumer.unsubscribe() - - msg = reader.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - reader.read_next(100) - - reader.close() - - client.close() - - def test_tls_auth3(self): - certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" - if not os.path.exists(certs_dir): - certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" - authPlugin = "tls" - authParams = "tlsCertFile:%s/client-cert.pem,tlsKeyFile:%s/client-key.pem" % (certs_dir, certs_dir) - - client = Client( - self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + "cacert.pem", - tls_allow_insecure_connection=False, - authentication=Authentication(authPlugin, authParams), - ) - - topic = "my-python-topic-tls-auth-3-" + str(time.time()) - consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer(topic) - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - client.close() - - def test_auth_junk_params(self): - certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" - if not os.path.exists(certs_dir): - certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" - authPlugin = "someoldjunk.so" - authParams = "blah" - client = Client( - self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + "cacert.pem", - tls_allow_insecure_connection=False, - authentication=Authentication(authPlugin, authParams), - ) - - with self.assertRaises(pulsar.ConnectError): - client.subscribe("my-python-topic-auth-junk-params", "my-sub", consumer_type=ConsumerType.Shared) - - def test_message_listener(self): - client = 
Client(self.serviceUrl) - - received_messages = [] - - def listener(consumer, msg): - print("Got message: %s" % msg) - received_messages.append(msg) - consumer.acknowledge(msg) - - client.subscribe( - "my-python-topic-listener", "my-sub", consumer_type=ConsumerType.Exclusive, message_listener=listener - ) - producer = client.create_producer("my-python-topic-listener") - producer.send(b"hello-1") - producer.send(b"hello-2") - producer.send(b"hello-3") - - time.sleep(0.1) - self.assertEqual(len(received_messages), 3) - self.assertEqual(received_messages[0].data(), b"hello-1") - self.assertEqual(received_messages[1].data(), b"hello-2") - self.assertEqual(received_messages[2].data(), b"hello-3") - client.close() - - def test_consumer_is_connected(self): - client = Client(self.serviceUrl) - topic = "test_consumer_is_connected" - sub = "sub" - consumer = client.subscribe(topic, sub) - self.assertTrue(consumer.is_connected()) - consumer.close() - self.assertFalse(consumer.is_connected()) - client.close() - - def test_reader_simple(self): - client = Client(self.serviceUrl) - reader = client.create_reader("my-python-topic-reader-simple", MessageId.earliest) - - producer = client.create_producer("my-python-topic-reader-simple") - producer.send(b"hello") - - msg = reader.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - reader.read_next(100) - - reader.close() - client.close() - - def test_reader_on_last_message(self): - client = Client(self.serviceUrl) - producer = client.create_producer("my-python-topic-reader-on-last-message") - - for i in range(10): - producer.send(b"hello-%d" % i) - - reader = client.create_reader("my-python-topic-reader-on-last-message", MessageId.latest) - - for i in range(10, 20): - producer.send(b"hello-%d" % i) - - for i in range(10, 20): - msg = reader.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello-%d" % i) - - reader.close() - client.close() - - 
def test_reader_on_specific_message(self): - num_of_msgs = 10 - client = Client(self.serviceUrl) - producer = client.create_producer("my-python-topic-reader-on-specific-message") - - for i in range(num_of_msgs): - producer.send(b"hello-%d" % i) - - reader1 = client.create_reader("my-python-topic-reader-on-specific-message", MessageId.earliest) - - for i in range(num_of_msgs // 2): - msg = reader1.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello-%d" % i) - last_msg_id = msg.message_id() - last_msg_idx = i - - reader2 = client.create_reader("my-python-topic-reader-on-specific-message", last_msg_id) - - # The reset would be effectively done on the next position relative to reset. - # When available, we should test this behaviour with `startMessageIdInclusive` opt. - from_msg_idx = last_msg_idx - for i in range(from_msg_idx + 1, num_of_msgs): - msg = reader2.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello-%d" % i) - - reader1.close() - reader2.close() - client.close() - - def test_reader_on_specific_message_with_batches(self): - client = Client(self.serviceUrl) - producer = client.create_producer( - "my-python-topic-reader-on-specific-message-with-batches", - batching_enabled=True, - batching_max_publish_delay_ms=1000, - ) - - for i in range(10): - producer.send_async(b"hello-%d" % i, None) - - # Send one sync message to make sure everything was published - producer.send(b"hello-10") - - reader1 = client.create_reader("my-python-topic-reader-on-specific-message-with-batches", MessageId.earliest) - - for i in range(5): - msg = reader1.read_next(TM) - last_msg_id = msg.message_id() - - reader2 = client.create_reader("my-python-topic-reader-on-specific-message-with-batches", last_msg_id) - - for i in range(5, 11): - msg = reader2.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello-%d" % i) - - reader1.close() - reader2.close() - client.close() - - def test_reader_is_connected(self): - client = 
Client(self.serviceUrl) - topic = "test_reader_is_connected" - reader = client.create_reader(topic, MessageId.earliest) - self.assertTrue(reader.is_connected()) - reader.close() - self.assertFalse(reader.is_connected()) - client.close() - - def test_producer_sequence_after_reconnection(self): - # Enable deduplication on namespace - doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "true") - client = Client(self.serviceUrl) - - topic = "my-python-test-producer-sequence-after-reconnection-" + str(time.time()) - - producer = client.create_producer(topic, producer_name="my-producer-name") - self.assertEqual(producer.last_sequence_id(), -1) - - for i in range(10): - producer.send(b"hello-%d" % i) - self.assertEqual(producer.last_sequence_id(), i) - - producer.close() - - producer = client.create_producer(topic, producer_name="my-producer-name") - self.assertEqual(producer.last_sequence_id(), 9) - - for i in range(10, 20): - producer.send(b"hello-%d" % i) - self.assertEqual(producer.last_sequence_id(), i) - - client.close() - - doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "false") - - def test_producer_deduplication(self): - # Enable deduplication on namespace - doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "true") - client = Client(self.serviceUrl) - - topic = "my-python-test-producer-deduplication-" + str(time.time()) - - producer = client.create_producer(topic, producer_name="my-producer-name") - self.assertEqual(producer.last_sequence_id(), -1) - - consumer = client.subscribe(topic, "my-sub") - - producer.send(b"hello-0", sequence_id=0) - producer.send(b"hello-1", sequence_id=1) - producer.send(b"hello-2", sequence_id=2) - self.assertEqual(producer.last_sequence_id(), 2) - - # Repeat the messages and verify they're not received by consumer - producer.send(b"hello-1", sequence_id=1) - producer.send(b"hello-2", sequence_id=2) - 
self.assertEqual(producer.last_sequence_id(), 2) - - for i in range(3): - msg = consumer.receive(TM) - self.assertEqual(msg.data(), b"hello-%d" % i) - consumer.acknowledge(msg) - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - producer.close() - - producer = client.create_producer(topic, producer_name="my-producer-name") - self.assertEqual(producer.last_sequence_id(), 2) - - # Repeat the messages and verify they're not received by consumer - producer.send(b"hello-1", sequence_id=1) - producer.send(b"hello-2", sequence_id=2) - self.assertEqual(producer.last_sequence_id(), 2) - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - client.close() - - doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "false") - - def test_producer_routing_mode(self): - client = Client(self.serviceUrl) - producer = client.create_producer( - "my-python-test-producer", message_routing_mode=PartitionsRoutingMode.UseSinglePartition - ) - producer.send(b"test") - client.close() - - def test_message_argument_errors(self): - client = Client(self.serviceUrl) - topic = "my-python-test-producer" - producer = client.create_producer(topic) - - content = "test".encode("utf-8") - - self._check_type_error(lambda: producer.send(5)) - self._check_value_error(lambda: producer.send(content, properties="test")) - self._check_value_error(lambda: producer.send(content, partition_key=5)) - self._check_value_error(lambda: producer.send(content, sequence_id="test")) - self._check_value_error(lambda: producer.send(content, replication_clusters=5)) - self._check_value_error(lambda: producer.send(content, disable_replication="test")) - self._check_value_error(lambda: producer.send(content, event_timestamp="test")) - self._check_value_error(lambda: producer.send(content, deliver_at="test")) - self._check_value_error(lambda: producer.send(content, deliver_after="test")) - client.close() - - def test_client_argument_errors(self): - 
self._check_value_error(lambda: Client(None)) - self._check_value_error(lambda: Client(self.serviceUrl, authentication="test")) - self._check_value_error(lambda: Client(self.serviceUrl, operation_timeout_seconds="test")) - self._check_value_error(lambda: Client(self.serviceUrl, io_threads="test")) - self._check_value_error(lambda: Client(self.serviceUrl, message_listener_threads="test")) - self._check_value_error(lambda: Client(self.serviceUrl, concurrent_lookup_requests="test")) - self._check_value_error(lambda: Client(self.serviceUrl, log_conf_file_path=5)) - self._check_value_error(lambda: Client(self.serviceUrl, use_tls="test")) - self._check_value_error(lambda: Client(self.serviceUrl, tls_trust_certs_file_path=5)) - self._check_value_error(lambda: Client(self.serviceUrl, tls_allow_insecure_connection="test")) - - def test_producer_argument_errors(self): - client = Client(self.serviceUrl) - - self._check_value_error(lambda: client.create_producer(None)) - - topic = "my-python-test-producer" - - self._check_value_error(lambda: client.create_producer(topic, producer_name=5)) - self._check_value_error(lambda: client.create_producer(topic, initial_sequence_id="test")) - self._check_value_error(lambda: client.create_producer(topic, send_timeout_millis="test")) - self._check_value_error(lambda: client.create_producer(topic, compression_type=None)) - self._check_value_error(lambda: client.create_producer(topic, max_pending_messages="test")) - self._check_value_error(lambda: client.create_producer(topic, block_if_queue_full="test")) - self._check_value_error(lambda: client.create_producer(topic, batching_enabled="test")) - self._check_value_error(lambda: client.create_producer(topic, batching_enabled="test")) - self._check_value_error(lambda: client.create_producer(topic, batching_max_allowed_size_in_bytes="test")) - self._check_value_error(lambda: client.create_producer(topic, batching_max_publish_delay_ms="test")) - client.close() - - def 
test_consumer_argument_errors(self): - client = Client(self.serviceUrl) - - topic = "my-python-test-producer" - sub_name = "my-sub-name" - - self._check_value_error(lambda: client.subscribe(None, sub_name)) - self._check_value_error(lambda: client.subscribe(topic, None)) - self._check_value_error(lambda: client.subscribe(topic, sub_name, consumer_type=None)) - self._check_value_error(lambda: client.subscribe(topic, sub_name, receiver_queue_size="test")) - self._check_value_error(lambda: client.subscribe(topic, sub_name, consumer_name=5)) - self._check_value_error(lambda: client.subscribe(topic, sub_name, unacked_messages_timeout_ms="test")) - self._check_value_error(lambda: client.subscribe(topic, sub_name, broker_consumer_stats_cache_time_ms="test")) - client.close() - - def test_reader_argument_errors(self): - client = Client(self.serviceUrl) - topic = "my-python-test-producer" - - # This should not raise exception - client.create_reader(topic, MessageId.earliest) - - self._check_value_error(lambda: client.create_reader(None, MessageId.earliest)) - self._check_value_error(lambda: client.create_reader(topic, None)) - self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, receiver_queue_size="test")) - self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, reader_name=5)) - client.close() - - def test_get_last_message_id(self): - client = Client(self.serviceUrl) - consumer = client.subscribe( - "persistent://public/default/topic_name_test", "topic_name_test_sub", consumer_type=ConsumerType.Shared - ) - producer = client.create_producer("persistent://public/default/topic_name_test") - msg_id = producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertEqual(msg.message_id(), msg_id) - client.close() - - def test_publish_compact_and_consume(self): - client = Client(self.serviceUrl) - topic = "compaction_%s" % (uuid.uuid4()) - producer = client.create_producer(topic, producer_name="my-producer-name", 
batching_enabled=False) - self.assertEqual(producer.last_sequence_id(), -1) - consumer = client.subscribe(topic, "my-sub1", is_read_compacted=True) - consumer.close() - consumer2 = client.subscribe(topic, "my-sub2", is_read_compacted=False) - - # producer create 2 messages with same key. - producer.send(b"hello-0", partition_key="key0") - producer.send(b"hello-1", partition_key="key0") - producer.close() - - # issue compact command, and wait success - url = "%s/admin/v2/persistent/public/default/%s/compaction" % (self.adminUrl, topic) - doHttpPut(url, "") - while True: - s = doHttpGet(url).decode("utf-8") - if "RUNNING" in s: - print(s) - print("Compact still running") - time.sleep(0.2) - else: - print(s) - print("Compact Complete now") - self.assertTrue("SUCCESS" in s) - break - - # after compaction completes the compacted ledger is recorded - # as a property of a cursor. As persisting the cursor is async - # and we don't wait for the acknowledgement of the acknowledgement, - # there may be a race if we try to read the compacted ledger immediately. - # therefore wait a second to allow the compacted ledger to be updated on - # the broker. - time.sleep(1.0) - - # after compact, consumer with `is_read_compacted=True`, expected read only the second message for same key. - consumer1 = client.subscribe(topic, "my-sub1", is_read_compacted=True) - msg0 = consumer1.receive(TM) - self.assertEqual(msg0.data(), b"hello-1") - consumer1.acknowledge(msg0) - consumer1.close() - - # ditto for reader - reader1 = client.create_reader(topic, MessageId.earliest, is_read_compacted=True) - msg0 = reader1.read_next(TM) - self.assertEqual(msg0.data(), b"hello-1") - reader1.close() - - # after compact, consumer with `is_read_compacted=False`, expected read 2 messages for same key. 
- msg0 = consumer2.receive(TM) - self.assertEqual(msg0.data(), b"hello-0") - consumer2.acknowledge(msg0) - msg1 = consumer2.receive(TM) - self.assertEqual(msg1.data(), b"hello-1") - consumer2.acknowledge(msg1) - consumer2.close() - - # ditto for reader - reader2 = client.create_reader(topic, MessageId.earliest, is_read_compacted=False) - msg0 = reader2.read_next(TM) - self.assertEqual(msg0.data(), b"hello-0") - msg1 = reader2.read_next(TM) - self.assertEqual(msg1.data(), b"hello-1") - reader2.close() - client.close() - - def test_reader_has_message_available(self): - # create client, producer, reader - client = Client(self.serviceUrl) - producer = client.create_producer("my-python-topic-reader-has-message-available") - reader = client.create_reader("my-python-topic-reader-has-message-available", MessageId.latest) - - # before produce data, expected not has message available - self.assertFalse(reader.has_message_available()) - - for i in range(10): - producer.send(b"hello-%d" % i) - - # produced data, expected has message available - self.assertTrue(reader.has_message_available()) - - for i in range(10): - msg = reader.read_next(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello-%d" % i) - - # consumed all data, expected not has message available - self.assertFalse(reader.has_message_available()) - - for i in range(10, 20): - producer.send(b"hello-%d" % i) - - # produced data again, expected has message available - self.assertTrue(reader.has_message_available()) - reader.close() - producer.close() - client.close() - - def test_seek(self): - client = Client(self.serviceUrl) - topic = "my-python-topic-seek-" + str(time.time()) - consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer(topic) - - for i in range(100): - if i > 0: - time.sleep(0.02) - producer.send(b"hello-%d" % i) - - ids = [] - timestamps = [] - for i in range(100): - msg = consumer.receive(TM) - self.assertEqual(msg.data(), 
b"hello-%d" % i) - ids.append(msg.message_id()) - timestamps.append(msg.publish_timestamp()) - consumer.acknowledge(msg) - - # seek, and after reconnect, expected receive first message. - consumer.seek(MessageId.earliest) - time.sleep(0.5) - msg = consumer.receive(TM) - self.assertEqual(msg.data(), b"hello-0") - - # seek on messageId - consumer.seek(ids[50]) - time.sleep(0.5) - msg = consumer.receive(TM) - self.assertEqual(msg.data(), b"hello-51") - - # ditto, but seek on timestamp - consumer.seek(timestamps[42]) - time.sleep(0.5) - msg = consumer.receive(TM) - self.assertEqual(msg.data(), b"hello-42") - - # repeat with reader - reader = client.create_reader(topic, MessageId.latest) - with self.assertRaises(pulsar.Timeout): - reader.read_next(100) - - # earliest - reader.seek(MessageId.earliest) - time.sleep(0.5) - msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-0") - msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-1") - - # seek on messageId - reader.seek(ids[33]) - time.sleep(0.5) - msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-34") - msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-35") - - # seek on timestamp - reader.seek(timestamps[79]) - time.sleep(0.5) - msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-79") - msg = reader.read_next(TM) - self.assertEqual(msg.data(), b"hello-80") - - reader.close() - client.close() - - def test_v2_topics(self): - self._v2_topics(self.serviceUrl) - - def test_v2_topics_http(self): - self._v2_topics(self.adminUrl) - - def _v2_topics(self, url): - client = Client(url) - consumer = client.subscribe("my-v2-topic-producer-consumer", "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer("my-v2-topic-producer-consumer") - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - consumer.acknowledge(msg) - - with self.assertRaises(pulsar.Timeout): - 
consumer.receive(100) - - client.close() - - def test_topics_consumer(self): - client = Client(self.serviceUrl) - topic1 = "persistent://public/default/my-python-topics-consumer-1" - topic2 = "persistent://public/default/my-python-topics-consumer-2" - topic3 = "persistent://public/default-2/my-python-topics-consumer-3" # topic from different namespace - topics = [topic1, topic2, topic3] - - url1 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-topics-consumer-1/partitions" - url2 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-topics-consumer-2/partitions" - url3 = self.adminUrl + "/admin/v2/persistent/public/default-2/my-python-topics-consumer-3/partitions" - - doHttpPut(url1, "2") - doHttpPut(url2, "3") - doHttpPut(url3, "4") - - producer1 = client.create_producer(topic1) - producer2 = client.create_producer(topic2) - producer3 = client.create_producer(topic3) - - consumer = client.subscribe( - topics, "my-topics-consumer-sub", consumer_type=ConsumerType.Shared, receiver_queue_size=10 - ) - - for i in range(100): - producer1.send(b"hello-1-%d" % i) - - for i in range(100): - producer2.send(b"hello-2-%d" % i) - - for i in range(100): - producer3.send(b"hello-3-%d" % i) - - for i in range(300): - msg = consumer.receive(TM) - consumer.acknowledge(msg) - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - client.close() - - def test_topics_pattern_consumer(self): - import re - - client = Client(self.serviceUrl) - - topics_pattern = "persistent://public/default/my-python-pattern-consumer.*" - - topic1 = "persistent://public/default/my-python-pattern-consumer-1" - topic2 = "persistent://public/default/my-python-pattern-consumer-2" - topic3 = "persistent://public/default/my-python-pattern-consumer-3" - - url1 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-pattern-consumer-1/partitions" - url2 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-pattern-consumer-2/partitions" - url3 = 
self.adminUrl + "/admin/v2/persistent/public/default/my-python-pattern-consumer-3/partitions" - - doHttpPut(url1, "2") - doHttpPut(url2, "3") - doHttpPut(url3, "4") - - producer1 = client.create_producer(topic1) - producer2 = client.create_producer(topic2) - producer3 = client.create_producer(topic3) - - consumer = client.subscribe( - re.compile(topics_pattern), - "my-pattern-consumer-sub", - consumer_type=ConsumerType.Shared, - receiver_queue_size=10, - pattern_auto_discovery_period=1, - ) - - # wait enough time to trigger auto discovery - time.sleep(2) - - for i in range(100): - producer1.send(b"hello-1-%d" % i) - - for i in range(100): - producer2.send(b"hello-2-%d" % i) - - for i in range(100): - producer3.send(b"hello-3-%d" % i) - - for i in range(300): - msg = consumer.receive(TM) - consumer.acknowledge(msg) - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - client.close() - - def test_message_id(self): - s = MessageId.earliest.serialize() - self.assertEqual(MessageId.deserialize(s), MessageId.earliest) - - s = MessageId.latest.serialize() - self.assertEqual(MessageId.deserialize(s), MessageId.latest) - - def test_get_topics_partitions(self): - client = Client(self.serviceUrl) - topic_partitioned = "persistent://public/default/test_get_topics_partitions" - topic_non_partitioned = "persistent://public/default/test_get_topics_not-partitioned" - - url1 = self.adminUrl + "/admin/v2/persistent/public/default/test_get_topics_partitions/partitions" - doHttpPut(url1, "3") - - self.assertEqual( - client.get_topic_partitions(topic_partitioned), - [ - "persistent://public/default/test_get_topics_partitions-partition-0", - "persistent://public/default/test_get_topics_partitions-partition-1", - "persistent://public/default/test_get_topics_partitions-partition-2", - ], - ) - - self.assertEqual(client.get_topic_partitions(topic_non_partitioned), [topic_non_partitioned]) - client.close() - - def test_token_auth(self): - with 
open("/tmp/pulsar-test-data/tokens/token.txt") as tf: - token = tf.read().strip() - - # Use adminUrl to test both HTTP request and binary protocol - client = Client(self.adminUrl, authentication=AuthenticationToken(token)) - - consumer = client.subscribe( - "persistent://private/auth/my-python-topic-token-auth", "my-sub", consumer_type=ConsumerType.Shared - ) - producer = client.create_producer("persistent://private/auth/my-python-topic-token-auth") - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - client.close() - - def test_token_auth_supplier(self): - def read_token(): - with open("/tmp/pulsar-test-data/tokens/token.txt") as tf: - return tf.read().strip() - - client = Client(self.serviceUrl, authentication=AuthenticationToken(read_token)) - consumer = client.subscribe( - "persistent://private/auth/my-python-topic-token-auth", "my-sub", consumer_type=ConsumerType.Shared - ) - producer = client.create_producer("persistent://private/auth/my-python-topic-token-auth") - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - client.close() - - def test_producer_consumer_zstd(self): - client = Client(self.serviceUrl) - consumer = client.subscribe( - "my-python-topic-producer-consumer-zstd", "my-sub", consumer_type=ConsumerType.Shared - ) - producer = client.create_producer( - "my-python-topic-producer-consumer-zstd", compression_type=CompressionType.ZSTD - ) - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - - consumer.unsubscribe() - client.close() - - def test_client_reference_deleted(self): - def get_producer(): - cl = Client(self.serviceUrl) - return cl.create_producer(topic="foobar") - - producer = get_producer() - producer.send(b"test_payload") - - ##### - - def test_get_topic_name(self): 
- client = Client(self.serviceUrl) - consumer = client.subscribe( - "persistent://public/default/topic_name_test", "topic_name_test_sub", consumer_type=ConsumerType.Shared - ) - producer = client.create_producer("persistent://public/default/topic_name_test") - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertEqual(msg.topic_name(), "persistent://public/default/topic_name_test") - client.close() - - def test_get_partitioned_topic_name(self): - client = Client(self.serviceUrl) - url1 = self.adminUrl + "/admin/v2/persistent/public/default/partitioned_topic_name_test/partitions" - doHttpPut(url1, "3") - - partitions = [ - "persistent://public/default/partitioned_topic_name_test-partition-0", - "persistent://public/default/partitioned_topic_name_test-partition-1", - "persistent://public/default/partitioned_topic_name_test-partition-2", - ] - self.assertEqual( - client.get_topic_partitions("persistent://public/default/partitioned_topic_name_test"), partitions - ) - - consumer = client.subscribe( - "persistent://public/default/partitioned_topic_name_test", - "partitioned_topic_name_test_sub", - consumer_type=ConsumerType.Shared, - ) - producer = client.create_producer("persistent://public/default/partitioned_topic_name_test") - producer.send(b"hello") - - msg = consumer.receive(TM) - self.assertTrue(msg.topic_name() in partitions) - client.close() - - def test_shutdown_client(self): - client = Client(self.serviceUrl) - producer = client.create_producer("persistent://public/default/partitioned_topic_name_test") - producer.send(b"hello") - client.shutdown() - - try: - producer.send(b"hello") - self.assertTrue(False) - except pulsar.PulsarException: - # Expected - pass - - def test_listener_name_client(self): - client = Client(self.serviceUrl, listener_name='test') - try: - producer = client.create_producer("persistent://public/default/partitioned_topic_name_test") - self.fail() - except pulsar.PulsarException: - # Expected - pass - client.close() - - def 
test_negative_acks(self): - client = Client(self.serviceUrl) - consumer = client.subscribe( - "test_negative_acks", "test", schema=pulsar.schema.StringSchema(), negative_ack_redelivery_delay_ms=1000 - ) - producer = client.create_producer("test_negative_acks", schema=pulsar.schema.StringSchema()) - for i in range(10): - producer.send_async("hello-%d" % i, callback=None) - - producer.flush() - - for i in range(10): - msg = consumer.receive() - self.assertEqual(msg.value(), "hello-%d" % i) - consumer.negative_acknowledge(msg) - - for i in range(10): - msg = consumer.receive() - self.assertEqual(msg.value(), "hello-%d" % i) - consumer.acknowledge(msg) - - with self.assertRaises(pulsar.Timeout): - consumer.receive(100) - client.close() - - def test_connect_timeout(self): - client = pulsar.Client( - service_url="pulsar://192.0.2.1:1234", - connection_timeout_ms=1000, # 1 second - ) - t1 = time.time() - try: - producer = client.create_producer("test_connect_timeout") - self.fail("create_producer should not succeed") - except pulsar.ConnectError as expected: - print("expected error: {} when create producer".format(expected)) - t2 = time.time() - self.assertGreater(t2 - t1, 1.0) - self.assertLess(t2 - t1, 1.5) # 1.5 seconds is long enough - client.close() - - def test_json_schema_encode(self): - schema = JsonSchema(TestRecord) - record = TestRecord(a=1, b=2) - # Ensure that encoding a JsonSchema more than once works and produces the same result - first_encode = schema.encode(record) - second_encode = schema.encode(record) - self.assertEqual(first_encode, second_encode) - - def test_logger_thread_leaks(self): - def _do_connect(close): - logger = logging.getLogger(str(threading.current_thread().ident)) - logger.setLevel(logging.INFO) - client = pulsar.Client( - service_url="pulsar://localhost:6650", - io_threads=4, - message_listener_threads=4, - operation_timeout_seconds=1, - log_conf_file_path=None, - authentication=None, - logger=logger, - ) - 
client.get_topic_partitions("persistent://public/default/partitioned_topic_name_test") - if close: - client.close() - - for should_close in (True, False): - self.assertEqual(threading.active_count(), 1, "Explicit close: {}; baseline is 1 thread".format(should_close)) - _do_connect(should_close) - self.assertEqual(threading.active_count(), 1, "Explicit close: {}; synchronous connect doesn't leak threads".format(should_close)) - threads = [] - for _ in range(10): - threads.append(threading.Thread(target=_do_connect, args=(should_close))) - threads[-1].start() - for thread in threads: - thread.join() - assert threading.active_count() == 1, "Explicit close: {}; threaded connect in parallel doesn't leak threads".format(should_close) - - def test_chunking(self): - client = Client(self.serviceUrl) - data_size = 10 * 1024 * 1024 - producer = client.create_producer( - 'test_chunking', - chunking_enabled=True - ) - - consumer = client.subscribe('test_chunking', "my-subscription", - max_pending_chunked_message=10, - auto_ack_oldest_chunked_message_on_queue_full=False - ) - - producer.send(bytes(bytearray(os.urandom(data_size))), None) - msg = consumer.receive(TM) - self.assertEqual(len(msg.data()), data_size) - - def test_invalid_chunking_config(self): - client = Client(self.serviceUrl) - - self._check_value_error(lambda: client.create_producer( - 'test_invalid_chunking_config', - chunking_enabled=True, - batching_enabled=True - )) - - def _check_value_error(self, fun): - with self.assertRaises(ValueError): - fun() - - def _check_type_error(self, fun): - with self.assertRaises(TypeError): - fun() - - def test_basic_auth(self): - username = "admin" - password = "123456" - client = Client(self.adminUrl, authentication=AuthenticationBasic(username, password)) - - topic = "persistent://private/auth/my-python-topic-basic-auth" - consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) - producer = client.create_producer(topic) - producer.send(b"hello") - - 
msg = consumer.receive(TM) - self.assertTrue(msg) - self.assertEqual(msg.data(), b"hello") - client.close() - - def test_invalid_basic_auth(self): - username = "invalid" - password = "123456" - client = Client(self.adminUrl, authentication=AuthenticationBasic(username, password)) - topic = "persistent://private/auth/my-python-topic-invalid-basic-auth" - with self.assertRaises(pulsar.ConnectError): - client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) - -if __name__ == "__main__": - main() diff --git a/pulsar-client-cpp/python/schema_test.py b/pulsar-client-cpp/python/schema_test.py deleted file mode 100755 index 47acc304ef44a..0000000000000 --- a/pulsar-client-cpp/python/schema_test.py +++ /dev/null @@ -1,1291 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -from unittest import TestCase, main - -import fastavro -import pulsar -from pulsar.schema import * -from enum import Enum -import json -from fastavro.schema import load_schema - - -class SchemaTest(TestCase): - - serviceUrl = 'pulsar://localhost:6650' - - def test_simple(self): - class Color(Enum): - red = 1 - green = 2 - blue = 3 - - class Example(Record): - _sorted_fields = True - a = String() - b = Integer() - c = Array(String()) - d = Color - e = Boolean() - f = Float() - g = Double() - h = Bytes() - i = Map(String()) - j = CustomEnum(Color) - - fastavro.parse_schema(Example.schema()) - self.assertEqual(Example.schema(), { - "name": "Example", - "type": "record", - "fields": [ - {"name": "a", "type": ["null", "string"]}, - {"name": "b", "type": ["null", "int"]}, - {"name": "c", "type": ["null", { - "type": "array", - "items": "string"}] - }, - {"name": "d", - "type": ["null", { - "type": "enum", - "name": "Color", - "symbols": ["red", "green", "blue"]}] - }, - {"name": "e", "type": ["null", "boolean"]}, - {"name": "f", "type": ["null", "float"]}, - {"name": "g", "type": ["null", "double"]}, - {"name": "h", "type": ["null", "bytes"]}, - {"name": "i", "type": ["null", { - "type": "map", - "values": "string"}] - }, - {"name": "j", "type": ["null", "Color"]} - ] - }) - - def test_complex(self): - class Color(Enum): - red = 1 - green = 2 - blue = 3 - - class MySubRecord(Record): - _sorted_fields = True - x = Integer() - y = Long() - z = String() - color = CustomEnum(Color) - - class Example(Record): - _sorted_fields = True - a = String() - sub = MySubRecord # Test with class - sub2 = MySubRecord() # Test with instance - - fastavro.parse_schema(Example.schema()) - self.assertEqual(Example.schema(), { - "name": "Example", - "type": "record", - "fields": [ - {"name": "a", "type": ["null", "string"]}, - {"name": "sub", - "type": ["null", { - "name": "MySubRecord", - "type": "record", - "fields": [ - {'name': 'color', 'type': ['null', {'type': 'enum', 'name': 
'Color', 'symbols': - ['red', 'green', 'blue']}]}, - {"name": "x", "type": ["null", "int"]}, - {"name": "y", "type": ["null", "long"]}, - {"name": "z", "type": ["null", "string"]}] - }] - }, - {"name": "sub2", - "type": ["null", 'MySubRecord'] - } - ] - }) - - def test_complex_with_required_fields(self): - class MySubRecord(Record): - x = Integer(required=True) - y = Long(required=True) - z = String() - - class Example(Record): - a = String(required=True) - sub = MySubRecord(required=True) - - self.assertEqual(Example.schema(), { - "name": "Example", - "type": "record", - "fields": [ - {"name": "a", "type": "string"}, - {"name": "sub", - "type": { - "name": "MySubRecord", - "type": "record", - "fields": [{"name": "x", "type": "int"}, - {"name": "y", "type": "long"}, - {"name": "z", "type": ["null", "string"]}] - } - }, - ] - }) - - def test_invalid_enum(self): - class Color: - red = 1 - green = 2 - blue = 3 - - class InvalidEnum(Record): - a = Integer() - b = Color - - # Enum will be ignored - self.assertEqual(InvalidEnum.schema(), - {'name': 'InvalidEnum', 'type': 'record', - 'fields': [{'name': 'a', 'type': ["null", 'int']}]}) - - def test_initialization(self): - class Example(Record): - a = Integer() - b = Integer() - - r = Example(a=1, b=2) - self.assertEqual(r.a, 1) - self.assertEqual(r.b, 2) - - r.b = 5 - - self.assertEqual(r.b, 5) - - # Setting non-declared field should fail - try: - r.c = 3 - self.fail('Should have failed') - except AttributeError: - # Expected - pass - - try: - Record(a=1, c=8) - self.fail('Should have failed') - except AttributeError: - # Expected - pass - - except TypeError: - # Expected - pass - - def _expectTypeError(self, func): - try: - func() - self.fail('Should have failed') - except TypeError: - # Expected - pass - - def test_field_type_check(self): - class Example(Record): - a = Integer() - b = String(required=False) - - self._expectTypeError(lambda: Example(a=1, b=2)) - - class E2(Record): - a = Boolean() - - E2(a=False) # ok - 
self._expectTypeError(lambda: E2(a=1)) - - class E3(Record): - a = Float() - - E3(a=1.0) # Ok - self._expectTypeError(lambda: E3(a=1)) - - class E4(Record): - a = Null() - - E4(a=None) # Ok - self._expectTypeError(lambda: E4(a=1)) - - class E5(Record): - a = Long() - - E5(a=1234) # Ok - self._expectTypeError(lambda: E5(a=1.12)) - - class E6(Record): - a = String() - - E6(a="hello") # Ok - self._expectTypeError(lambda: E5(a=1.12)) - - class E6(Record): - a = Bytes() - - E6(a="hello".encode('utf-8')) # Ok - self._expectTypeError(lambda: E5(a=1.12)) - - class E7(Record): - a = Double() - - E7(a=1.0) # Ok - self._expectTypeError(lambda: E3(a=1)) - - class Color(Enum): - red = 1 - green = 2 - blue = 3 - - class OtherEnum(Enum): - red = 1 - green = 2 - blue = 3 - - class E8(Record): - a = Color - - e = E8(a=Color.red) # Ok - self.assertEqual(e.a, Color.red) - - e = E8(a='red') # Ok - self.assertEqual(e.a, Color.red) - - e = E8(a=1) # Ok - self.assertEqual(e.a, Color.red) - - self._expectTypeError(lambda: E8(a='redx')) - self._expectTypeError(lambda: E8(a=OtherEnum.red)) - self._expectTypeError(lambda: E8(a=5)) - - class E9(Record): - a = Array(String()) - - E9(a=['a', 'b', 'c']) # Ok - self._expectTypeError(lambda: E9(a=1)) - self._expectTypeError(lambda: E9(a=[1, 2, 3])) - self._expectTypeError(lambda: E9(a=['1', '2', 3])) - - class E10(Record): - a = Map(Integer()) - - E10(a={'a': 1, 'b': 2}) # Ok - self._expectTypeError(lambda: E10(a=1)) - self._expectTypeError(lambda: E10(a={'a': '1', 'b': 2})) - self._expectTypeError(lambda: E10(a={1: 1, 'b': 2})) - - class SubRecord1(Record): - s = Integer() - - class SubRecord2(Record): - s = String() - - class E11(Record): - a = SubRecord1 - - E11(a=SubRecord1(s=1)) # Ok - self._expectTypeError(lambda: E11(a=1)) - self._expectTypeError(lambda: E11(a=SubRecord2(s='hello'))) - - def test_field_type_check_defaults(self): - try: - class Example(Record): - a = Integer(default="xyz") - - self.fail("Class declaration should have 
failed") - except TypeError: - pass # Expected - - def test_serialize_json(self): - class Example(Record): - a = Integer() - b = Integer() - - self.assertEqual(Example.schema(), { - "name": "Example", - "type": "record", - "fields": [ - {"name": "a", "type": ["null", "int"]}, - {"name": "b", "type": ["null", "int"]}, - ] - }) - - s = JsonSchema(Example) - r = Example(a=1, b=2) - data = s.encode(r) - self.assertEqual(json.loads(data), {'a': 1, 'b': 2}) - - r2 = s.decode(data) - self.assertEqual(r2.__class__.__name__, 'Example') - self.assertEqual(r2, r) - - def test_serialize_avro(self): - class Example(Record): - a = Integer() - b = Integer() - - self.assertEqual(Example.schema(), { - "name": "Example", - "type": "record", - "fields": [ - {"name": "a", "type": ["null", "int"]}, - {"name": "b", "type": ["null", "int"]}, - ] - }) - - s = AvroSchema(Example) - r = Example(a=1, b=2) - data = s.encode(r) - - r2 = s.decode(data) - self.assertEqual(r2.__class__.__name__, 'Example') - self.assertEqual(r2, r) - - def test_non_sorted_fields(self): - class T1(Record): - a = Integer() - b = Integer() - c = Double() - d = String() - - class T2(Record): - b = Integer() - a = Integer() - d = String() - c = Double() - - self.assertNotEqual(T1.schema()['fields'], T2.schema()['fields']) - - def test_sorted_fields(self): - class T1(Record): - _sorted_fields = True - a = Integer() - b = Integer() - - class T2(Record): - _sorted_fields = True - b = Integer() - a = Integer() - - self.assertEqual(T1.schema()['fields'], T2.schema()['fields']) - - def test_schema_version(self): - class Example(Record): - a = Integer() - b = Integer() - - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - 'my-avro-python-schema-version-topic', - schema=AvroSchema(Example)) - - consumer = client.subscribe('my-avro-python-schema-version-topic', 'sub-1', - schema=AvroSchema(Example)) - - r = Example(a=1, b=2) - producer.send(r) - - msg = consumer.receive() - - 
self.assertIsNotNone(msg.schema_version()) - - self.assertEquals(b'\x00\x00\x00\x00\x00\x00\x00\x00', msg.schema_version().encode()) - - self.assertEqual(r, msg.value()) - - client.close() - - def test_serialize_wrong_types(self): - class Example(Record): - a = Integer() - b = Integer() - - class Foo(Record): - x = Integer() - y = Integer() - - s = JsonSchema(Example) - try: - data = s.encode(Foo(x=1, y=2)) - self.fail('Should have failed') - except TypeError: - pass # expected - - try: - data = s.encode('hello') - self.fail('Should have failed') - except TypeError: - pass # expected - - def test_defaults(self): - class Example(Record): - a = Integer(default=5) - b = Integer() - c = String(default='hello') - - r = Example() - self.assertEqual(r.a, 5) - self.assertEqual(r.b, None) - self.assertEqual(r.c, 'hello') - - def test_none_value(self): - """ - The objective of the test is to check that if no value is assigned to the attribute, the validation is returning - the expect default value as defined in the Field class - """ - class Example(Record): - a = Null() - b = Boolean() - c = Integer() - d = Long() - e = Float() - f = Double() - g = Bytes() - h = String() - - r = Example() - - self.assertIsNone(r.a) - self.assertFalse(r.b) - self.assertIsNone(r.c) - self.assertIsNone(r.d) - self.assertIsNone(r.e) - self.assertIsNone(r.f) - self.assertIsNone(r.g) - self.assertIsNone(r.h) - #### - - def test_json_schema(self): - - class Example(Record): - a = Integer() - b = Integer() - - # Incompatible variation of the class - class BadExample(Record): - a = String() - b = Integer() - - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - 'my-json-python-topic', - schema=JsonSchema(Example)) - - # Validate that incompatible schema is rejected - try: - client.subscribe('my-json-python-topic', 'sub-1', - schema=JsonSchema(BadExample)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - try: - 
client.subscribe('my-json-python-topic', 'sub-1', - schema=StringSchema(BadExample)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - try: - client.subscribe('my-json-python-topic', 'sub-1', - schema=AvroSchema(BadExample)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - consumer = client.subscribe('my-json-python-topic', 'sub-1', - schema=JsonSchema(Example)) - - r = Example(a=1, b=2) - producer.send(r) - - msg = consumer.receive() - - self.assertEqual(r, msg.value()) - - producer.close() - consumer.close() - client.close() - - def test_string_schema(self): - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - 'my-string-python-topic', - schema=StringSchema()) - - - # Validate that incompatible schema is rejected - try: - class Example(Record): - a = Integer() - b = Integer() - - client.create_producer('my-string-python-topic', - schema=JsonSchema(Example)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - consumer = client.subscribe('my-string-python-topic', 'sub-1', - schema=StringSchema()) - - producer.send("Hello") - - msg = consumer.receive() - - self.assertEqual("Hello", msg.value()) - self.assertEqual(b"Hello", msg.data()) - client.close() - - def test_bytes_schema(self): - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - 'my-bytes-python-topic', - schema=BytesSchema()) - - # Validate that incompatible schema is rejected - try: - class Example(Record): - a = Integer() - b = Integer() - - client.create_producer('my-bytes-python-topic', - schema=JsonSchema(Example)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - consumer = client.subscribe('my-bytes-python-topic', 'sub-1', - schema=BytesSchema()) - - producer.send(b"Hello") - - msg = consumer.receive() - - self.assertEqual(b"Hello", msg.value()) - client.close() - - def test_avro_schema(self): - - class Example(Record): - a = 
Integer() - b = Integer() - - # Incompatible variation of the class - class BadExample(Record): - a = String() - b = Integer() - - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - 'my-avro-python-topic', - schema=AvroSchema(Example)) - - # Validate that incompatible schema is rejected - try: - client.subscribe('my-avro-python-topic', 'sub-1', - schema=AvroSchema(BadExample)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - try: - client.subscribe('my-avro-python-topic', 'sub-2', - schema=JsonSchema(Example)) - self.fail('Should have failed') - except Exception as e: - pass # Expected - - consumer = client.subscribe('my-avro-python-topic', 'sub-3', - schema=AvroSchema(Example)) - - r = Example(a=1, b=2) - producer.send(r) - - msg = consumer.receive() - - self.assertEqual(r, msg.value()) - - producer.close() - consumer.close() - client.close() - - def test_json_enum(self): - class MyEnum(Enum): - A = 1 - B = 2 - C = 3 - - class Example(Record): - name = String() - v = MyEnum - w = CustomEnum(MyEnum) - x = CustomEnum(MyEnum, required=True, default=MyEnum.A, required_default=True) - - topic = 'my-json-enum-topic' - - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - topic=topic, - schema=JsonSchema(Example)) - - consumer = client.subscribe(topic, 'test', - schema=JsonSchema(Example)) - - r = Example(name='test', v=MyEnum.C, w=MyEnum.B) - producer.send(r) - - msg = consumer.receive() - - self.assertEqual('test', msg.value().name) - self.assertEqual(MyEnum.C, MyEnum(msg.value().v)) - self.assertEqual(MyEnum.B, MyEnum(msg.value().w)) - self.assertEqual(MyEnum.A, MyEnum(msg.value().x)) - client.close() - - def test_avro_enum(self): - class MyEnum(Enum): - A = 1 - B = 2 - C = 3 - - class Example(Record): - name = String() - v = MyEnum - w = CustomEnum(MyEnum) - x = CustomEnum(MyEnum, required=True, default=MyEnum.B, required_default=True) - - topic = 'my-avro-enum-topic' - - client = 
pulsar.Client(self.serviceUrl) - producer = client.create_producer( - topic=topic, - schema=AvroSchema(Example)) - - consumer = client.subscribe(topic, 'test', - schema=AvroSchema(Example)) - - r = Example(name='test', v=MyEnum.C, w=MyEnum.A) - producer.send(r) - - msg = consumer.receive() - msg.value() - self.assertEqual(MyEnum.C, msg.value().v) - self.assertEqual(MyEnum.A, MyEnum(msg.value().w)) - self.assertEqual(MyEnum.B, MyEnum(msg.value().x)) - client.close() - - def test_avro_map_array(self): - class MapArray(Record): - values = Map(Array(Integer())) - - class MapMap(Record): - values = Map(Map(Integer())) - - class ArrayMap(Record): - values = Array(Map(Integer())) - - class ArrayArray(Record): - values = Array(Array(Integer())) - - topic_prefix = "my-avro-map-array-topic-" - data_list = ( - (topic_prefix + "0", AvroSchema(MapArray), - MapArray(values={"A": [1, 2], "B": [3]})), - (topic_prefix + "1", AvroSchema(MapMap), - MapMap(values={"A": {"B": 2},})), - (topic_prefix + "2", AvroSchema(ArrayMap), - ArrayMap(values=[{"A": 1}, {"B": 2}, {"C": 3}])), - (topic_prefix + "3", AvroSchema(ArrayArray), - ArrayArray(values=[[1, 2, 3], [4]])), - ) - - client = pulsar.Client(self.serviceUrl) - for data in data_list: - topic = data[0] - schema = data[1] - record = data[2] - - producer = client.create_producer(topic, schema=schema) - consumer = client.subscribe(topic, 'sub', schema=schema) - - producer.send(record) - msg = consumer.receive() - self.assertEqual(msg.value().values, record.values) - consumer.acknowledge(msg) - consumer.close() - producer.close() - - client.close() - - def test_avro_required_default(self): - class MySubRecord(Record): - _sorted_fields = True - x = Integer() - y = Long() - z = String() - - class Example(Record): - a = Integer() - b = Boolean(required=True) - c = Long() - d = Float() - e = Double() - f = String() - g = Bytes() - h = Array(String()) - i = Map(String()) - j = MySubRecord() - - - class ExampleRequiredDefault(Record): - 
_sorted_fields = True - a = Integer(required_default=True) - b = Boolean(required=True, required_default=True) - c = Long(required_default=True) - d = Float(required_default=True) - e = Double(required_default=True) - f = String(required_default=True) - g = Bytes(required_default=True) - h = Array(String(), required_default=True) - i = Map(String(), required_default=True) - j = MySubRecord(required_default=True) - self.assertEqual(ExampleRequiredDefault.schema(), { - "name": "ExampleRequiredDefault", - "type": "record", - "fields": [ - { - "name": "a", - "type": [ - "null", - "int" - ], - "default": None - }, - { - "name": "b", - "type": "boolean", - "default": False - }, - { - "name": "c", - "type": [ - "null", - "long" - ], - "default": None - }, - { - "name": "d", - "type": [ - "null", - "float" - ], - "default": None - }, - { - "name": "e", - "type": [ - "null", - "double" - ], - "default": None - }, - { - "name": "f", - "type": [ - "null", - "string" - ], - "default": None - }, - { - "name": "g", - "type": [ - "null", - "bytes" - ], - "default": None - }, - { - "name": "h", - "type": [ - "null", - { - "type": "array", - "items": "string" - } - ], - "default": None - }, - { - "name": "i", - "type": [ - "null", - { - "type": "map", - "values": "string" - } - ], - "default": None - }, - { - "name": "j", - "type": [ - "null", - { - "name": "MySubRecord", - "type": "record", - "fields": [ - { - "name": "x", - "type": [ - "null", - "int" - ] - }, - { - "name": "y", - "type": [ - "null", - "long" - ], - }, - { - "name": "z", - "type": [ - "null", - "string" - ] - } - ] - } - ], - "default": None - } - ] - }) - - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - 'my-avro-python-default-topic', - schema=AvroSchema(Example)) - - producer_default = client.create_producer( - 'my-avro-python-default-topic', - schema=AvroSchema(ExampleRequiredDefault)) - - producer.close() - producer_default.close() - - client.close() - - def 
test_default_value(self): - class MyRecord(Record): - A = Integer() - B = String() - C = Boolean(default=True, required=True) - D = Double(default=6.4) - - topic = "my-default-value-topic" - - client = pulsar.Client(self.serviceUrl) - producer = client.create_producer( - topic=topic, - schema=JsonSchema(MyRecord)) - - consumer = client.subscribe(topic, 'test', schema=JsonSchema(MyRecord)) - - r = MyRecord(A=5, B="text") - producer.send(r) - - msg = consumer.receive() - self.assertEqual(msg.value().A, 5) - self.assertEqual(msg.value().B, u'text') - self.assertEqual(msg.value().C, True) - self.assertEqual(msg.value().D, 6.4) - - producer.close() - consumer.close() - client.close() - - def test_serialize_schema_complex(self): - class Color(Enum): - red = 1 - green = 2 - blue = 3 - - class NestedObj1(Record): - _sorted_fields = True - na1 = String() - nb1 = Double() - - class NestedObj2(Record): - _sorted_fields = True - na2 = Integer() - nb2 = Boolean() - nc2 = NestedObj1() - - class NestedObj3(Record): - _sorted_fields = True - color = CustomEnum(Color) - na3 = Integer() - - class NestedObj4(Record): - _avro_namespace = 'xxx4' - _sorted_fields = True - na4 = String() - nb4 = Integer() - - class ComplexRecord(Record): - _avro_namespace = 'xxx.xxx' - _sorted_fields = True - a = Integer() - b = Integer() - color = Color - color2 = Color - color3 = CustomEnum(Color, required=True, default=Color.red, required_default=True) - nested = NestedObj2() - nested2 = NestedObj2() - mapNested = Map(NestedObj3()) - mapNested2 = Map(NestedObj3()) - arrayNested = Array(NestedObj4()) - arrayNested2 = Array(NestedObj4()) - - print('complex schema: ', ComplexRecord.schema()) - self.assertEqual(ComplexRecord.schema(), { - "name": "ComplexRecord", - "namespace": "xxx.xxx", - "type": "record", - "fields": [ - {"name": "a", "type": ["null", "int"]}, - {'name': 'arrayNested', 'type': ['null', {'type': 'array', 'items': - {'name': 'NestedObj4', 'namespace': 'xxx4', 'type': 'record', 'fields': 
[ - {'name': 'na4', 'type': ['null', 'string']}, - {'name': 'nb4', 'type': ['null', 'int']} - ]}} - ]}, - {'name': 'arrayNested2', 'type': ['null', {'type': 'array', 'items': 'xxx4.NestedObj4'}]}, - {"name": "b", "type": ["null", "int"]}, - {'name': 'color', 'type': ['null', {'type': 'enum', 'name': 'Color', 'symbols': [ - 'red', 'green', 'blue']}]}, - {'name': 'color2', 'type': ['null', 'Color']}, - {'name': 'color3', 'default': 'red', 'type': 'Color'}, - {'name': 'mapNested', 'type': ['null', {'type': 'map', 'values': - {'name': 'NestedObj3', 'type': 'record', 'fields': [ - {'name': 'color', 'type': ['null', 'Color']}, - {'name': 'na3', 'type': ['null', 'int']} - ]}} - ]}, - {'name': 'mapNested2', 'type': ['null', {'type': 'map', 'values': 'NestedObj3'}]}, - {"name": "nested", "type": ['null', {'name': 'NestedObj2', 'type': 'record', 'fields': [ - {'name': 'na2', 'type': ['null', 'int']}, - {'name': 'nb2', 'type': ['null', 'boolean']}, - {'name': 'nc2', 'type': ['null', {'name': 'NestedObj1', 'type': 'record', 'fields': [ - {'name': 'na1', 'type': ['null', 'string']}, - {'name': 'nb1', 'type': ['null', 'double']} - ]}]} - ]}]}, - {"name": "nested2", "type": ['null', 'NestedObj2']} - ] - }) - - def encode_and_decode(schema_type): - data_schema = AvroSchema(ComplexRecord) - if schema_type == 'json': - data_schema = JsonSchema(ComplexRecord) - - nested_obj1 = NestedObj1(na1='na1 value', nb1=20.5) - nested_obj2 = NestedObj2(na2=22, nb2=True, nc2=nested_obj1) - r = ComplexRecord(a=1, b=2, color=Color.red, color2=Color.blue, - nested=nested_obj2, nested2=nested_obj2, - mapNested={ - 'a': NestedObj3(na3=1, color=Color.green), - 'b': NestedObj3(na3=2), - 'c': NestedObj3(na3=3, color=Color.red) - }, mapNested2={ - 'd': NestedObj3(na3=4, color=Color.red), - 'e': NestedObj3(na3=5, color=Color.blue), - 'f': NestedObj3(na3=6) - }, arrayNested=[ - NestedObj4(na4='value na4 1', nb4=100), - NestedObj4(na4='value na4 2', nb4=200) - ], arrayNested2=[ - NestedObj4(na4='value na4 
3', nb4=300), - NestedObj4(na4='value na4 4', nb4=400) - ]) - data_encode = data_schema.encode(r) - - data_decode = data_schema.decode(data_encode) - self.assertEqual(data_decode.__class__.__name__, 'ComplexRecord') - self.assertEqual(data_decode, r) - self.assertEqual(r.color3, Color.red) - self.assertEqual(r.mapNested['a'].color, Color.green) - self.assertEqual(r.mapNested['b'].color, None) - print('Encode and decode complex schema finish. schema_type: ', schema_type) - - encode_and_decode('avro') - encode_and_decode('json') - - def test_sub_record_set_to_none(self): - class NestedObj1(Record): - na1 = String() - nb1 = Double() - - class NestedObj2(Record): - na2 = Integer() - nb2 = Boolean() - nc2 = NestedObj1() - - data_schema = AvroSchema(NestedObj2) - r = NestedObj2(na2=1, nb2=True) - - data_encode = data_schema.encode(r) - data_decode = data_schema.decode(data_encode) - - self.assertEqual(data_decode.__class__.__name__, 'NestedObj2') - self.assertEqual(data_decode, r) - self.assertEqual(data_decode.na2, 1) - self.assertTrue(data_decode.nb2) - - def test_produce_and_consume_complex_schema_data(self): - class Color(Enum): - red = 1 - green = 2 - blue = 3 - - class NestedObj1(Record): - na1 = String() - nb1 = Double() - - class NestedObj2(Record): - na2 = Integer() - nb2 = Boolean() - nc2 = NestedObj1() - - class NestedObj3(Record): - na3 = Integer() - color = CustomEnum(Color, required=True, required_default=True, default=Color.blue) - - class NestedObj4(Record): - na4 = String() - nb4 = Integer() - - class ComplexRecord(Record): - a = Integer() - b = Integer() - color = CustomEnum(Color) - nested = NestedObj2() - mapNested = Map(NestedObj3()) - arrayNested = Array(NestedObj4()) - - client = pulsar.Client(self.serviceUrl) - - def produce_consume_test(schema_type): - topic = "my-complex-schema-topic-" + schema_type - - data_schema = AvroSchema(ComplexRecord) - if schema_type == 'json': - data_schema= JsonSchema(ComplexRecord) - - producer = 
client.create_producer( - topic=topic, - schema=data_schema) - - consumer = client.subscribe(topic, 'test', schema=data_schema) - - nested_obj1 = NestedObj1(na1='na1 value', nb1=20.5) - nested_obj2 = NestedObj2(na2=22, nb2=True, nc2=nested_obj1) - r = ComplexRecord(a=1, b=2, nested=nested_obj2, mapNested={ - 'a': NestedObj3(na3=1, color=Color.red), - 'b': NestedObj3(na3=2, color=Color.green), - 'c': NestedObj3(na3=3) - }, arrayNested=[ - NestedObj4(na4='value na4 1', nb4=100), - NestedObj4(na4='value na4 2', nb4=200) - ]) - producer.send(r) - - msg = consumer.receive() - value = msg.value() - self.assertEqual(value.__class__.__name__, 'ComplexRecord') - self.assertEqual(value, r) - - print('Produce and consume complex schema data finish. schema_type', schema_type) - - produce_consume_test('avro') - produce_consume_test('json') - - client.close() - - def custom_schema_test(self): - - def encode_and_decode(schema_definition): - avro_schema = AvroSchema(None, schema_definition=schema_definition) - - company = { - "name": "company-name", - "address": 'xxx road xxx street', - "employees": [ - {"name": "user1", "age": 25}, - {"name": "user2", "age": 30}, - {"name": "user3", "age": 35}, - ], - "labels": { - "industry": "software", - "scale": ">100", - "funds": "1000000.0" - }, - "companyType": "companyType1" - } - data = avro_schema.encode(company) - company_decode = avro_schema.decode(data) - self.assertEqual(company, company_decode) - - schema_definition = { - 'doc': 'this is doc', - 'namespace': 'example.avro', - 'type': 'record', - 'name': 'Company', - 'fields': [ - {'name': 'name', 'type': ['null', 'string']}, - {'name': 'address', 'type': ['null', 'string']}, - {'name': 'employees', 'type': ['null', {'type': 'array', 'items': { - 'type': 'record', - 'name': 'Employee', - 'fields': [ - {'name': 'name', 'type': ['null', 'string']}, - {'name': 'age', 'type': ['null', 'int']} - ] - }}]}, - {'name': 'labels', 'type': ['null', {'type': 'map', 'values': 'string'}]}, - 
{'name': 'companyType', 'type': ['null', {'type': 'enum', 'name': 'CompanyType', 'symbols': - ['companyType1', 'companyType2', 'companyType3']}]} - ] - } - encode_and_decode(schema_definition) - # Users could load schema from file by `fastavro.schema` - # Or use `avro.schema` like this `avro.schema.parse(open("examples/company.avsc", "rb").read()).to_json()` - encode_and_decode(load_schema("examples/company.avsc")) - - def custom_schema_produce_and_consume_test(self): - client = pulsar.Client(self.serviceUrl) - - def produce_and_consume(topic, schema_definition): - print('custom schema produce and consume test topic - ', topic) - example_avro_schema = AvroSchema(None, schema_definition=schema_definition) - - producer = client.create_producer( - topic=topic, - schema=example_avro_schema) - consumer = client.subscribe(topic, 'test', schema=example_avro_schema) - - for i in range(0, 10): - company = { - "name": "company-name" + str(i), - "address": 'xxx road xxx street ' + str(i), - "employees": [ - {"name": "user" + str(i), "age": 20 + i}, - {"name": "user" + str(i), "age": 30 + i}, - {"name": "user" + str(i), "age": 35 + i}, - ], - "labels": { - "industry": "software" + str(i), - "scale": ">100", - "funds": "1000000.0" - }, - "companyType": "companyType" + str((i % 3) + 1) - } - producer.send(company) - - for i in range(0, 10): - msg = consumer.receive() - company = { - "name": "company-name" + str(i), - "address": 'xxx road xxx street ' + str(i), - "employees": [ - {"name": "user" + str(i), "age": 20 + i}, - {"name": "user" + str(i), "age": 30 + i}, - {"name": "user" + str(i), "age": 35 + i}, - ], - "labels": { - "industry": "software" + str(i), - "scale": ">100", - "funds": "1000000.0" - } - } - self.assertEqual(msg.value(), company) - consumer.acknowledge(msg) - - consumer.close() - producer.close() - - schema_definition = { - 'doc': 'this is doc', - 'namespace': 'example.avro', - 'type': 'record', - 'name': 'Company', - 'fields': [ - {'name': 'name', 'type': 
['null', 'string']}, - {'name': 'address', 'type': ['null', 'string']}, - {'name': 'employees', 'type': ['null', {'type': 'array', 'items': { - 'type': 'record', - 'name': 'Employee', - 'fields': [ - {'name': 'name', 'type': ['null', 'string']}, - {'name': 'age', 'type': ['null', 'int']} - ] - }}]}, - {'name': 'labels', 'type': ['null', {'type': 'map', 'values': 'string'}]} - ] - } - produce_and_consume('custom-schema-test-1', schema_definition=schema_definition) - produce_and_consume('custom-schema-test-2', schema_definition=load_schema("examples/company.avsc")) - - client.close() - - def test_json_schema_encode_remove_reserved_key(self): - class SchemaB(Record): - field = String(required=True) - - class SchemaA(Record): - field = SchemaB() - - a = SchemaA(field=SchemaB(field="something")) - b = JsonSchema(SchemaA).encode(a) - # reserved field should not be in the encoded json - self.assertTrue(b'_default' not in b) - self.assertTrue(b'_required' not in b) - self.assertTrue(b'_required_default' not in b) - - def test_schema_array_wrong_type(self): - class SomeSchema(Record): - some_field = Array(Integer(), required=False, default=[]) - # descriptive error message - with self.assertRaises(TypeError) as e: - SomeSchema(some_field=["not", "integer"]) - self.assertEqual(str(e.exception), "Array field some_field items should all be of type int") -if __name__ == '__main__': - main() diff --git a/pulsar-client-cpp/python/setup.py b/pulsar-client-cpp/python/setup.py deleted file mode 100644 index 684d809d1a99e..0000000000000 --- a/pulsar-client-cpp/python/setup.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from setuptools import setup -from distutils.core import Extension -from distutils.util import strtobool -from os import environ - -from distutils.command import build_ext - -import xml.etree.ElementTree as ET -from os.path import dirname, realpath, join - -def get_version(): - use_full_pom_name = strtobool(environ.get('USE_FULL_POM_NAME', 'False')) - - # Get the pulsar version from pom.xml - TOP_LEVEL_PATH = dirname(dirname(dirname(realpath(__file__)))) - POM_PATH = join(TOP_LEVEL_PATH, 'pom.xml') - root = ET.XML(open(POM_PATH).read()) - version = root.find('{http://maven.apache.org/POM/4.0.0}version').text.strip() - - if use_full_pom_name: - return version - else: - # Strip the '-incubating' suffix, since it prevents the packages - # from being uploaded into PyPI - return version.split('-')[0] - - -def get_name(): - postfix = environ.get('NAME_POSTFIX', '') - base = 'pulsar-client' - return base + postfix - - -VERSION = get_version() -NAME = get_name() - -print(VERSION) -print(NAME) - - -# This is a workaround to have setuptools to include -# the already compiled _pulsar.so library -class my_build_ext(build_ext.build_ext): - def build_extension(self, ext): - import shutil - import os.path - - try: - os.makedirs(os.path.dirname(self.get_ext_fullpath(ext.name))) - except OSError as e: - if e.errno != 17: # already exists - raise - shutil.copyfile('_pulsar.so', self.get_ext_fullpath(ext.name)) - - -# Core Client dependencies -dependencies = [ - 'certifi', -] - -extras_require = {} - -# functions dependencies -extras_require["functions"] = sorted( - { - 
"protobuf>=3.6.1,<=3.20.*", - "grpcio<1.28,>=1.8.2", - "apache-bookkeeper-client>=4.9.2", - "prometheus_client", - "ratelimit" - } -) - -# avro dependencies -extras_require["avro"] = sorted( - { - "fastavro==0.24.0" - } -) - -# all dependencies -extras_require["all"] = sorted(set(sum(extras_require.values(), []))) - -setup( - name=NAME, - version=VERSION, - packages=['pulsar', 'pulsar.schema', 'pulsar.functions'], - cmdclass={'build_ext': my_build_ext}, - ext_modules=[Extension('_pulsar', [])], - - author="Pulsar Devs", - author_email="dev@pulsar.apache.org", - description="Apache Pulsar Python client library", - license="Apache License v2.0", - url="https://pulsar.apache.org/", - install_requires=dependencies, - extras_require=extras_require, -) diff --git a/pulsar-client-cpp/python/src/authentication.cc b/pulsar-client-cpp/python/src/authentication.cc deleted file mode 100644 index 791749819ba46..0000000000000 --- a/pulsar-client-cpp/python/src/authentication.cc +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -AuthenticationWrapper::AuthenticationWrapper() {} - -AuthenticationWrapper::AuthenticationWrapper(const std::string& dynamicLibPath, - const std::string& authParamsString) { - this->auth = AuthFactory::create(dynamicLibPath, authParamsString); -} - -struct AuthenticationTlsWrapper : public AuthenticationWrapper { - AuthenticationTlsWrapper(const std::string& certificatePath, const std::string& privateKeyPath) - : AuthenticationWrapper() { - this->auth = AuthTls::create(certificatePath, privateKeyPath); - } -}; - -struct TokenSupplierWrapper { - PyObject* _pySupplier; - - TokenSupplierWrapper(py::object pySupplier) : _pySupplier(pySupplier.ptr()) { Py_XINCREF(_pySupplier); } - - TokenSupplierWrapper(const TokenSupplierWrapper& other) { - _pySupplier = other._pySupplier; - Py_XINCREF(_pySupplier); - } - - TokenSupplierWrapper& operator=(const TokenSupplierWrapper& other) { - _pySupplier = other._pySupplier; - Py_XINCREF(_pySupplier); - return *this; - } - - virtual ~TokenSupplierWrapper() { Py_XDECREF(_pySupplier); } - - std::string operator()() { - PyGILState_STATE state = PyGILState_Ensure(); - - std::string token; - try { - token = py::call(_pySupplier); - } catch (const py::error_already_set& e) { - PyErr_Print(); - } - - PyGILState_Release(state); - return token; - } -}; - -struct AuthenticationTokenWrapper : public AuthenticationWrapper { - AuthenticationTokenWrapper(py::object token) : AuthenticationWrapper() { - if (py::extract(token).check()) { - // It's a string - std::string tokenStr = py::extract(token); - this->auth = AuthToken::createWithToken(tokenStr); - } else { - // It's a function object - this->auth = AuthToken::create(TokenSupplierWrapper(token)); - } - } -}; - -struct AuthenticationAthenzWrapper : public AuthenticationWrapper { - AuthenticationAthenzWrapper(const std::string& authParamsString) : AuthenticationWrapper() { - this->auth = AuthAthenz::create(authParamsString); - } -}; - -struct AuthenticationOauth2Wrapper 
: public AuthenticationWrapper { - AuthenticationOauth2Wrapper(const std::string& authParamsString) : AuthenticationWrapper() { - this->auth = AuthOauth2::create(authParamsString); - } -}; - -struct AuthenticationBasicWrapper : public AuthenticationWrapper { - AuthenticationBasicWrapper(const std::string& username, const std::string& password) - : AuthenticationWrapper() { - this->auth = AuthBasic::create(username, password); - } -}; - -void export_authentication() { - using namespace boost::python; - - class_("Authentication", init()); - - class_ >( - "AuthenticationTLS", init()); - - class_ >("AuthenticationToken", - init()); - - class_ >("AuthenticationAthenz", - init()); - - class_ >("AuthenticationOauth2", - init()); - - class_ >( - "AuthenticationBasic", init()); -} diff --git a/pulsar-client-cpp/python/src/client.cc b/pulsar-client-cpp/python/src/client.cc deleted file mode 100644 index 701578de54169..0000000000000 --- a/pulsar-client-cpp/python/src/client.cc +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -Producer Client_createProducer(Client& client, const std::string& topic, const ProducerConfiguration& conf) { - Producer producer; - - waitForAsyncValue(std::function([&](CreateProducerCallback callback) { - client.createProducerAsync(topic, conf, callback); - }), - producer); - - return producer; -} - -Consumer Client_subscribe(Client& client, const std::string& topic, const std::string& subscriptionName, - const ConsumerConfiguration& conf) { - Consumer consumer; - - waitForAsyncValue(std::function([&](SubscribeCallback callback) { - client.subscribeAsync(topic, subscriptionName, conf, callback); - }), - consumer); - - return consumer; -} - -Consumer Client_subscribe_topics(Client& client, boost::python::list& topics, - const std::string& subscriptionName, const ConsumerConfiguration& conf) { - std::vector topics_vector; - for (int i = 0; i < len(topics); i++) { - std::string content = boost::python::extract(topics[i]); - topics_vector.push_back(content); - } - - Consumer consumer; - - waitForAsyncValue(std::function([&](SubscribeCallback callback) { - client.subscribeAsync(topics_vector, subscriptionName, conf, callback); - }), - consumer); - - return consumer; -} - -Consumer Client_subscribe_pattern(Client& client, const std::string& topic_pattern, - const std::string& subscriptionName, const ConsumerConfiguration& conf) { - Consumer consumer; - - waitForAsyncValue(std::function([&](SubscribeCallback callback) { - client.subscribeWithRegexAsync(topic_pattern, subscriptionName, conf, callback); - }), - consumer); - - return consumer; -} - -Reader Client_createReader(Client& client, const std::string& topic, const MessageId& startMessageId, - const ReaderConfiguration& conf) { - Reader reader; - - waitForAsyncValue(std::function([&](ReaderCallback callback) { - client.createReaderAsync(topic, startMessageId, conf, callback); - }), - reader); - - return reader; -} - -boost::python::list Client_getTopicPartitions(Client& client, const 
std::string& topic) { - std::vector partitions; - - waitForAsyncValue(std::function([&](GetPartitionsCallback callback) { - client.getPartitionsForTopicAsync(topic, callback); - }), - partitions); - - boost::python::list pyList; - for (int i = 0; i < partitions.size(); i++) { - pyList.append(boost::python::object(partitions[i])); - } - - return pyList; -} - -void Client_close(Client& client) { - waitForAsyncResult([&](ResultCallback callback) { client.closeAsync(callback); }); -} - -void export_client() { - using namespace boost::python; - - class_("Client", init()) - .def("create_producer", &Client_createProducer) - .def("subscribe", &Client_subscribe) - .def("subscribe_topics", &Client_subscribe_topics) - .def("subscribe_pattern", &Client_subscribe_pattern) - .def("create_reader", &Client_createReader) - .def("get_topic_partitions", &Client_getTopicPartitions) - .def("close", &Client_close) - .def("shutdown", &Client::shutdown); -} diff --git a/pulsar-client-cpp/python/src/config.cc b/pulsar-client-cpp/python/src/config.cc deleted file mode 100644 index fed9c283f6a4c..0000000000000 --- a/pulsar-client-cpp/python/src/config.cc +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" -#include -#include "lib/Utils.h" -#include - -template -struct ListenerWrapper { - PyObject* _pyListener; - - ListenerWrapper(py::object pyListener) : _pyListener(pyListener.ptr()) { Py_XINCREF(_pyListener); } - - ListenerWrapper(const ListenerWrapper& other) { - _pyListener = other._pyListener; - Py_XINCREF(_pyListener); - } - - ListenerWrapper& operator=(const ListenerWrapper& other) { - _pyListener = other._pyListener; - Py_XINCREF(_pyListener); - return *this; - } - - virtual ~ListenerWrapper() { Py_XDECREF(_pyListener); } - - void operator()(T consumer, const Message& msg) { - PyGILState_STATE state = PyGILState_Ensure(); - - try { - py::call(_pyListener, py::object(&consumer), py::object(&msg)); - } catch (const py::error_already_set& e) { - PyErr_Print(); - } - - PyGILState_Release(state); - } -}; - -static ConsumerConfiguration& ConsumerConfiguration_setMessageListener(ConsumerConfiguration& conf, - py::object pyListener) { - conf.setMessageListener(ListenerWrapper(pyListener)); - return conf; -} - -static ReaderConfiguration& ReaderConfiguration_setReaderListener(ReaderConfiguration& conf, - py::object pyListener) { - conf.setReaderListener(ListenerWrapper(pyListener)); - return conf; -} - -static ClientConfiguration& ClientConfiguration_setAuthentication(ClientConfiguration& conf, - py::object authentication) { - AuthenticationWrapper wrapper = py::extract(authentication); - conf.setAuth(wrapper.auth); - return conf; -} - -static ConsumerConfiguration& ConsumerConfiguration_setCryptoKeyReader(ConsumerConfiguration& conf, - py::object cryptoKeyReader) { - CryptoKeyReaderWrapper cryptoKeyReaderWrapper = py::extract(cryptoKeyReader); - conf.setCryptoKeyReader(cryptoKeyReaderWrapper.cryptoKeyReader); - return conf; -} - -static ProducerConfiguration& ProducerConfiguration_setCryptoKeyReader(ProducerConfiguration& conf, - py::object cryptoKeyReader) { - CryptoKeyReaderWrapper cryptoKeyReaderWrapper = py::extract(cryptoKeyReader); - 
conf.setCryptoKeyReader(cryptoKeyReaderWrapper.cryptoKeyReader); - return conf; -} - -static ReaderConfiguration& ReaderConfiguration_setCryptoKeyReader(ReaderConfiguration& conf, - py::object cryptoKeyReader) { - CryptoKeyReaderWrapper cryptoKeyReaderWrapper = py::extract(cryptoKeyReader); - conf.setCryptoKeyReader(cryptoKeyReaderWrapper.cryptoKeyReader); - return conf; -} - -class LoggerWrapper : public Logger, public CaptivePythonObjectMixin { - const std::unique_ptr _fallbackLogger; - - public: - LoggerWrapper(PyObject* pyLogger, Logger* fallbackLogger) - : CaptivePythonObjectMixin(pyLogger), _fallbackLogger(fallbackLogger) {} - - LoggerWrapper(const LoggerWrapper&) = delete; - LoggerWrapper(LoggerWrapper&&) noexcept = delete; - LoggerWrapper& operator=(const LoggerWrapper&) = delete; - LoggerWrapper& operator=(LoggerWrapper&&) = delete; - - bool isEnabled(Level level) { - return true; // Python loggers are always enabled; they decide internally whether or not to log. - } - - void log(Level level, int line, const std::string& message) { - if (!Py_IsInitialized()) { - // Python logger is unavailable - fallback to console logger - _fallbackLogger->log(level, line, message); - } else { - PyGILState_STATE state = PyGILState_Ensure(); - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - try { - switch (level) { - case Logger::LEVEL_DEBUG: - py::call(_captive, "DEBUG", message.c_str()); - break; - case Logger::LEVEL_INFO: - py::call(_captive, "INFO", message.c_str()); - break; - case Logger::LEVEL_WARN: - py::call(_captive, "WARNING", message.c_str()); - break; - case Logger::LEVEL_ERROR: - py::call(_captive, "ERROR", message.c_str()); - break; - } - } catch (const py::error_already_set& e) { - PyErr_Print(); - _fallbackLogger->log(level, line, message); - } - PyErr_Restore(type, value, traceback); - PyGILState_Release(state); - } - } -}; - -class LoggerWrapperFactory : public LoggerFactory, public CaptivePythonObjectMixin { - 
std::unique_ptr _fallbackLoggerFactory{new ConsoleLoggerFactory}; - - public: - LoggerWrapperFactory(py::object pyLogger) : CaptivePythonObjectMixin(pyLogger.ptr()) {} - - Logger* getLogger(const std::string& fileName) { - const auto fallbackLogger = _fallbackLoggerFactory->getLogger(fileName); - if (_captive == py::object().ptr()) { - return fallbackLogger; - } else { - return new LoggerWrapper(_captive, fallbackLogger); - } - } -}; - -static ClientConfiguration& ClientConfiguration_setLogger(ClientConfiguration& conf, py::object logger) { - conf.setLogger(new LoggerWrapperFactory(logger)); - return conf; -} - -void export_config() { - using namespace boost::python; - - class_("ClientConfiguration") - .def("authentication", &ClientConfiguration_setAuthentication, return_self<>()) - .def("operation_timeout_seconds", &ClientConfiguration::getOperationTimeoutSeconds) - .def("operation_timeout_seconds", &ClientConfiguration::setOperationTimeoutSeconds, return_self<>()) - .def("connection_timeout", &ClientConfiguration::getConnectionTimeout) - .def("connection_timeout", &ClientConfiguration::setConnectionTimeout, return_self<>()) - .def("io_threads", &ClientConfiguration::getIOThreads) - .def("io_threads", &ClientConfiguration::setIOThreads, return_self<>()) - .def("message_listener_threads", &ClientConfiguration::getMessageListenerThreads) - .def("message_listener_threads", &ClientConfiguration::setMessageListenerThreads, return_self<>()) - .def("concurrent_lookup_requests", &ClientConfiguration::getConcurrentLookupRequest) - .def("concurrent_lookup_requests", &ClientConfiguration::setConcurrentLookupRequest, return_self<>()) - .def("log_conf_file_path", &ClientConfiguration::getLogConfFilePath, - return_value_policy()) - .def("log_conf_file_path", &ClientConfiguration::setLogConfFilePath, return_self<>()) - .def("use_tls", &ClientConfiguration::isUseTls) - .def("use_tls", &ClientConfiguration::setUseTls, return_self<>()) - .def("tls_trust_certs_file_path", 
&ClientConfiguration::getTlsTrustCertsFilePath, - return_value_policy()) - .def("tls_trust_certs_file_path", &ClientConfiguration::setTlsTrustCertsFilePath, return_self<>()) - .def("tls_allow_insecure_connection", &ClientConfiguration::isTlsAllowInsecureConnection) - .def("tls_allow_insecure_connection", &ClientConfiguration::setTlsAllowInsecureConnection, - return_self<>()) - .def("tls_validate_hostname", &ClientConfiguration::setValidateHostName, return_self<>()) - .def("listener_name", &ClientConfiguration::setListenerName, return_self<>()) - .def("set_logger", &ClientConfiguration_setLogger, return_self<>()); - - class_("ProducerConfiguration") - .def("producer_name", &ProducerConfiguration::getProducerName, - return_value_policy()) - .def("producer_name", &ProducerConfiguration::setProducerName, return_self<>()) - .def("schema", &ProducerConfiguration::getSchema, return_value_policy()) - .def("schema", &ProducerConfiguration::setSchema, return_self<>()) - .def("send_timeout_millis", &ProducerConfiguration::getSendTimeout) - .def("send_timeout_millis", &ProducerConfiguration::setSendTimeout, return_self<>()) - .def("initial_sequence_id", &ProducerConfiguration::getInitialSequenceId) - .def("initial_sequence_id", &ProducerConfiguration::setInitialSequenceId, return_self<>()) - .def("compression_type", &ProducerConfiguration::getCompressionType) - .def("compression_type", &ProducerConfiguration::setCompressionType, return_self<>()) - .def("max_pending_messages", &ProducerConfiguration::getMaxPendingMessages) - .def("max_pending_messages", &ProducerConfiguration::setMaxPendingMessages, return_self<>()) - .def("max_pending_messages_across_partitions", - &ProducerConfiguration::getMaxPendingMessagesAcrossPartitions) - .def("max_pending_messages_across_partitions", - &ProducerConfiguration::setMaxPendingMessagesAcrossPartitions, return_self<>()) - .def("block_if_queue_full", &ProducerConfiguration::getBlockIfQueueFull) - .def("block_if_queue_full", 
&ProducerConfiguration::setBlockIfQueueFull, return_self<>()) - .def("partitions_routing_mode", &ProducerConfiguration::getPartitionsRoutingMode) - .def("partitions_routing_mode", &ProducerConfiguration::setPartitionsRoutingMode, return_self<>()) - .def("lazy_start_partitioned_producers", &ProducerConfiguration::getLazyStartPartitionedProducers) - .def("lazy_start_partitioned_producers", &ProducerConfiguration::setLazyStartPartitionedProducers, - return_self<>()) - .def("batching_enabled", &ProducerConfiguration::getBatchingEnabled, - return_value_policy()) - .def("batching_enabled", &ProducerConfiguration::setBatchingEnabled, return_self<>()) - .def("batching_max_messages", &ProducerConfiguration::getBatchingMaxMessages, - return_value_policy()) - .def("batching_max_messages", &ProducerConfiguration::setBatchingMaxMessages, return_self<>()) - .def("batching_max_allowed_size_in_bytes", &ProducerConfiguration::getBatchingMaxAllowedSizeInBytes, - return_value_policy()) - .def("batching_max_allowed_size_in_bytes", &ProducerConfiguration::setBatchingMaxAllowedSizeInBytes, - return_self<>()) - .def("batching_max_publish_delay_ms", &ProducerConfiguration::getBatchingMaxPublishDelayMs, - return_value_policy()) - .def("batching_max_publish_delay_ms", &ProducerConfiguration::setBatchingMaxPublishDelayMs, - return_self<>()) - .def("chunking_enabled", &ProducerConfiguration::isChunkingEnabled) - .def("chunking_enabled", &ProducerConfiguration::setChunkingEnabled, return_self<>()) - .def("property", &ProducerConfiguration::setProperty, return_self<>()) - .def("batching_type", &ProducerConfiguration::setBatchingType, return_self<>()) - .def("batching_type", &ProducerConfiguration::getBatchingType) - .def("encryption_key", &ProducerConfiguration::addEncryptionKey, return_self<>()) - .def("crypto_key_reader", &ProducerConfiguration_setCryptoKeyReader, return_self<>()); - - class_("ConsumerConfiguration") - .def("consumer_type", &ConsumerConfiguration::getConsumerType) - 
.def("consumer_type", &ConsumerConfiguration::setConsumerType, return_self<>()) - .def("schema", &ConsumerConfiguration::getSchema, return_value_policy()) - .def("schema", &ConsumerConfiguration::setSchema, return_self<>()) - .def("message_listener", &ConsumerConfiguration_setMessageListener, return_self<>()) - .def("receiver_queue_size", &ConsumerConfiguration::getReceiverQueueSize) - .def("receiver_queue_size", &ConsumerConfiguration::setReceiverQueueSize) - .def("max_total_receiver_queue_size_across_partitions", - &ConsumerConfiguration::getMaxTotalReceiverQueueSizeAcrossPartitions) - .def("max_total_receiver_queue_size_across_partitions", - &ConsumerConfiguration::setMaxTotalReceiverQueueSizeAcrossPartitions) - .def("consumer_name", &ConsumerConfiguration::getConsumerName, - return_value_policy()) - .def("consumer_name", &ConsumerConfiguration::setConsumerName) - .def("unacked_messages_timeout_ms", &ConsumerConfiguration::getUnAckedMessagesTimeoutMs) - .def("unacked_messages_timeout_ms", &ConsumerConfiguration::setUnAckedMessagesTimeoutMs) - .def("negative_ack_redelivery_delay_ms", &ConsumerConfiguration::getNegativeAckRedeliveryDelayMs) - .def("negative_ack_redelivery_delay_ms", &ConsumerConfiguration::setNegativeAckRedeliveryDelayMs) - .def("broker_consumer_stats_cache_time_ms", - &ConsumerConfiguration::getBrokerConsumerStatsCacheTimeInMs) - .def("broker_consumer_stats_cache_time_ms", - &ConsumerConfiguration::setBrokerConsumerStatsCacheTimeInMs) - .def("pattern_auto_discovery_period", &ConsumerConfiguration::getPatternAutoDiscoveryPeriod) - .def("pattern_auto_discovery_period", &ConsumerConfiguration::setPatternAutoDiscoveryPeriod) - .def("read_compacted", &ConsumerConfiguration::isReadCompacted) - .def("read_compacted", &ConsumerConfiguration::setReadCompacted) - .def("property", &ConsumerConfiguration::setProperty, return_self<>()) - .def("subscription_initial_position", &ConsumerConfiguration::getSubscriptionInitialPosition) - 
.def("subscription_initial_position", &ConsumerConfiguration::setSubscriptionInitialPosition) - .def("crypto_key_reader", &ConsumerConfiguration_setCryptoKeyReader, return_self<>()) - .def("replicate_subscription_state_enabled", - &ConsumerConfiguration::setReplicateSubscriptionStateEnabled) - .def("replicate_subscription_state_enabled", - &ConsumerConfiguration::isReplicateSubscriptionStateEnabled) - .def("max_pending_chunked_message", &ConsumerConfiguration::getMaxPendingChunkedMessage) - .def("max_pending_chunked_message", &ConsumerConfiguration::setMaxPendingChunkedMessage, - return_self<>()) - .def("auto_ack_oldest_chunked_message_on_queue_full", - &ConsumerConfiguration::isAutoAckOldestChunkedMessageOnQueueFull) - .def("auto_ack_oldest_chunked_message_on_queue_full", - &ConsumerConfiguration::setAutoAckOldestChunkedMessageOnQueueFull, return_self<>()); - - class_("ReaderConfiguration") - .def("reader_listener", &ReaderConfiguration_setReaderListener, return_self<>()) - .def("schema", &ReaderConfiguration::getSchema, return_value_policy()) - .def("schema", &ReaderConfiguration::setSchema, return_self<>()) - .def("receiver_queue_size", &ReaderConfiguration::getReceiverQueueSize) - .def("receiver_queue_size", &ReaderConfiguration::setReceiverQueueSize) - .def("reader_name", &ReaderConfiguration::getReaderName, return_value_policy()) - .def("reader_name", &ReaderConfiguration::setReaderName) - .def("subscription_role_prefix", &ReaderConfiguration::getSubscriptionRolePrefix, - return_value_policy()) - .def("subscription_role_prefix", &ReaderConfiguration::setSubscriptionRolePrefix) - .def("read_compacted", &ReaderConfiguration::isReadCompacted) - .def("read_compacted", &ReaderConfiguration::setReadCompacted) - .def("crypto_key_reader", &ReaderConfiguration_setCryptoKeyReader, return_self<>()); -} diff --git a/pulsar-client-cpp/python/src/consumer.cc b/pulsar-client-cpp/python/src/consumer.cc deleted file mode 100644 index 811ceb3ddf553..0000000000000 --- 
a/pulsar-client-cpp/python/src/consumer.cc +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "utils.h" - -void Consumer_unsubscribe(Consumer& consumer) { - waitForAsyncResult([&consumer](ResultCallback callback) { consumer.unsubscribeAsync(callback); }); -} - -Message Consumer_receive(Consumer& consumer) { - Message msg; - - waitForAsyncValue(std::function( - [&consumer](ReceiveCallback callback) { consumer.receiveAsync(callback); }), - msg); - - return msg; -} - -Message Consumer_receive_timeout(Consumer& consumer, int timeoutMs) { - Message msg; - Result res; - Py_BEGIN_ALLOW_THREADS res = consumer.receive(msg, timeoutMs); - Py_END_ALLOW_THREADS - - CHECK_RESULT(res); - return msg; -} - -void Consumer_acknowledge(Consumer& consumer, const Message& msg) { consumer.acknowledgeAsync(msg, nullptr); } - -void Consumer_acknowledge_message_id(Consumer& consumer, const MessageId& msgId) { - consumer.acknowledgeAsync(msgId, nullptr); -} - -void Consumer_negative_acknowledge(Consumer& consumer, const Message& msg) { - consumer.negativeAcknowledge(msg); -} - -void Consumer_negative_acknowledge_message_id(Consumer& consumer, const MessageId& msgId) { - 
consumer.negativeAcknowledge(msgId); -} - -void Consumer_acknowledge_cumulative(Consumer& consumer, const Message& msg) { - consumer.acknowledgeCumulativeAsync(msg, nullptr); -} - -void Consumer_acknowledge_cumulative_message_id(Consumer& consumer, const MessageId& msgId) { - consumer.acknowledgeCumulativeAsync(msgId, nullptr); -} - -void Consumer_close(Consumer& consumer) { - waitForAsyncResult([&consumer](ResultCallback callback) { consumer.closeAsync(callback); }); -} - -void Consumer_pauseMessageListener(Consumer& consumer) { CHECK_RESULT(consumer.pauseMessageListener()); } - -void Consumer_resumeMessageListener(Consumer& consumer) { CHECK_RESULT(consumer.resumeMessageListener()); } - -void Consumer_seek(Consumer& consumer, const MessageId& msgId) { - waitForAsyncResult([msgId, &consumer](ResultCallback callback) { consumer.seekAsync(msgId, callback); }); -} - -void Consumer_seek_timestamp(Consumer& consumer, uint64_t timestamp) { - waitForAsyncResult( - [timestamp, &consumer](ResultCallback callback) { consumer.seekAsync(timestamp, callback); }); -} - -bool Consumer_is_connected(Consumer& consumer) { return consumer.isConnected(); } - -MessageId Consumer_get_last_message_id(Consumer& consumer) { - MessageId msgId; - Result res; - Py_BEGIN_ALLOW_THREADS res = consumer.getLastMessageId(msgId); - Py_END_ALLOW_THREADS - - CHECK_RESULT(res); - return msgId; -} - -void export_consumer() { - using namespace boost::python; - - class_("Consumer", no_init) - .def("topic", &Consumer::getTopic, "return the topic this consumer is subscribed to", - return_value_policy()) - .def("subscription_name", &Consumer::getSubscriptionName, return_value_policy()) - .def("unsubscribe", &Consumer_unsubscribe) - .def("receive", &Consumer_receive) - .def("receive", &Consumer_receive_timeout) - .def("acknowledge", &Consumer_acknowledge) - .def("acknowledge", &Consumer_acknowledge_message_id) - .def("acknowledge_cumulative", &Consumer_acknowledge_cumulative) - .def("acknowledge_cumulative", 
&Consumer_acknowledge_cumulative_message_id) - .def("negative_acknowledge", &Consumer_negative_acknowledge) - .def("negative_acknowledge", &Consumer_negative_acknowledge_message_id) - .def("close", &Consumer_close) - .def("pause_message_listener", &Consumer_pauseMessageListener) - .def("resume_message_listener", &Consumer_resumeMessageListener) - .def("redeliver_unacknowledged_messages", &Consumer::redeliverUnacknowledgedMessages) - .def("seek", &Consumer_seek) - .def("seek", &Consumer_seek_timestamp) - .def("is_connected", &Consumer_is_connected) - .def("get_last_message_id", &Consumer_get_last_message_id); -} diff --git a/pulsar-client-cpp/python/src/cryptoKeyReader.cc b/pulsar-client-cpp/python/src/cryptoKeyReader.cc deleted file mode 100644 index 2c46b6fb5af19..0000000000000 --- a/pulsar-client-cpp/python/src/cryptoKeyReader.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -CryptoKeyReaderWrapper::CryptoKeyReaderWrapper() {} - -CryptoKeyReaderWrapper::CryptoKeyReaderWrapper(const std::string& publicKeyPath, - const std::string& privateKeyPath) { - this->cryptoKeyReader = DefaultCryptoKeyReader::create(publicKeyPath, privateKeyPath); -} - -void export_cryptoKeyReader() { - using namespace boost::python; - - class_("CryptoKeyReader", init()); -} \ No newline at end of file diff --git a/pulsar-client-cpp/python/src/enums.cc b/pulsar-client-cpp/python/src/enums.cc deleted file mode 100644 index 92f08a1684737..0000000000000 --- a/pulsar-client-cpp/python/src/enums.cc +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -void export_enums() { - using namespace boost::python; - - enum_("PartitionsRoutingMode") - .value("UseSinglePartition", ProducerConfiguration::UseSinglePartition) - .value("RoundRobinDistribution", ProducerConfiguration::RoundRobinDistribution) - .value("CustomPartition", ProducerConfiguration::CustomPartition); - - enum_("CompressionType") - .value("NONE", CompressionNone) // Don't use 'None' since it's a keyword in py3 - .value("LZ4", CompressionLZ4) - .value("ZLib", CompressionZLib) - .value("ZSTD", CompressionZSTD) - .value("SNAPPY", CompressionSNAPPY); - - enum_("ConsumerType") - .value("Exclusive", ConsumerExclusive) - .value("Shared", ConsumerShared) - .value("Failover", ConsumerFailover) - .value("KeyShared", ConsumerKeyShared); - - enum_("Result", "Collection of return codes") - .value("Ok", ResultOk) - .value("UnknownError", ResultUnknownError) - .value("InvalidConfiguration", ResultInvalidConfiguration) - .value("Timeout", ResultTimeout) - .value("LookupError", ResultLookupError) - .value("ConnectError", ResultConnectError) - .value("ReadError", ResultReadError) - .value("AuthenticationError", ResultAuthenticationError) - .value("AuthorizationError", ResultAuthorizationError) - .value("ErrorGettingAuthenticationData", ResultErrorGettingAuthenticationData) - .value("BrokerMetadataError", ResultBrokerMetadataError) - .value("BrokerPersistenceError", ResultBrokerPersistenceError) - .value("ChecksumError", ResultChecksumError) - .value("ConsumerBusy", ResultConsumerBusy) - .value("NotConnected", ResultNotConnected) - .value("AlreadyClosed", ResultAlreadyClosed) - .value("InvalidMessage", ResultInvalidMessage) - .value("ConsumerNotInitialized", ResultConsumerNotInitialized) - .value("ProducerNotInitialized", ResultProducerNotInitialized) - .value("ProducerBusy", ResultProducerBusy) - .value("TooManyLookupRequestException", ResultTooManyLookupRequestException) - .value("InvalidTopicName", ResultInvalidTopicName) - 
.value("InvalidUrl", ResultInvalidUrl) - .value("ServiceUnitNotReady", ResultServiceUnitNotReady) - .value("OperationNotSupported", ResultOperationNotSupported) - .value("ProducerBlockedQuotaExceededError", ResultProducerBlockedQuotaExceededError) - .value("ProducerBlockedQuotaExceededException", ResultProducerBlockedQuotaExceededException) - .value("ProducerQueueIsFull", ResultProducerQueueIsFull) - .value("MessageTooBig", ResultMessageTooBig) - .value("TopicNotFound", ResultTopicNotFound) - .value("SubscriptionNotFound", ResultSubscriptionNotFound) - .value("ConsumerNotFound", ResultConsumerNotFound) - .value("UnsupportedVersionError", ResultUnsupportedVersionError) - .value("TopicTerminated", ResultTopicTerminated) - .value("CryptoError", ResultCryptoError) - .value("IncompatibleSchema", ResultIncompatibleSchema) - .value("ConsumerAssignError", ResultConsumerAssignError) - .value("CumulativeAcknowledgementNotAllowedError", ResultCumulativeAcknowledgementNotAllowedError) - .value("TransactionCoordinatorNotFoundError", ResultTransactionCoordinatorNotFoundError) - .value("InvalidTxnStatusError", ResultInvalidTxnStatusError) - .value("NotAllowedError", ResultNotAllowedError) - .value("TransactionConflict", ResultTransactionConflict) - .value("TransactionNotFound", ResultTransactionNotFound) - .value("ProducerFenced", ResultProducerFenced) - .value("MemoryBufferIsFull", ResultMemoryBufferIsFull) - .value("Interrupted", pulsar::ResultInterrupted); - - enum_("SchemaType", "Supported schema types") - .value("NONE", pulsar::NONE) - .value("STRING", pulsar::STRING) - .value("INT8", pulsar::INT8) - .value("INT16", pulsar::INT16) - .value("INT32", pulsar::INT32) - .value("INT64", pulsar::INT64) - .value("FLOAT", pulsar::FLOAT) - .value("DOUBLE", pulsar::DOUBLE) - .value("BYTES", pulsar::BYTES) - .value("JSON", pulsar::JSON) - .value("PROTOBUF", pulsar::PROTOBUF) - .value("AVRO", pulsar::AVRO) - .value("AUTO_CONSUME", pulsar::AUTO_CONSUME) - .value("AUTO_PUBLISH", 
pulsar::AUTO_PUBLISH) - .value("KEY_VALUE", pulsar::KEY_VALUE); - - enum_("InitialPosition", "Supported initial position") - .value("Latest", InitialPositionLatest) - .value("Earliest", InitialPositionEarliest); - - enum_("BatchingType", "Supported batching types") - .value("Default", ProducerConfiguration::DefaultBatching) - .value("KeyBased", ProducerConfiguration::KeyBasedBatching); -} diff --git a/pulsar-client-cpp/python/src/exceptions.cc b/pulsar-client-cpp/python/src/exceptions.cc deleted file mode 100644 index efca661b7c06e..0000000000000 --- a/pulsar-client-cpp/python/src/exceptions.cc +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include "utils.h" - -static PyObject* basePulsarException = nullptr; -std::map exceptions; - -PyObject* createExceptionClass(const char* name, PyObject* baseTypeObj = PyExc_Exception) { - using namespace boost::python; - - std::string fullName = "_pulsar."; - fullName += name; - - PyObject* typeObj = PyErr_NewException(const_cast(fullName.c_str()), baseTypeObj, nullptr); - if (!typeObj) throw_error_already_set(); - scope().attr(name) = handle<>(borrowed(typeObj)); - return typeObj; -} - -PyObject* get_exception_class(Result result) { - auto it = exceptions.find(result); - if (it != exceptions.end()) { - return it->second; - } else { - std::cerr << "Error result exception not found: " << result << std::endl; - abort(); - } -} - -void export_exceptions() { - using namespace boost::python; - - basePulsarException = createExceptionClass("PulsarException"); - - exceptions[ResultUnknownError] = createExceptionClass("UnknownError", basePulsarException); - exceptions[ResultInvalidConfiguration] = - createExceptionClass("InvalidConfiguration", basePulsarException); - exceptions[ResultTimeout] = createExceptionClass("Timeout", basePulsarException); - exceptions[ResultLookupError] = createExceptionClass("LookupError", basePulsarException); - exceptions[ResultConnectError] = createExceptionClass("ConnectError", basePulsarException); - exceptions[ResultReadError] = createExceptionClass("ReadError", basePulsarException); - exceptions[ResultAuthenticationError] = createExceptionClass("AuthenticationError", basePulsarException); - exceptions[ResultAuthorizationError] = createExceptionClass("AuthorizationError", basePulsarException); - exceptions[ResultErrorGettingAuthenticationData] = - createExceptionClass("ErrorGettingAuthenticationData", basePulsarException); - exceptions[ResultBrokerMetadataError] = createExceptionClass("BrokerMetadataError", basePulsarException); - exceptions[ResultBrokerPersistenceError] = - createExceptionClass("BrokerPersistenceError", 
basePulsarException); - exceptions[ResultChecksumError] = createExceptionClass("ChecksumError", basePulsarException); - exceptions[ResultConsumerBusy] = createExceptionClass("ConsumerBusy", basePulsarException); - exceptions[ResultNotConnected] = createExceptionClass("NotConnected", basePulsarException); - exceptions[ResultAlreadyClosed] = createExceptionClass("AlreadyClosed", basePulsarException); - exceptions[ResultInvalidMessage] = createExceptionClass("InvalidMessage", basePulsarException); - exceptions[ResultConsumerNotInitialized] = - createExceptionClass("ConsumerNotInitialized", basePulsarException); - exceptions[ResultProducerNotInitialized] = - createExceptionClass("ProducerNotInitialized", basePulsarException); - exceptions[ResultProducerBusy] = createExceptionClass("ProducerBusy", basePulsarException); - exceptions[ResultTooManyLookupRequestException] = - createExceptionClass("TooManyLookupRequestException", basePulsarException); - exceptions[ResultInvalidTopicName] = createExceptionClass("InvalidTopicName", basePulsarException); - exceptions[ResultInvalidUrl] = createExceptionClass("InvalidUrl", basePulsarException); - exceptions[ResultServiceUnitNotReady] = createExceptionClass("ServiceUnitNotReady", basePulsarException); - exceptions[ResultOperationNotSupported] = - createExceptionClass("OperationNotSupported", basePulsarException); - exceptions[ResultProducerBlockedQuotaExceededError] = - createExceptionClass("ProducerBlockedQuotaExceededError", basePulsarException); - exceptions[ResultProducerBlockedQuotaExceededException] = - createExceptionClass("ProducerBlockedQuotaExceededException", basePulsarException); - exceptions[ResultProducerQueueIsFull] = createExceptionClass("ProducerQueueIsFull", basePulsarException); - exceptions[ResultMessageTooBig] = createExceptionClass("MessageTooBig", basePulsarException); - exceptions[ResultTopicNotFound] = createExceptionClass("TopicNotFound", basePulsarException); - exceptions[ResultSubscriptionNotFound] = - 
createExceptionClass("SubscriptionNotFound", basePulsarException); - exceptions[ResultConsumerNotFound] = createExceptionClass("ConsumerNotFound", basePulsarException); - exceptions[ResultUnsupportedVersionError] = - createExceptionClass("UnsupportedVersionError", basePulsarException); - exceptions[ResultTopicTerminated] = createExceptionClass("TopicTerminated", basePulsarException); - exceptions[ResultCryptoError] = createExceptionClass("CryptoError", basePulsarException); - exceptions[ResultIncompatibleSchema] = createExceptionClass("IncompatibleSchema", basePulsarException); - exceptions[ResultConsumerAssignError] = createExceptionClass("ConsumerAssignError", basePulsarException); - exceptions[ResultCumulativeAcknowledgementNotAllowedError] = - createExceptionClass("CumulativeAcknowledgementNotAllowedError", basePulsarException); - exceptions[ResultTransactionCoordinatorNotFoundError] = - createExceptionClass("TransactionCoordinatorNotFoundError", basePulsarException); - exceptions[ResultInvalidTxnStatusError] = - createExceptionClass("InvalidTxnStatusError", basePulsarException); - exceptions[ResultNotAllowedError] = createExceptionClass("NotAllowedError", basePulsarException); - exceptions[ResultTransactionConflict] = createExceptionClass("TransactionConflict", basePulsarException); - exceptions[ResultTransactionNotFound] = createExceptionClass("TransactionNotFound", basePulsarException); - exceptions[ResultProducerFenced] = createExceptionClass("ProducerFenced", basePulsarException); - exceptions[ResultMemoryBufferIsFull] = createExceptionClass("MemoryBufferIsFull", basePulsarException); - exceptions[ResultInterrupted] = createExceptionClass("Interrupted", basePulsarException); -} diff --git a/pulsar-client-cpp/python/src/message.cc b/pulsar-client-cpp/python/src/message.cc deleted file mode 100644 index b93380bc7afb9..0000000000000 --- a/pulsar-client-cpp/python/src/message.cc +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "utils.h" - -#include -#include -#include - -std::string MessageId_str(const MessageId& msgId) { - std::stringstream ss; - ss << msgId; - return ss.str(); -} - -bool MessageId_eq(const MessageId& a, const MessageId& b) { return a == b; } - -bool MessageId_ne(const MessageId& a, const MessageId& b) { return a != b; } - -bool MessageId_lt(const MessageId& a, const MessageId& b) { return a < b; } - -bool MessageId_le(const MessageId& a, const MessageId& b) { return a <= b; } - -bool MessageId_gt(const MessageId& a, const MessageId& b) { return a > b; } - -bool MessageId_ge(const MessageId& a, const MessageId& b) { return a >= b; } - -boost::python::object MessageId_serialize(const MessageId& msgId) { - std::string serialized; - msgId.serialize(serialized); - return boost::python::object( - boost::python::handle<>(PyBytes_FromStringAndSize(serialized.c_str(), serialized.length()))); -} - -std::string Message_str(const Message& msg) { - std::stringstream ss; - ss << msg; - return ss.str(); -} - -boost::python::object Message_data(const Message& msg) { - return boost::python::object( - boost::python::handle<>(PyBytes_FromStringAndSize((const char*)msg.getData(), msg.getLength()))); -} - 
-boost::python::object Message_properties(const Message& msg) { - boost::python::dict pyProperties; - for (const auto& item : msg.getProperties()) { - pyProperties[item.first] = item.second; - } - return boost::python::object(std::move(pyProperties)); -} - -std::string Topic_name_str(const Message& msg) { - std::stringstream ss; - ss << msg.getTopicName(); - return ss.str(); -} - -std::string schema_version_str(const Message& msg) { - std::stringstream ss; - ss << msg.getSchemaVersion(); - return ss.str(); -} - -const MessageId& Message_getMessageId(const Message& msg) { return msg.getMessageId(); } - -void deliverAfter(MessageBuilder* const builder, PyObject* obj_delta) { - PyDateTime_Delta const* pydelta = reinterpret_cast(obj_delta); - - long days = pydelta->days; - const bool is_negative = days < 0; - if (is_negative) { - days = -days; - } - - // Create chrono duration object - std::chrono::milliseconds duration = std::chrono::duration_cast( - std::chrono::hours(24) * days + std::chrono::seconds(pydelta->seconds) + - std::chrono::microseconds(pydelta->microseconds)); - - if (is_negative) { - duration = duration * -1; - } - - builder->setDeliverAfter(duration); -} - -void export_message() { - using namespace boost::python; - - PyDateTime_IMPORT; - - MessageBuilder& (MessageBuilder::*MessageBuilderSetContentString)(const std::string&) = - &MessageBuilder::setContent; - - class_("MessageBuilder") - .def("content", MessageBuilderSetContentString, return_self<>()) - .def("property", &MessageBuilder::setProperty, return_self<>()) - .def("properties", &MessageBuilder::setProperties, return_self<>()) - .def("sequence_id", &MessageBuilder::setSequenceId, return_self<>()) - .def("deliver_after", &deliverAfter, return_self<>()) - .def("deliver_at", &MessageBuilder::setDeliverAt, return_self<>()) - .def("partition_key", &MessageBuilder::setPartitionKey, return_self<>()) - .def("event_timestamp", &MessageBuilder::setEventTimestamp, return_self<>()) - 
.def("replication_clusters", &MessageBuilder::setReplicationClusters, return_self<>()) - .def("disable_replication", &MessageBuilder::disableReplication, return_self<>()) - .def("build", &MessageBuilder::build); - - class_("MessageStringMap").def(map_indexing_suite()); - - static const MessageId& _MessageId_earliest = MessageId::earliest(); - static const MessageId& _MessageId_latest = MessageId::latest(); - - class_("MessageId") - .def(init()) - .def("__str__", &MessageId_str) - .def("__eq__", &MessageId_eq) - .def("__ne__", &MessageId_ne) - .def("__le__", &MessageId_le) - .def("__lt__", &MessageId_lt) - .def("__ge__", &MessageId_ge) - .def("__gt__", &MessageId_gt) - .def("ledger_id", &MessageId::ledgerId) - .def("entry_id", &MessageId::entryId) - .def("batch_index", &MessageId::batchIndex) - .def("partition", &MessageId::partition) - .add_static_property("earliest", make_getter(&_MessageId_earliest)) - .add_static_property("latest", make_getter(&_MessageId_latest)) - .def("serialize", &MessageId_serialize) - .def("deserialize", &MessageId::deserialize) - .staticmethod("deserialize"); - - class_("Message") - .def("properties", &Message_properties) - .def("data", &Message_data) - .def("length", &Message::getLength) - .def("partition_key", &Message::getPartitionKey, return_value_policy()) - .def("publish_timestamp", &Message::getPublishTimestamp) - .def("event_timestamp", &Message::getEventTimestamp) - .def("message_id", &Message_getMessageId, return_value_policy()) - .def("__str__", &Message_str) - .def("topic_name", &Topic_name_str) - .def("redelivery_count", &Message::getRedeliveryCount) - .def("schema_version", &schema_version_str); - - MessageBatch& (MessageBatch::*MessageBatchParseFromString)(const std::string& payload, - uint32_t batchSize) = &MessageBatch::parseFrom; - - class_("MessageBatch") - .def("with_message_id", &MessageBatch::withMessageId, return_self<>()) - .def("parse_from", MessageBatchParseFromString, return_self<>()) - .def("messages", 
&MessageBatch::messages, return_value_policy()); - - class_ >("Messages").def(vector_indexing_suite >()); -} diff --git a/pulsar-client-cpp/python/src/producer.cc b/pulsar-client-cpp/python/src/producer.cc deleted file mode 100644 index d1a11cf7863d3..0000000000000 --- a/pulsar-client-cpp/python/src/producer.cc +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -#include - -extern boost::python::object MessageId_serialize(const MessageId& msgId); - -boost::python::object Producer_send(Producer& producer, const Message& message) { - MessageId messageId; - - waitForAsyncValue(std::function( - [&](SendCallback callback) { producer.sendAsync(message, callback); }), - messageId); - - return MessageId_serialize(messageId); -} - -void Producer_sendAsyncCallback(PyObject* callback, Result res, const MessageId& msgId) { - if (callback == Py_None) { - return; - } - - PyGILState_STATE state = PyGILState_Ensure(); - - try { - py::call(callback, res, py::object(&msgId)); - } catch (const py::error_already_set& e) { - PyErr_Print(); - } - - Py_XDECREF(callback); - PyGILState_Release(state); -} - -void Producer_sendAsync(Producer& producer, const Message& message, py::object callback) { - PyObject* pyCallback = callback.ptr(); - Py_XINCREF(pyCallback); - - Py_BEGIN_ALLOW_THREADS producer.sendAsync( - message, - std::bind(Producer_sendAsyncCallback, pyCallback, std::placeholders::_1, std::placeholders::_2)); - Py_END_ALLOW_THREADS -} - -void Producer_flush(Producer& producer) { - waitForAsyncResult([&](ResultCallback callback) { producer.flushAsync(callback); }); -} - -void Producer_close(Producer& producer) { - waitForAsyncResult([&](ResultCallback callback) { producer.closeAsync(callback); }); -} - -bool Producer_is_connected(Producer& producer) { return producer.isConnected(); } - -void export_producer() { - using namespace boost::python; - - class_("Producer", no_init) - .def("topic", &Producer::getTopic, "return the topic to which producer is publishing to", - return_value_policy()) - .def("producer_name", &Producer::getProducerName, - "return the producer name which could have been assigned by the system or specified by the " - "client", - return_value_policy()) - .def("last_sequence_id", &Producer::getLastSequenceId) - .def("send", &Producer_send, - "Publish a message on the topic associated with this 
Producer.\n" - "\n" - "This method will block until the message will be accepted and persisted\n" - "by the broker. In case of errors, the client library will try to\n" - "automatically recover and use a different broker.\n" - "\n" - "If it wasn't possible to successfully publish the message within the sendTimeout,\n" - "an error will be returned.\n" - "\n" - "This method is equivalent to asyncSend() and wait until the callback is triggered.\n" - "\n" - "@param msg message to publish\n") - .def("send_async", &Producer_sendAsync) - .def("flush", &Producer_flush, - "Flush all the messages buffered in the client and wait until all messages have been\n" - "successfully persisted\n") - .def("close", &Producer_close) - .def("is_connected", &Producer_is_connected); -} diff --git a/pulsar-client-cpp/python/src/pulsar.cc b/pulsar-client-cpp/python/src/pulsar.cc deleted file mode 100644 index a82a53381b953..0000000000000 --- a/pulsar-client-cpp/python/src/pulsar.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -void export_client(); -void export_message(); -void export_producer(); -void export_consumer(); -void export_reader(); -void export_config(); -void export_enums(); -void export_authentication(); -void export_schema(); -void export_cryptoKeyReader(); -void export_exceptions(); - -PyObject* get_exception_class(Result result); - -static void translateException(const PulsarException& ex) { - std::string err = "Pulsar error: "; - err += strResult(ex._result); - PyErr_SetString(get_exception_class(ex._result), err.c_str()); -} - -BOOST_PYTHON_MODULE(_pulsar) { - py::register_exception_translator(translateException); - - // Initialize thread support so that we can grab the GIL mutex - // from pulsar library threads - PyEval_InitThreads(); - - export_client(); - export_message(); - export_producer(); - export_consumer(); - export_reader(); - export_config(); - export_enums(); - export_authentication(); - export_schema(); - export_cryptoKeyReader(); - export_exceptions(); -} diff --git a/pulsar-client-cpp/python/src/reader.cc b/pulsar-client-cpp/python/src/reader.cc deleted file mode 100644 index 70873c8d26af2..0000000000000 --- a/pulsar-client-cpp/python/src/reader.cc +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "utils.h" - -Message Reader_readNext(Reader& reader) { - Message msg; - Result res; - - // TODO: There is currently no readNextAsync() version for the Reader. - // Once that's available, we should also convert these ad-hoc loops. - while (true) { - Py_BEGIN_ALLOW_THREADS - // Use 100ms timeout to periodically check whether the - // interpreter was interrupted - res = reader.readNext(msg, 100); - Py_END_ALLOW_THREADS - - if (res != ResultTimeout) { - // In case of timeout we keep calling receive() to simulate a - // blocking call until a message is available, while breaking - // every once in a while to check the Python signal status - break; - } - - if (PyErr_CheckSignals() == -1) { - PyErr_SetInterrupt(); - return msg; - } - } - - CHECK_RESULT(res); - return msg; -} - -Message Reader_readNextTimeout(Reader& reader, int timeoutMs) { - Message msg; - Result res; - Py_BEGIN_ALLOW_THREADS res = reader.readNext(msg, timeoutMs); - Py_END_ALLOW_THREADS - - CHECK_RESULT(res); - return msg; -} - -bool Reader_hasMessageAvailable(Reader& reader) { - bool available = false; - - waitForAsyncValue( - std::function( - [&](HasMessageAvailableCallback callback) { reader.hasMessageAvailableAsync(callback); }), - available); - - return available; -} - -void Reader_close(Reader& reader) { - waitForAsyncResult([&](ResultCallback callback) { reader.closeAsync(callback); }); -} - -void Reader_seek(Reader& reader, const MessageId& msgId) { - waitForAsyncResult([&](ResultCallback callback) { reader.seekAsync(msgId, callback); }); -} - -void Reader_seek_timestamp(Reader& reader, uint64_t timestamp) { - waitForAsyncResult([&](ResultCallback callback) { reader.seekAsync(timestamp, callback); }); -} - -bool Reader_is_connected(Reader& reader) { return reader.isConnected(); } - -void export_reader() { - using namespace boost::python; - - class_("Reader", no_init) - 
.def("topic", &Reader::getTopic, return_value_policy()) - .def("read_next", &Reader_readNext) - .def("read_next", &Reader_readNextTimeout) - .def("has_message_available", &Reader_hasMessageAvailable) - .def("close", &Reader_close) - .def("seek", &Reader_seek) - .def("seek", &Reader_seek_timestamp) - .def("is_connected", &Reader_is_connected); -} diff --git a/pulsar-client-cpp/python/src/schema.cc b/pulsar-client-cpp/python/src/schema.cc deleted file mode 100644 index cdfcda6aff14b..0000000000000 --- a/pulsar-client-cpp/python/src/schema.cc +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "utils.h" - -void export_schema() { - using namespace boost::python; - - class_("SchemaInfo", init()) - .def("schema_type", &SchemaInfo::getSchemaType) - .def("name", &SchemaInfo::getName, return_value_policy()) - .def("schema", &SchemaInfo::getSchema, return_value_policy()); -} diff --git a/pulsar-client-cpp/python/src/utils.cc b/pulsar-client-cpp/python/src/utils.cc deleted file mode 100644 index cf8f6f4b47645..0000000000000 --- a/pulsar-client-cpp/python/src/utils.cc +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include "utils.h" - -void waitForAsyncResult(std::function func) { - Result res = ResultOk; - bool b; - Promise promise; - Future future = promise.getFuture(); - - Py_BEGIN_ALLOW_THREADS func(WaitForCallback(promise)); - Py_END_ALLOW_THREADS - - bool isComplete; - while (true) { - // Check periodically for Python signals - Py_BEGIN_ALLOW_THREADS isComplete = future.get(b, std::ref(res), std::chrono::milliseconds(100)); - Py_END_ALLOW_THREADS - - if (isComplete) { - CHECK_RESULT(res); - return; - } - - if (PyErr_CheckSignals() == -1) { - PyErr_SetInterrupt(); - return; - } - } -} diff --git a/pulsar-client-cpp/python/src/utils.h b/pulsar-client-cpp/python/src/utils.h deleted file mode 100644 index 4b69ff82e6c45..0000000000000 --- a/pulsar-client-cpp/python/src/utils.h +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#pragma once - -#include - -#include -#include -#include - -using namespace pulsar; - -namespace py = boost::python; - -struct PulsarException { - Result _result; - PulsarException(Result res) : _result(res) {} -}; - -inline void CHECK_RESULT(Result res) { - if (res != ResultOk) { - throw PulsarException(res); - } -} - -void waitForAsyncResult(std::function func); - -template -inline void waitForAsyncValue(std::function func, T& value) { - Result res = ResultOk; - Promise promise; - Future future = promise.getFuture(); - - Py_BEGIN_ALLOW_THREADS func(WaitForCallbackValue(promise)); - Py_END_ALLOW_THREADS - - bool isComplete; - while (true) { - // Check periodically for Python signals - Py_BEGIN_ALLOW_THREADS isComplete = future.get(res, std::ref(value), std::chrono::milliseconds(100)); - Py_END_ALLOW_THREADS - - if (isComplete) { - CHECK_RESULT(res); - return; - } - - if (PyErr_CheckSignals() == -1) { - PyErr_SetInterrupt(); - return; - } - } -} - -struct AuthenticationWrapper { - AuthenticationPtr auth; - - AuthenticationWrapper(); - AuthenticationWrapper(const std::string& dynamicLibPath, const std::string& authParamsString); -}; - -struct CryptoKeyReaderWrapper { - CryptoKeyReaderPtr cryptoKeyReader; - - CryptoKeyReaderWrapper(); - CryptoKeyReaderWrapper(const std::string& publicKeyPath, const std::string& privateKeyPath); -}; - -class CaptivePythonObjectMixin { - protected: - PyObject* _captive; - - CaptivePythonObjectMixin(PyObject* captive) { - _captive = captive; - PyGILState_STATE state = PyGILState_Ensure(); - Py_XINCREF(_captive); - PyGILState_Release(state); - } - - ~CaptivePythonObjectMixin() { - if (Py_IsInitialized()) { - PyGILState_STATE state = PyGILState_Ensure(); - Py_XDECREF(_captive); - PyGILState_Release(state); - } - } -}; diff --git a/pulsar-client-cpp/python/test_consumer.py b/pulsar-client-cpp/python/test_consumer.py deleted file mode 100755 index 8c2985e6ca227..0000000000000 --- a/pulsar-client-cpp/python/test_consumer.py +++ 
/dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -import pulsar - -client = pulsar.Client('pulsar://localhost:6650') -consumer = client.subscribe('my-topic', "my-subscription", - properties={ - "consumer-name": "test-consumer-name", - "consumer-id": "test-consumer-id" - }) - -while True: - msg = consumer.receive() - print("Received message '{0}' id='{1}'".format(msg.data().decode('utf-8'), msg.message_id())) - consumer.acknowledge(msg) - -client.close() diff --git a/pulsar-client-cpp/python/test_producer.py b/pulsar-client-cpp/python/test_producer.py deleted file mode 100755 index c9c8ca1f83ddf..0000000000000 --- a/pulsar-client-cpp/python/test_producer.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from pulsar import BatchingType -import pulsar - - -client = pulsar.Client('pulsar://localhost:6650') - -producer = client.create_producer( - 'my-topic', - block_if_queue_full=True, - batching_enabled=True, - batching_max_publish_delay_ms=10, - properties={ - "producer-name": "test-producer-name", - "producer-id": "test-producer-id" - }, - batching_type=BatchingType.KeyBased - ) - -for i in range(10): - try: - producer.send('hello'.encode('utf-8'), None) - except Exception as e: - print("Failed to send message: %s", e) - -producer.flush() -producer.close() diff --git a/pulsar-client-cpp/run-unit-tests.sh b/pulsar-client-cpp/run-unit-tests.sh deleted file mode 100755 index d4e7ec7284d67..0000000000000 --- a/pulsar-client-cpp/run-unit-tests.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# - -set -e -git config --global --add safe.directory /pulsar - -ROOT_DIR=$(git rev-parse --show-toplevel) -cd $ROOT_DIR/pulsar-client-cpp - -JAVA_HOME=/usr ./pulsar-test-service-start.sh - - -pushd tests - -export RETRY_FAILED="${RETRY_FAILED:-1}" - -if [ -f /gtest-parallel/gtest-parallel ]; then - gtest_workers=10 - # use nproc to set workers to 2 x the number of available cores if nproc is available - if [ -x "$(command -v nproc)" ]; then - gtest_workers=$(( $(nproc) * 2 )) - fi - # set maximum workers to 10 - gtest_workers=$(( gtest_workers > 10 ? 10 : gtest_workers )) - echo "---- Run unit tests in parallel (workers=$gtest_workers) (retry_failed=${RETRY_FAILED})" - tests="" - if [ $# -eq 1 ]; then - tests="--gtest_filter=$1" - echo "Running tests: $1" - fi - python3 /gtest-parallel/gtest-parallel $tests --dump_json_test_results=/tmp/gtest_parallel_results.json \ - --workers=$gtest_workers --retry_failed=$RETRY_FAILED -d /tmp \ - ./main --gtest_filter='-CustomLoggerTest*' - # The customized logger might affect other tests - ./main --gtest_filter='CustomLoggerTest*' - RES=$? -else - ./main - RES=$? -fi - -popd - -if [ $RES -eq 0 ]; then - pushd python - echo "---- Build Python Wheel file" - python3 setup.py bdist_wheel - - echo "---- Installing Python Wheel file" - ls -lha dist - WHEEL_FILE=$(ls dist/ | grep whl) - echo "${WHEEL_FILE}" - echo "dist/${WHEEL_FILE}[all]" - pip3 install dist/${WHEEL_FILE}[all] - - echo "---- Running Python unit tests" - - # Running tests from a different directory to avoid importing directly - # from the current dir, but rather using the installed wheel file - cp *_test.py /tmp - pushd /tmp - - python3 custom_logger_test.py - RES=$? - echo "custom_logger_test.py: $RES" - - python3 pulsar_test.py - RES=$? 
- echo "pulsar_test.py: $RES" - - echo "---- Running Python Function Instance unit tests" - bash $ROOT_DIR/pulsar-functions/instance/src/scripts/run_python_instance_tests.sh - RES=$? - echo "run_python_instance_tests.sh: $RES" - - popd - popd -fi - -./pulsar-test-service-stop.sh - -exit $RES diff --git a/pulsar-client-cpp/templates/Version.h.in b/pulsar-client-cpp/templates/Version.h.in deleted file mode 100644 index d52121ac8c19e..0000000000000 --- a/pulsar-client-cpp/templates/Version.h.in +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * @PVM_COMMENT@ - */ -#ifndef PULSAR_VERSION_H_ -#define PULSAR_VERSION_H_ - -#define PULSAR_VERSION @PVM@ - -#endif /* PULSAR_VERSION_H_ */ diff --git a/pulsar-client-cpp/test-conf/.htpasswd b/pulsar-client-cpp/test-conf/.htpasswd deleted file mode 100644 index 2aa3a4772abba..0000000000000 --- a/pulsar-client-cpp/test-conf/.htpasswd +++ /dev/null @@ -1 +0,0 @@ -admin:$apr1$FG4AO6aX$KGYPuMoLUou3i6vUkPUUf. 
diff --git a/pulsar-client-cpp/test-conf/client-ssl.conf b/pulsar-client-cpp/test-conf/client-ssl.conf deleted file mode 100644 index 6ca0e5a6d36a4..0000000000000 --- a/pulsar-client-cpp/test-conf/client-ssl.conf +++ /dev/null @@ -1,26 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Pulsar Client configuration -webServiceUrl=https://localhost:8443/ -brokerServiceUrl=pulsar+ssl://localhost:6651/ -tlsAllowInsecureConnection=false -tlsTrustCertsFilePath=/tmp/pulsar-test-data/certs/cacert.pem -authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls -authParams=tlsCertFile:/tmp/pulsar-test-data/certs/client-cert.pem,tlsKeyFile:/tmp/pulsar-test-data/certs/client-key.pem diff --git a/pulsar-client-cpp/test-conf/client.conf b/pulsar-client-cpp/test-conf/client.conf deleted file mode 100644 index 978fc21d95e55..0000000000000 --- a/pulsar-client-cpp/test-conf/client.conf +++ /dev/null @@ -1,27 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Pulsar Client configuration -webServiceUrl=http://localhost:8765/ -brokerServiceUrl=pulsar://localhost:8885/ -#authPlugin= -#authParams= -#useTls= -#tlsAllowInsecureConnection -#tlsTrustCertsFilePath diff --git a/pulsar-client-cpp/test-conf/standalone-ssl.conf b/pulsar-client-cpp/test-conf/standalone-ssl.conf deleted file mode 100644 index 47548824a44d2..0000000000000 --- a/pulsar-client-cpp/test-conf/standalone-ssl.conf +++ /dev/null @@ -1,309 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -### --- General broker settings --- ### - -# Zookeeper quorum connection string -zookeeperServers= - -# Configuration Store connection string -configurationStoreServers= - -brokerServicePort=6650 -brokerServicePortTls=6651 - -# Port to use to server HTTP request -webServicePort=8080 -webServicePortTls=8443 - -# Hostname or IP address the service binds on, default is 0.0.0.0. -bindAddress=0.0.0.0 - -# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getCanonicalHostName() is used. -advertisedAddress=localhost - -# Name of the cluster to which this broker belongs to -clusterName=standalone - -# Zookeeper session timeout in milliseconds -zooKeeperSessionTimeoutMillis=30000 - -# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed -brokerShutdownTimeoutMs=3000 - -# Enable backlog quota check. Enforces action on topic when the quota is reached -backlogQuotaCheckEnabled=true - -# How often to check for topics that have reached the quota -backlogQuotaCheckIntervalInSeconds=60 - -# Default per-topic backlog quota limit -backlogQuotaDefaultLimitGB=10 - -# Enable the deletion of inactive topics -brokerDeleteInactiveTopicsEnabled=true - -# How often to check for inactive topics -brokerDeleteInactiveTopicsFrequencySeconds=60 - -# How frequently to proactively check and purge expired messages -messageExpiryCheckIntervalInMinutes=5 - -# Enable check for minimum allowed client library version -clientLibraryVersionCheckEnabled=false - -# Allow client libraries with no version information -clientLibraryVersionCheckAllowUnversioned=true - -# Path for the file used to determine the rotation status for the broker when responding -# to service discovery health checks -statusFilePath=/usr/local/apache/htdocs - -# Max number of unacknowledged messages allowed to receive messages by a consumer on a shared subscription. 
Broker will stop sending -# messages to consumer once, this limit reaches until consumer starts acknowledging messages back -# Using a value of 0, is disabling unackeMessage limit check and consumer can receive messages without any restriction -maxUnackedMessagesPerConsumer=50000 - -subscriptionRedeliveryTrackerEnabled=true - -### --- Authentication --- ### - -# Enable TLS -tlsEnabled=true -tlsCertificateFilePath=/tmp/pulsar-test-data/certs/broker-cert.pem -tlsKeyFilePath=/tmp/pulsar-test-data/certs/broker-key.pem -tlsTrustCertsFilePath=/tmp/pulsar-test-data/certs/cacert.pem -tlsAllowInsecureConnection=false - -anonymousUserRole=anonymous - -# Enable authentication -authenticationEnabled=true - -# Authentication provider name list, which is comma separated list of class names -authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderTls,org.apache.pulsar.broker.authentication.AuthenticationProviderToken,org.apache.pulsar.broker.authentication.AuthenticationProviderBasic - -# Enforce authorization -authorizationEnabled=true - -tokenSecretKey=file:///tmp/pulsar-test-data/tokens/secret.key - -# Role names that are treated as "super-user", meaning they will be able to do all admin -# operations and publish/consume from all topics -superUserRoles=localhost,superUser,admin - -# Authentication settings of the broker itself. 
Used when the broker connects to other brokers, -# either in same or other clusters -brokerClientAuthenticationPlugin= -brokerClientAuthenticationParameters= - -### --- BookKeeper Client --- ### - -# Authentication plugin to use when connecting to bookies -bookkeeperClientAuthenticationPlugin= - -# BookKeeper auth plugin implementatation specifics parameters name and values -bookkeeperClientAuthenticationParametersName= -bookkeeperClientAuthenticationParameters= - -# Timeout for BK add / read operations -bookkeeperClientTimeoutInSeconds=30 - -# Speculative reads are initiated if a read request doesn't complete within a certain time -# Using a value of 0, is disabling the speculative reads -bookkeeperClientSpeculativeReadTimeoutInMillis=0 - -# Enable bookies health check. Bookies that have more than the configured number of failure within -# the interval will be quarantined for some time. During this period, new ledgers won't be created -# on these bookies -bookkeeperClientHealthCheckEnabled=true -bookkeeperClientHealthCheckIntervalSeconds=60 -bookkeeperClientHealthCheckErrorThresholdPerInterval=5 -bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800 - -# Enable rack-aware bookie selection policy. BK will chose bookies from different racks when -# forming a new bookie ensemble -bookkeeperClientRackawarePolicyEnabled=true - -# Enable region-aware bookie selection policy. BK will chose bookies from -# different regions and racks when forming a new bookie ensemble -# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored -bookkeeperClientRegionawarePolicyEnabled=false - -# Minimum number of racks per write quorum. BK rack-aware bookie selection policy will try to -# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum. -bookkeeperClientMinNumRacksPerWriteQuorum=1 - -# Enforces rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum' -# racks for a writeQuorum. 
-# If BK can't find bookie then it would throw BKNotEnoughBookiesException instead of picking random one. -bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false - -# Enable/disable reordering read sequence on reading entries. -bookkeeperClientReorderReadSequenceEnabled=false - -# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie -# outside the specified groups will not be used by the broker -bookkeeperClientIsolationGroups= - -### --- Managed Ledger --- ### - -# Number of bookies to use when creating a ledger -managedLedgerDefaultEnsembleSize=1 - -# Number of copies to store for each message -managedLedgerDefaultWriteQuorum=1 - -# Number of guaranteed copies (acks to wait before write is complete) -managedLedgerDefaultAckQuorum=1 - -# Amount of memory to use for caching data payload in managed ledger. This memory -# is allocated from JVM direct memory and it's shared across all the topics -# running in the same broker -managedLedgerCacheSizeMB=1024 - -# Threshold to which bring down the cache level when eviction is triggered -managedLedgerCacheEvictionWatermark=0.9 - -# Rate limit the amount of writes generated by consumer acking the messages -managedLedgerDefaultMarkDeleteRateLimit=0.1 - -# Max number of entries to append to a ledger before triggering a rollover -# A ledger rollover is triggered after the min rollover time has passed -# and one of the following conditions is true: -# * The max rollover time has been reached -# * The max entries have been written to the ledger -# * The max ledger size has been written to the ledger -managedLedgerMaxEntriesPerLedger=50000 - -# Minimum time between ledger rollover for a topic -managedLedgerMinLedgerRolloverTimeMinutes=10 - -# Maximum time before forcing a ledger rollover for a topic -managedLedgerMaxLedgerRolloverTimeMinutes=240 - -# Max number of entries to append to a cursor ledger -managedLedgerCursorMaxEntriesPerLedger=50000 - -# Max time before triggering a rollover on a 
cursor ledger -managedLedgerCursorRolloverTimeInSeconds=14400 - - - -### --- Load balancer --- ### - -# Enable load balancer -loadBalancerEnabled=false - -# Strategy to assign a new bundle -loadBalancerPlacementStrategy=weightedRandomSelection - -# Percentage of change to trigger load report update -loadBalancerReportUpdateThresholdPercentage=10 - -# maximum interval to update load report -loadBalancerReportUpdateMaxIntervalMinutes=15 - -# Frequency of report to collect -loadBalancerHostUsageCheckIntervalMinutes=1 - -# Load shedding interval. Broker periodically checks whether some traffic should be offload from -# some over-loaded broker to other under-loaded brokers -loadBalancerSheddingIntervalMinutes=30 - -# Prevent the same topics to be shed and moved to other broker more than once within this timeframe -loadBalancerSheddingGracePeriodMinutes=30 - -# Usage threshold to determine a broker as under-loaded -loadBalancerBrokerUnderloadedThresholdPercentage=1 - -# Usage threshold to determine a broker as over-loaded -loadBalancerBrokerOverloadedThresholdPercentage=85 - -# Interval to update namespace bundle resource quota -loadBalancerResourceQuotaUpdateIntervalMinutes=15 - -# Usage threshold to determine a broker is having just right level of load -loadBalancerBrokerComfortLoadLevelPercentage=65 - -# enable/disable namespace bundle auto split -loadBalancerAutoBundleSplitEnabled=false - -# interval to detect & split hot namespace bundle -loadBalancerNamespaceBundleSplitIntervalMinutes=15 - -# maximum topics in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxTopics=1000 - -# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxSessions=1000 - -# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxMsgRate=1000 - -# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered 
-loadBalancerNamespaceBundleMaxBandwidthMbytes=100 - -# maximum number of bundles in a namespace -loadBalancerNamespaceMaximumBundles=128 - -### --- Replication --- ### - -# Enable replication metrics -replicationMetricsEnabled=true - -# Max number of connections to open for each broker in a remote cluster -# More connections host-to-host lead to better throughput over high-latency -# links. -replicationConnectionsPerBroker=16 - -# Replicator producer queue size -replicationProducerQueueSize=1000 - -# Default message retention time. 0 means retention is disabled. -1 means data is not removed by time quota -defaultRetentionTimeInMinutes=0 - -# Default retention size. 0 means retention is disabled. -1 means data is not removed by size quota -defaultRetentionSizeInMB=0 - -# How often to check whether the connections are still alive -keepAliveIntervalSeconds=30 - -# Enable topic auto creation if new producer or consumer connected (disable auto creation with value false) -allowAutoTopicCreation=true - -# The type of topic that is allowed to be automatically created.(partitioned/non-partitioned) -allowAutoTopicCreationType=non-partitioned - -# The number of partitioned topics that is allowed to be automatically created if allowAutoTopicCreationType is partitioned. -defaultNumPartitions=1 - -### --- Deprecated config variables --- ### - -# Deprecated. Use configurationStoreServers -globalZookeeperServers= - -# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds -brokerServicePurgeInactiveFrequencyInSeconds=60 - -# Given a specific limit of the max message size -maxMessageSize=1024000 - -# Disable consistent hashing to fix flaky `KeySharedConsumerTest#testMultiTopics`. 
-subscriptionKeySharedUseConsistentHashing=false diff --git a/pulsar-client-cpp/test-conf/standalone.conf b/pulsar-client-cpp/test-conf/standalone.conf deleted file mode 100644 index faa1277456b57..0000000000000 --- a/pulsar-client-cpp/test-conf/standalone.conf +++ /dev/null @@ -1,291 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -### --- General broker settings --- ### - -# Zookeeper quorum connection string -zookeeperServers= - -# Configuration Store connection string -configurationStoreServers= - -brokerServicePort=8885 - -# Port to use to server HTTP request -webServicePort=8765 - -# Hostname or IP address the service binds on, default is 0.0.0.0. -bindAddress=0.0.0.0 - -# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getCanonicalHostName() is used. -advertisedAddress=localhost - -# Name of the cluster to which this broker belongs to -clusterName=standalone - -# Zookeeper session timeout in milliseconds -zooKeeperSessionTimeoutMillis=30000 - -# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed -brokerShutdownTimeoutMs=3000 - -# Enable backlog quota check. 
Enforces action on topic when the quota is reached -backlogQuotaCheckEnabled=true - -# How often to check for topics that have reached the quota -backlogQuotaCheckIntervalInSeconds=60 - -# Default per-topic backlog quota limit -backlogQuotaDefaultLimitGB=10 - -# Enable the deletion of inactive topics -brokerDeleteInactiveTopicsEnabled=true - -# How often to check for inactive topics -brokerDeleteInactiveTopicsFrequencySeconds=60 - -# How frequently to proactively check and purge expired messages -messageExpiryCheckIntervalInMinutes=5 - -# Enable check for minimum allowed client library version -clientLibraryVersionCheckEnabled=false - -# Allow client libraries with no version information -clientLibraryVersionCheckAllowUnversioned=true - -# Path for the file used to determine the rotation status for the broker when responding -# to service discovery health checks -statusFilePath=/usr/local/apache/htdocs - -# Max number of unacknowledged messages allowed to receive messages by a consumer on a shared subscription. Broker will stop sending -# messages to consumer once, this limit reaches until consumer starts acknowledging messages back -# Using a value of 0, is disabling unackeMessage limit check and consumer can receive messages without any restriction -maxUnackedMessagesPerConsumer=50000 - -subscriptionRedeliveryTrackerEnabled=true - -### --- Authentication --- ### - -# Enable authentication -authenticationEnabled=false - -# Authentication provider name list, which is comma separated list of class names -authenticationProviders=false - -# Enforce authorization -authorizationEnabled=false - -# Role names that are treated as "super-user", meaning they will be able to do all admin -# operations and publish/consume from all topics -superUserRoles= - -# Authentication settings of the broker itself. 
Used when the broker connects to other brokers, -# either in same or other clusters -brokerClientAuthenticationPlugin= -brokerClientAuthenticationParameters= - - -### --- BookKeeper Client --- ### - -# Authentication plugin to use when connecting to bookies -bookkeeperClientAuthenticationPlugin= - -# BookKeeper auth plugin implementatation specifics parameters name and values -bookkeeperClientAuthenticationParametersName= -bookkeeperClientAuthenticationParameters= - -# Timeout for BK add / read operations -bookkeeperClientTimeoutInSeconds=30 - -# Speculative reads are initiated if a read request doesn't complete within a certain time -# Using a value of 0, is disabling the speculative reads -bookkeeperClientSpeculativeReadTimeoutInMillis=0 - -# Enable bookies health check. Bookies that have more than the configured number of failure within -# the interval will be quarantined for some time. During this period, new ledgers won't be created -# on these bookies -bookkeeperClientHealthCheckEnabled=true -bookkeeperClientHealthCheckIntervalSeconds=60 -bookkeeperClientHealthCheckErrorThresholdPerInterval=5 -bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800 - -# Enable rack-aware bookie selection policy. BK will chose bookies from different racks when -# forming a new bookie ensemble -bookkeeperClientRackawarePolicyEnabled=true - -# Enable region-aware bookie selection policy. BK will chose bookies from -# different regions and racks when forming a new bookie ensemble -# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored -bookkeeperClientRegionawarePolicyEnabled=false - -# Minimum number of racks per write quorum. BK rack-aware bookie selection policy will try to -# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum. -bookkeeperClientMinNumRacksPerWriteQuorum=1 - -# Enforces rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum' -# racks for a writeQuorum. 
-# If BK can't find bookie then it would throw BKNotEnoughBookiesException instead of picking random one. -bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false - -# Enable/disable reordering read sequence on reading entries. -bookkeeperClientReorderReadSequenceEnabled=false - -# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie -# outside the specified groups will not be used by the broker -bookkeeperClientIsolationGroups= - -### --- Managed Ledger --- ### - -# Number of bookies to use when creating a ledger -managedLedgerDefaultEnsembleSize=1 - -# Number of copies to store for each message -managedLedgerDefaultWriteQuorum=1 - -# Number of guaranteed copies (acks to wait before write is complete) -managedLedgerDefaultAckQuorum=1 - -# Amount of memory to use for caching data payload in managed ledger. This memory -# is allocated from JVM direct memory and it's shared across all the topics -# running in the same broker -managedLedgerCacheSizeMB=1024 - -# Threshold to which bring down the cache level when eviction is triggered -managedLedgerCacheEvictionWatermark=0.9 - -# Rate limit the amount of writes generated by consumer acking the messages -managedLedgerDefaultMarkDeleteRateLimit=0.1 - -# Max number of entries to append to a ledger before triggering a rollover -# A ledger rollover is triggered after the min rollover time has passed -# and one of the following conditions is true: -# * The max rollover time has been reached -# * The max entries have been written to the ledger -# * The max ledger size has been written to the ledger -managedLedgerMaxEntriesPerLedger=50000 - -# Minimum time between ledger rollover for a topic -managedLedgerMinLedgerRolloverTimeMinutes=10 - -# Maximum time before forcing a ledger rollover for a topic -managedLedgerMaxLedgerRolloverTimeMinutes=240 - -# Max number of entries to append to a cursor ledger -managedLedgerCursorMaxEntriesPerLedger=50000 - -# Max time before triggering a rollover on a 
cursor ledger -managedLedgerCursorRolloverTimeInSeconds=14400 - - - -### --- Load balancer --- ### - -# Enable load balancer -loadBalancerEnabled=false - -# Strategy to assign a new bundle -loadBalancerPlacementStrategy=weightedRandomSelection - -# Percentage of change to trigger load report update -loadBalancerReportUpdateThresholdPercentage=10 - -# maximum interval to update load report -loadBalancerReportUpdateMaxIntervalMinutes=15 - -# Frequency of report to collect -loadBalancerHostUsageCheckIntervalMinutes=1 - -# Load shedding interval. Broker periodically checks whether some traffic should be offload from -# some over-loaded broker to other under-loaded brokers -loadBalancerSheddingIntervalMinutes=30 - -# Prevent the same topics to be shed and moved to other broker more than once within this timeframe -loadBalancerSheddingGracePeriodMinutes=30 - -# Usage threshold to determine a broker as under-loaded -loadBalancerBrokerUnderloadedThresholdPercentage=1 - -# Usage threshold to determine a broker as over-loaded -loadBalancerBrokerOverloadedThresholdPercentage=85 - -# Interval to update namespace bundle resource quota -loadBalancerResourceQuotaUpdateIntervalMinutes=15 - -# Usage threshold to determine a broker is having just right level of load -loadBalancerBrokerComfortLoadLevelPercentage=65 - -# enable/disable namespace bundle auto split -loadBalancerAutoBundleSplitEnabled=false - -# interval to detect & split hot namespace bundle -loadBalancerNamespaceBundleSplitIntervalMinutes=15 - -# maximum topics in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxTopics=1000 - -# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxSessions=1000 - -# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxMsgRate=1000 - -# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered 
-loadBalancerNamespaceBundleMaxBandwidthMbytes=100 - -# maximum number of bundles in a namespace -loadBalancerNamespaceMaximumBundles=128 - -### --- Replication --- ### - -# Enable replication metrics -replicationMetricsEnabled=true - -# Max number of connections to open for each broker in a remote cluster -# More connections host-to-host lead to better throughput over high-latency -# links. -replicationConnectionsPerBroker=16 - -# Replicator producer queue size -replicationProducerQueueSize=1000 - -# Default message retention time. 0 means retention is disabled. -1 means data is not removed by time quota -defaultRetentionTimeInMinutes=0 - -# Default retention size. 0 means retention is disabled. -1 means data is not removed by size quota -defaultRetentionSizeInMB=0 - -# How often to check whether the connections are still alive -keepAliveIntervalSeconds=30 - -# Enable topic auto creation if new producer or consumer connected (disable auto creation with value false) -allowAutoTopicCreation=true - -# The type of topic that is allowed to be automatically created.(partitioned/non-partitioned) -allowAutoTopicCreationType=non-partitioned - -# The number of partitioned topics that is allowed to be automatically created if allowAutoTopicCreationType is partitioned. -defaultNumPartitions=1 - -### --- Deprecated config variables --- ### - -# Deprecated. Use configurationStoreServers -globalZookeeperServers= - -# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds -brokerServicePurgeInactiveFrequencyInSeconds=60 diff --git a/pulsar-client-cpp/tests/AuthBasicTest.cc b/pulsar-client-cpp/tests/AuthBasicTest.cc deleted file mode 100644 index 296eff3cd57f0..0000000000000 --- a/pulsar-client-cpp/tests/AuthBasicTest.cc +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include -#include - -#include - -using namespace pulsar; - -static const std::string serviceUrl = "pulsar://localhost:6650"; -static const std::string serviceUrlHttp = "http://localhost:8080"; - -TEST(AuthPluginBasic, testBasic) { - ClientConfiguration config = ClientConfiguration(); - AuthenticationPtr auth = pulsar::AuthBasic::create("admin", "123456"); - - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "basic"); - - pulsar::AuthenticationDataPtr data; - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData(), "admin:123456"); - ASSERT_EQ(data->hasDataForTls(), false); - ASSERT_EQ(data->hasDataForHttp(), true); - ASSERT_EQ(auth.use_count(), 1); - - config.setAuth(auth); - Client client(serviceUrl, config); - - std::string topicName = "persistent://private/auth/test-basic"; - std::string subName = "subscription-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - producer.close(); -} - -TEST(AuthPluginBasic, testBasicWithHttp) { - ClientConfiguration config = ClientConfiguration(); - AuthenticationPtr auth = pulsar::AuthBasic::create("admin", "123456"); - - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "basic"); - - pulsar::AuthenticationDataPtr data; - 
ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData(), "admin:123456"); - ASSERT_EQ(data->hasDataForTls(), false); - ASSERT_EQ(data->hasDataForHttp(), true); - - config.setAuth(auth); - Client client(serviceUrlHttp, config); - - std::string topicName = "persistent://private/auth/test-basic"; - std::string subName = "subscription-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - producer.close(); -} - -TEST(AuthPluginBasic, testNoAuth) { - ClientConfiguration config = ClientConfiguration(); - Client client(serviceUrl, config); - - std::string topicName = "persistent://private/auth/test-basic"; - std::string subName = "subscription-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultAuthorizationError, result); -} - -TEST(AuthPluginBasic, testNoAuthWithHttp) { - ClientConfiguration config = ClientConfiguration(); - Client client(serviceUrlHttp, config); - - std::string topicName = "persistent://private/auth/test-basic"; - std::string subName = "subscription-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultConnectError, result); -} - -TEST(AuthPluginBasic, testLoadAuth) { - AuthenticationPtr auth = pulsar::AuthBasic::create("admin", "123456"); - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "basic"); - pulsar::AuthenticationDataPtr data; - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData(), "admin:123456"); - ASSERT_EQ(data->hasDataForTls(), false); - ASSERT_EQ(data->hasDataForHttp(), true); - - auth = pulsar::AuthBasic::create("{\"username\":\"super-user\",\"password\":\"123789\"}"); - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "basic"); - 
ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData(), "super-user:123789"); - ASSERT_EQ(data->hasDataForTls(), false); - ASSERT_EQ(data->hasDataForHttp(), true); - - ParamMap p = ParamMap(); - p["username"] = "super-user-2"; - p["password"] = "456789"; - auth = pulsar::AuthBasic::create(p); - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "basic"); - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData(), "super-user-2:456789"); - ASSERT_EQ(data->hasDataForTls(), false); - ASSERT_EQ(data->hasDataForHttp(), true); -} diff --git a/pulsar-client-cpp/tests/AuthPluginTest.cc b/pulsar-client-cpp/tests/AuthPluginTest.cc deleted file mode 100644 index 3a8354fd53c83..0000000000000 --- a/pulsar-client-cpp/tests/AuthPluginTest.cc +++ /dev/null @@ -1,485 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "pulsar/Authentication.h" -#include -#include -#include -#include -#include -#include -#include - -#include -#include "lib/Future.h" -#include "lib/Utils.h" -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -int globalTestTlsMessagesCounter = 0; -static const std::string serviceUrlTls = "pulsar+ssl://localhost:6651"; -static const std::string serviceUrlHttps = "https://localhost:8443"; - -static const std::string caPath = "../../pulsar-broker/src/test/resources/authentication/tls/cacert.pem"; -static const std::string clientPublicKeyPath = - "../../pulsar-broker/src/test/resources/authentication/tls/client-cert.pem"; -static const std::string clientPrivateKeyPath = - "../../pulsar-broker/src/test/resources/authentication/tls/client-key.pem"; - -static void sendCallBackTls(Result r, const MessageId& msgId) { - ASSERT_EQ(r, ResultOk); - globalTestTlsMessagesCounter++; -} - -TEST(AuthPluginTest, testTls) { - ClientConfiguration config = ClientConfiguration(); - config.setTlsTrustCertsFilePath(caPath); - config.setTlsAllowInsecureConnection(false); - AuthenticationPtr auth = pulsar::AuthTls::create(clientPublicKeyPath, clientPrivateKeyPath); - - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "tls"); - - pulsar::AuthenticationDataPtr data; - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->getCommandData(), "none"); - ASSERT_EQ(data->hasDataForTls(), true); - ASSERT_EQ(auth.use_count(), 1); - - config.setAuth(auth); - Client client(serviceUrlTls, config); - - std::string topicName = "persistent://private/auth/test-tls"; - std::string subName = "subscription-name"; - int numOfMessages = 10; - - Producer producer; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - Promise consumerPromise; - 
client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::string prefix = "test-tls-message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, &sendCallBackTls); - LOG_INFO("sending message " << messageContent); - } - - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_INFO("Received Message with [ content - " - << receivedMsg.getDataAsString() << "] [ messageID = " << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - // Number of messages produced - ASSERT_EQ(globalTestTlsMessagesCounter, numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -TEST(AuthPluginTest, testTlsDetectPulsarSsl) { - ClientConfiguration config = ClientConfiguration(); - config.setTlsTrustCertsFilePath(caPath); - config.setTlsAllowInsecureConnection(false); - config.setAuth(pulsar::AuthTls::create(clientPublicKeyPath, clientPrivateKeyPath)); - - Client client(serviceUrlTls, config); - - std::string topicName = "persistent://private/auth/test-tls-detect"; - - Producer producer; - Promise 
producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); -} - -TEST(AuthPluginTest, testTlsDetectPulsarSslWithHostNameValidation) { - ClientConfiguration config = ClientConfiguration(); - config.setTlsTrustCertsFilePath(caPath); - config.setTlsAllowInsecureConnection(false); - config.setValidateHostName(true); - config.setAuth(pulsar::AuthTls::create(clientPublicKeyPath, clientPrivateKeyPath)); - - Client client(serviceUrlTls, config); - std::string topicName = "persistent://private/auth/testTlsDetectPulsarSslWithHostNameValidation"; - - Producer producer; - Result res = client.createProducer(topicName, producer); - ASSERT_EQ(ResultConnectError, res); -} - -TEST(AuthPluginTest, testTlsDetectHttps) { - ClientConfiguration config = ClientConfiguration(); - config.setUseTls(true); // shouldn't be needed soon - config.setTlsTrustCertsFilePath(caPath); - config.setTlsAllowInsecureConnection(false); - config.setAuth(pulsar::AuthTls::create(clientPublicKeyPath, clientPrivateKeyPath)); - - Client client(serviceUrlHttps, config); - - std::string topicName = "persistent://private/auth/test-tls-detect-https"; - - Producer producer; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); -} - -TEST(AuthPluginTest, testTlsDetectHttpsWithHostNameValidation) { - ClientConfiguration config = ClientConfiguration(); - config.setUseTls(true); // shouldn't be needed soon - config.setTlsTrustCertsFilePath(caPath); - config.setTlsAllowInsecureConnection(false); - config.setAuth(pulsar::AuthTls::create(clientPublicKeyPath, clientPrivateKeyPath)); - config.setValidateHostName(true); - - Client client(serviceUrlHttps, config); - - 
std::string topicName = "persistent://private/auth/test-tls-detect-https-with-hostname-validation"; - - Producer producer; - Result res = client.createProducer(topicName, producer); - ASSERT_NE(ResultOk, res); -} - -namespace testAthenz { -std::string principalToken; -void mockZTS(Latch& latch, int port) { - LOG_INFO("-- MockZTS started"); - boost::asio::io_service io; - boost::asio::ip::tcp::iostream stream; - boost::asio::ip::tcp::acceptor acceptor(io, - boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port)); - - LOG_INFO("-- MockZTS waiting for connnection"); - latch.countdown(); - acceptor.accept(*stream.rdbuf()); - LOG_INFO("-- MockZTS got connection"); - - std::string headerLine; - while (getline(stream, headerLine)) { - std::vector kv; - boost::algorithm::split(kv, headerLine, boost::is_any_of(" ")); - if (kv[0] == "Athenz-Principal-Auth:") { - principalToken = kv[1]; - } - - if (headerLine == "\r" || headerLine == "\n" || headerLine == "\r\n") { - std::string mockToken = "{\"token\":\"mockToken\",\"expiryTime\":4133980800}"; - stream << "HTTP/1.1 200 OK" << std::endl; - stream << "Host: localhost" << std::endl; - stream << "Content-Type: application/json" << std::endl; - stream << "Content-Length: " << mockToken.size() << std::endl; - stream << std::endl; - stream << mockToken << std::endl; - break; - } - } - - LOG_INFO("-- MockZTS exiting"); -} -} // namespace testAthenz - -TEST(AuthPluginTest, testAthenz) { - Latch latch(1); - std::thread zts(std::bind(&testAthenz::mockZTS, std::ref(latch), 9999)); - pulsar::AuthenticationDataPtr data; - std::string params = R"({ - "tenantDomain": "pulsar.test.tenant", - "tenantService": "service", - "providerDomain": "pulsar.test.provider", - "privateKey": "file:)" + - clientPrivateKeyPath + R"(", - "ztsUrl": "http://localhost:9999" - })"; - - LOG_INFO("PARAMS: " << params); - latch.wait(); - pulsar::AuthenticationPtr auth = pulsar::AuthAthenz::create(params); - ASSERT_EQ(auth->getAuthMethodName(), "athenz"); 
- ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataForHttp(), true); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getHttpHeaders(), "Athenz-Role-Auth: mockToken"); - ASSERT_EQ(data->getCommandData(), "mockToken"); - zts.join(); - std::vector kvs; - boost::algorithm::split(kvs, testAthenz::principalToken, boost::is_any_of(";")); - for (std::vector::iterator itr = kvs.begin(); itr != kvs.end(); itr++) { - std::vector kv; - boost::algorithm::split(kv, *itr, boost::is_any_of("=")); - if (kv[0] == "d") { - ASSERT_EQ(kv[1], "pulsar.test.tenant"); - } else if (kv[0] == "n") { - ASSERT_EQ(kv[1], "service"); - } - } -} - -TEST(AuthPluginTest, testDisable) { - pulsar::AuthenticationDataPtr data; - - pulsar::AuthenticationPtr auth = pulsar::AuthFactory::Disabled(); - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "none"); - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->getCommandData(), "none"); - ASSERT_EQ(auth.use_count(), 1); -} - -TEST(AuthPluginTest, testAuthFactoryTls) { - pulsar::AuthenticationDataPtr data; - AuthenticationPtr auth = pulsar::AuthFactory::create( - "tls", "tlsCertFile:" + clientPublicKeyPath + ",tlsKeyFile:" + clientPrivateKeyPath); - ASSERT_EQ(auth->getAuthMethodName(), "tls"); - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataForTls(), true); - ASSERT_EQ(data->getTlsCertificates(), clientPublicKeyPath); - ASSERT_EQ(data->getTlsPrivateKey(), clientPrivateKeyPath); - - ClientConfiguration config = ClientConfiguration(); - config.setAuth(auth); - config.setTlsTrustCertsFilePath(caPath); - config.setTlsAllowInsecureConnection(false); - Client client(serviceUrlTls, config); - - std::string topicName = "persistent://private/auth/test-tls-factory"; - Producer producer; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - 
Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); -} - -TEST(AuthPluginTest, testAuthFactoryAthenz) { - Latch latch(1); - std::thread zts(std::bind(&testAthenz::mockZTS, std::ref(latch), 9998)); - pulsar::AuthenticationDataPtr data; - std::string params = R"({ - "tenantDomain": "pulsar.test2.tenant", - "tenantService": "service", - "providerDomain": "pulsar.test.provider", - "privateKey": "file:)" + - clientPrivateKeyPath + R"(", - "ztsUrl": "http://localhost:9998" - })"; - LOG_INFO("PARAMS: " << params); - latch.wait(); - pulsar::AuthenticationPtr auth = pulsar::AuthFactory::create("athenz", params); - ASSERT_EQ(auth->getAuthMethodName(), "athenz"); - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataForHttp(), true); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getHttpHeaders(), "Athenz-Role-Auth: mockToken"); - ASSERT_EQ(data->getCommandData(), "mockToken"); - - LOG_INFO("Calling zts.join()"); - zts.join(); - LOG_INFO("Done zts.join()"); - - std::vector kvs; - boost::algorithm::split(kvs, testAthenz::principalToken, boost::is_any_of(";")); - for (std::vector::iterator itr = kvs.begin(); itr != kvs.end(); itr++) { - std::vector kv; - boost::algorithm::split(kv, *itr, boost::is_any_of("=")); - if (kv[0] == "d") { - ASSERT_EQ(kv[1], "pulsar.test2.tenant"); - } else if (kv[0] == "n") { - ASSERT_EQ(kv[1], "service"); - } - } -} - -TEST(AuthPluginTest, testOauth2) { - // test success get token from oauth2 server. 
- pulsar::AuthenticationDataPtr data; - std::string params = R"({ - "type": "client_credentials", - "issuer_url": "https://dev-kt-aa9ne.us.auth0.com", - "client_id": "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x", - "client_secret": "rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb", - "audience": "https://dev-kt-aa9ne.us.auth0.com/api/v2/"})"; - - int expectedTokenLength = 3379; - LOG_INFO("PARAMS: " << params); - pulsar::AuthenticationPtr auth = pulsar::AuthOauth2::create(params); - - ASSERT_EQ(auth->getAuthMethodName(), "token"); - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataForHttp(), true); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData().length(), expectedTokenLength); -} - -TEST(AuthPluginTest, testOauth2WrongSecret) { - pulsar::AuthenticationDataPtr data; - - std::string params = R"({ - "type": "client_credentials", - "issuer_url": "https://dev-kt-aa9ne.us.auth0.com", - "client_id": "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x", - "client_secret": "rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ", - "audience": "https://dev-kt-aa9ne.us.auth0.com/api/v2/"})"; - - LOG_INFO("PARAMS: " << params); - pulsar::AuthenticationPtr auth = pulsar::AuthOauth2::create(params); - ASSERT_EQ(auth->getAuthMethodName(), "token"); - ASSERT_EQ(auth->getAuthData(data), ResultAuthenticationError); -} - -TEST(AuthPluginTest, testOauth2CredentialFile) { - // test success get token from oauth2 server. 
- pulsar::AuthenticationDataPtr data; - std::string params = R"({ - "type": "client_credentials", - "issuer_url": "https://dev-kt-aa9ne.us.auth0.com", - "private_key": "../../pulsar-broker/src/test/resources/authentication/token/cpp_credentials_file.json", - "audience": "https://dev-kt-aa9ne.us.auth0.com/api/v2/"})"; - - int expectedTokenLength = 3379; - LOG_INFO("PARAMS: " << params); - pulsar::AuthenticationPtr auth = pulsar::AuthOauth2::create(params); - ASSERT_EQ(auth->getAuthMethodName(), "token"); - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataForHttp(), true); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData().length(), expectedTokenLength); -} - -TEST(AuthPluginTest, testOauth2RequestBody) { - ParamMap params; - params["issuer_url"] = "https://dev-kt-aa9ne.us.auth0.com"; - params["client_id"] = "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x"; - params["client_secret"] = "rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb"; - params["audience"] = "https://dev-kt-aa9ne.us.auth0.com/api/v2/"; - - auto createExpectedResult = [&] { - auto paramsCopy = params; - paramsCopy.emplace("grant_type", "client_credentials"); - paramsCopy.erase("issuer_url"); - return paramsCopy; - }; - - const auto expectedResult1 = createExpectedResult(); - ClientCredentialFlow flow1(params); - ASSERT_EQ(flow1.generateParamMap(), expectedResult1); - - params["scope"] = "test-scope"; - const auto expectedResult2 = createExpectedResult(); - ClientCredentialFlow flow2(params); - ASSERT_EQ(flow2.generateParamMap(), expectedResult2); -} - -TEST(AuthPluginTest, testInitialize) { - std::string issuerUrl = "https://dev-kt-aa9ne.us.auth0.com"; - std::string expectedTokenEndPoint = issuerUrl + "/oauth/token"; - - ParamMap params; - params["issuer_url"] = issuerUrl; - params["client_id"] = "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x"; - params["client_secret"] = "rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb"; - 
params["audience"] = "https://dev-kt-aa9ne.us.auth0.com/api/v2/"; - - ClientCredentialFlow flow1(params); - flow1.initialize(); - ASSERT_EQ(flow1.getTokenEndPoint(), expectedTokenEndPoint); - - params["issuer_url"] = issuerUrl + "/"; - ClientCredentialFlow flow2(params); - flow2.initialize(); - ASSERT_EQ(flow2.getTokenEndPoint(), expectedTokenEndPoint); -} - -TEST(AuthPluginTest, testOauth2Failure) { - ParamMap params; - auto addKeyValue = [&](const std::string& key, const std::string& value) { - params[key] = value; - LOG_INFO("Configure \"" << key << "\" to \"" << value << "\""); - }; - - auto createClient = [&]() -> Client { - ClientConfiguration conf; - conf.setAuth(AuthOauth2::create(params)); - return {"pulsar://localhost:6650", conf}; - }; - - const std::string topic = "AuthPluginTest-testOauth2Failure"; - Producer producer; - - // No issuer_url - auto client1 = createClient(); - ASSERT_EQ(client1.createProducer(topic, producer), ResultAuthenticationError); - client1.close(); - - // Invalid issuer_url - addKeyValue("issuer_url", "hello"); - auto client2 = createClient(); - ASSERT_EQ(client2.createProducer(topic, producer), ResultAuthenticationError); - client2.close(); - - addKeyValue("issuer_url", "https://google.com"); - auto client3 = createClient(); - ASSERT_EQ(client3.createProducer(topic, producer), ResultAuthenticationError); - client3.close(); - - // No client id and secret - addKeyValue("issuer_url", "https://dev-kt-aa9ne.us.auth0.com"); - auto client4 = createClient(); - ASSERT_EQ(client4.createProducer(topic, producer), ResultAuthenticationError); - client4.close(); - - // Invalid client_id and client_secret - addKeyValue("client_id", "my_id"); - addKeyValue("client_secret", "my-secret"); - auto client5 = createClient(); - ASSERT_EQ(client5.createProducer(topic, producer), ResultAuthenticationError); - client5.close(); -} diff --git a/pulsar-client-cpp/tests/AuthTokenTest.cc b/pulsar-client-cpp/tests/AuthTokenTest.cc deleted file mode 100644 index 
ede5e818d3da6..0000000000000 --- a/pulsar-client-cpp/tests/AuthTokenTest.cc +++ /dev/null @@ -1,200 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "lib/Future.h" -#include "lib/Utils.h" -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static const std::string serviceUrl = "pulsar://localhost:6650"; -static const std::string serviceUrlHttp = "http://localhost:8080"; - -static const std::string tokenPath = "/tmp/pulsar-test-data/tokens/token.txt"; - -static std::string getToken() { - std::ifstream file(tokenPath); - std::string str((std::istreambuf_iterator(file)), std::istreambuf_iterator()); - return str; -} - -TEST(AuthPluginToken, testToken) { - ClientConfiguration config = ClientConfiguration(); - std::string token = getToken(); - AuthenticationPtr auth = pulsar::AuthToken::createWithToken(token); - - ASSERT_TRUE(auth != NULL); - ASSERT_EQ(auth->getAuthMethodName(), "token"); - - pulsar::AuthenticationDataPtr data; - ASSERT_EQ(auth->getAuthData(data), pulsar::ResultOk); - ASSERT_EQ(data->hasDataFromCommand(), true); - ASSERT_EQ(data->getCommandData(), token); - 
ASSERT_EQ(data->hasDataForTls(), false); - ASSERT_EQ(data->hasDataForHttp(), true); - ASSERT_EQ(auth.use_count(), 1); - - config.setAuth(auth); - Client client(serviceUrl, config); - - std::string topicName = "persistent://private/auth/test-token"; - std::string subName = "subscription-name"; - int numOfMessages = 10; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, subName, consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::string prefix = "test-token-message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, NULL); - LOG_INFO("sending message " << messageContent); - } - - producer.flush(); - - Message receivedMsg; - for (int i = 0; i < numOfMessages; i++) { - Result res = consumer.receive(receivedMsg); - ASSERT_EQ(ResultOk, res); - - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_INFO("Received Message with [ content - " - << receivedMsg.getDataAsString() << "] [ messageID = " << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } -} - -TEST(AuthPluginToken, testTokenWithHttpUrl) { - ClientConfiguration config = ClientConfiguration(); - std::string token = getToken(); - 
config.setAuth(pulsar::AuthToken::createWithToken(token)); - Client client(serviceUrlHttp, config); - - std::string topicName = "persistent://private/auth/test-token-http"; - std::string subName = "subscription-name"; - int numOfMessages = 10; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, subName, consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::string prefix = "test-token-message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, NULL); - LOG_INFO("sending message " << messageContent); - } - - producer.flush(); - - Message receivedMsg; - for (int i = 0; i < numOfMessages; i++) { - Result res = consumer.receive(receivedMsg); - ASSERT_EQ(ResultOk, res); - - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_INFO("Received Message with [ content - " - << receivedMsg.getDataAsString() << "] [ messageID = " << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } -} - -TEST(AuthPluginToken, testNoAuth) { - ClientConfiguration config; - Client client(serviceUrl, config); - - std::string topicName = "persistent://private/auth/test-token"; - std::string subName = "subscription-name"; - - Producer producer; - Result result = 
client.createProducer(topicName, producer); - ASSERT_EQ(ResultAuthorizationError, result); - - Consumer consumer; - result = client.subscribe(topicName, subName, consumer); - ASSERT_EQ(ResultAuthorizationError, result); -} - -TEST(AuthPluginToken, testNoAuthWithHttp) { - ClientConfiguration config; - Client client(serviceUrlHttp, config); - - std::string topicName = "persistent://private/auth/test-token"; - std::string subName = "subscription-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultConnectError, result); - - Consumer consumer; - result = client.subscribe(topicName, subName, consumer); - ASSERT_EQ(ResultConnectError, result); -} diff --git a/pulsar-client-cpp/tests/BackoffTest.cc b/pulsar-client-cpp/tests/BackoffTest.cc deleted file mode 100644 index 8ecf5665a9740..0000000000000 --- a/pulsar-client-cpp/tests/BackoffTest.cc +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include "Backoff.h" -#include "PulsarFriend.h" - -using namespace pulsar; -using boost::posix_time::milliseconds; -using boost::posix_time::seconds; - -static bool checkExactAndDecrementTimer(Backoff& backoff, const unsigned int& t2) { - const unsigned int& t1 = backoff.next().total_milliseconds(); - boost::posix_time::ptime& firstBackOffTime = PulsarFriend::getFirstBackoffTime(backoff); - firstBackOffTime -= milliseconds(t2); - return t1 == t2; -} - -static bool withinTenPercentAndDecrementTimer(Backoff& backoff, const unsigned int& t2) { - const unsigned int& t1 = backoff.next().total_milliseconds(); - boost::posix_time::ptime& firstBackOffTime = PulsarFriend::getFirstBackoffTime(backoff); - firstBackOffTime -= milliseconds(t2); - return (t1 >= t2 * 0.9 && t1 <= t2); -} - -TEST(BackoffTest, mandatoryStopTestNegativeTest) { - Backoff backoff(milliseconds(100), seconds(60), milliseconds(1900)); - ASSERT_EQ(backoff.next().total_milliseconds(), 100); - backoff.next().total_milliseconds(); // 200 - backoff.next().total_milliseconds(); // 400 - backoff.next().total_milliseconds(); // 800 - ASSERT_FALSE(withinTenPercentAndDecrementTimer(backoff, 400)); -} - -TEST(BackoffTest, firstBackoffTimerTest) { - Backoff backoff(milliseconds(100), seconds(60), milliseconds(1900)); - ASSERT_EQ(backoff.next().total_milliseconds(), 100); - boost::posix_time::ptime firstBackOffTime = PulsarFriend::getFirstBackoffTime(backoff); - std::this_thread::sleep_for(std::chrono::milliseconds(300)); - TimeDuration diffBackOffTime = PulsarFriend::getFirstBackoffTime(backoff) - firstBackOffTime; - ASSERT_EQ(diffBackOffTime, milliseconds(0)); // no change since reset not called - - backoff.reset(); - ASSERT_EQ(backoff.next().total_milliseconds(), 100); - diffBackOffTime = PulsarFriend::getFirstBackoffTime(backoff) - firstBackOffTime; - ASSERT_TRUE(diffBackOffTime >= milliseconds(300) && diffBackOffTime < seconds(1)); -} - -TEST(BackoffTest, basicTest) { - Backoff 
backoff(milliseconds(5), seconds(60), seconds(60)); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 5)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 10)); - - backoff.reset(); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 5)); -} - -TEST(BackoffTest, maxTest) { - Backoff backoff(milliseconds(5), milliseconds(20), milliseconds(20)); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 5)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 10)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 5)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 20)); -} - -TEST(BackoffTest, mandatoryStopTest) { - Backoff backoff(milliseconds(100), seconds(60), milliseconds(1900)); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 100)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 800)); - // would have been 1600 w/o the mandatory stop - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 3200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 6400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 12800)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 25600)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 51200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 60000)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 60000)); - - backoff.reset(); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 100)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 800)); - // would have been 1600 w/o the mandatory stop - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - - backoff.reset(); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 
100)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 800)); - - backoff.reset(); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 100)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 800)); -} - -TEST(BackoffTest, ignoringMandatoryStopTest) { - Backoff backoff(milliseconds(100), seconds(60), milliseconds(0)); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 100)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 800)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 1600)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 3200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 6400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 12800)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 25600)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 51200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 60000)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 60000)); - - backoff.reset(); - ASSERT_TRUE(checkExactAndDecrementTimer(backoff, 100)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 800)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 1600)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 3200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 6400)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 12800)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 25600)); - 
ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 51200)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 60000)); - ASSERT_TRUE(withinTenPercentAndDecrementTimer(backoff, 60000)); -} diff --git a/pulsar-client-cpp/tests/BasicEndToEndTest.cc b/pulsar-client-cpp/tests/BasicEndToEndTest.cc deleted file mode 100644 index 3431978a7ecc8..0000000000000 --- a/pulsar-client-cpp/tests/BasicEndToEndTest.cc +++ /dev/null @@ -1,4106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "HttpHelper.h" -#include "PulsarFriend.h" -#include "CustomRoutingPolicy.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -std::mutex mutex_; -static int globalCount = 0; -static long globalResendMessageCount = 0; -std::string lookupUrl = "pulsar://localhost:6650"; -static std::string adminUrl = "http://localhost:8080/"; -static int uniqueCounter = 0; - -std::string unique_str() { - long nanos = std::chrono::duration_cast( - std::chrono::steady_clock::now().time_since_epoch()) - .count(); - - return std::to_string(uniqueCounter++) + "_" + std::to_string(nanos); -} - -static void messageListenerFunction(Consumer consumer, const Message &msg) { - globalCount++; - consumer.acknowledge(msg); -} - -static void messageListenerFunctionWithoutAck(Consumer consumer, const Message &msg, Latch &latch, - const std::string &content) { - globalCount++; - ASSERT_EQ(content, msg.getDataAsString()); - latch.countdown(); -} - -static void sendCallBack(Result r, const MessageId &msgId, std::string prefix, int *count) { - static std::mutex sendMutex_; - sendMutex_.lock(); - ASSERT_EQ(r, ResultOk); - *count += 1; - sendMutex_.unlock(); -} - -static void receiveCallBack(Result r, const Message &msg, std::string &messageContent, bool checkContent, - bool *isFailed, int *count) { - static std::mutex receiveMutex_; - receiveMutex_.lock(); - - if (r == ResultOk) { - LOG_DEBUG("received msg " << msg.getDataAsString() << " expected: " << messageContent - << " count =" << *count); - if (checkContent) { - ASSERT_EQ(messageContent, msg.getDataAsString()); - } - *count += 1; - } else { - *isFailed = true; - } - receiveMutex_.unlock(); -} - -static void sendCallBackWithDelay(Result r, 
const MessageId &msgId, std::string prefix, double percentage, - uint64_t delayInMicros, int *count) { - if ((rand() % 100) <= percentage) { - std::this_thread::sleep_for(std::chrono::microseconds(delayInMicros)); - } - sendCallBack(r, msgId, prefix, count); -} - -TEST(BasicEndToEndTest, testBatchMessages) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "persistent://public/default/test-batch-messages"; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 2; - int numOfMessages = 1000; - - ProducerConfiguration conf; - conf.setCompressionType(CompressionLZ4); - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingEnabled(true); - conf.setBlockIfQueueFull(true); - conf.setProperty("producer-name", "test-producer-name"); - conf.setProperty("producer-id", "test-producer-id"); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setProperty("consumer-name", "test-consumer-name"); - consumerConfig.setProperty("consumer-id", "test-consumer-id"); - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::string prefix = "msg-batch-"; - int msgCount = 0; - for (int i 
= 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync( - msg, std::bind(&sendCallBack, std::placeholders::_1, std::placeholders::_2, prefix, &msgCount)); - LOG_DEBUG("sending message " << messageContent); - } - - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_INFO("Received Message with [ content - " - << receivedMsg.getDataAsString() << "] [ messageID = " << receivedMsg.getMessageId() << "]"); - LOG_INFO("msg-index " << receivedMsg.getProperty("msgIndex") << ", expected " << std::to_string(i)); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - // Number of messages produced - ASSERT_EQ(msgCount, numOfMessages); - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -void resendMessage(Result r, const MessageId msgId, Producer producer) { - Lock lock(mutex_); - if (r != ResultOk) { - LOG_DEBUG("globalResendMessageCount" << globalResendMessageCount); - if (++globalResendMessageCount >= 3) { - return; - } - } - lock.unlock(); - producer.sendAsync(MessageBuilder().build(), - std::bind(resendMessage, std::placeholders::_1, std::placeholders::_2, producer)); -} - -TEST(BasicEndToEndTest, testProduceConsume) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "persistent://public/default/test-produce-consume"; - std::string subName = "my-sub-name"; - Producer producer; - - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - 
ASSERT_EQ(ResultOk, result); - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send synchronously - std::string content = "msg-1-content"; - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(MessageId(-1, -1, -1, -1), msg.getMessageId()); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - ASSERT_NE(MessageId(-1, -1, -1, -1), msg.getMessageId()); - - Message receivedMsg; - consumer.receive(receivedMsg); - ASSERT_EQ(content, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - ASSERT_EQ(ResultAlreadyClosed, consumer.close()); - ASSERT_EQ(ResultOk, producer.close()); - ASSERT_EQ(ResultOk, client.close()); -} - -TEST(BasicEndToEndTest, testRedeliveryCount) { - ClientConfiguration config; - Client client(lookupUrl, config); - std::string topicName = "persistent://public/default/test-redelivery-count"; - std::string subName = "my-sub-name"; - - Producer producer; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - Promise consumerPromise; - ConsumerConfiguration consumerConf; - consumerConf.setNegativeAckRedeliveryDelayMs(500); - consumerConf.setConsumerType(ConsumerShared); - client.subscribeAsync(topicName, subName, consumerConf, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - 
std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - std::string content = "msg-content"; - Message msg = MessageBuilder().setContent(content).build(); - producer.send(msg); - - int redeliveryCount = 0; - Message msgReceived; - for (int i = 0; i < 4; i++) { - consumer.receive(msgReceived); - LOG_INFO("Received message " << msgReceived.getDataAsString()); - consumer.negativeAcknowledge(msgReceived); - redeliveryCount = msgReceived.getRedeliveryCount(); - } - - ASSERT_EQ(3, redeliveryCount); - consumer.acknowledge(msgReceived); - consumer.close(); - producer.close(); -} - -TEST(BasicEndToEndTest, testLookupThrottling) { - std::string topicName = "testLookupThrottling"; - ClientConfiguration config; - config.setConcurrentLookupRequest(0); - config.setLogger(new ConsoleLoggerFactory(Logger::LEVEL_DEBUG)); - Client client(lookupUrl, config); - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultTooManyLookupRequestException, result); - - Consumer consumer1; - result = client.subscribe(topicName, "my-sub-name", consumer1); - ASSERT_EQ(ResultTooManyLookupRequestException, result); - - client.close(); -} - -TEST(BasicEndToEndTest, testNonExistingTopic) { - Client client(lookupUrl); - Producer producer; - Result result = client.createProducer("persistent://prop//unit/ns1/testNonExistingTopic", producer); - ASSERT_EQ(ResultInvalidTopicName, result); - - Consumer consumer; - result = client.subscribe("persistent://prop//unit/ns1/testNonExistingTopic", "my-sub-name", consumer); - ASSERT_EQ(ResultInvalidTopicName, result); -} - -TEST(BasicEndToEndTest, testNonPersistentTopic) { - std::string topicName = "non-persistent://public/default/testNonPersistentTopic"; - Client client(lookupUrl); - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, 
result); - - Consumer consumer; - result = client.subscribe(topicName, "my-sub-name", consumer); - ASSERT_EQ(ResultOk, result); -} - -TEST(BasicEndToEndTest, testV2TopicProtobuf) { - std::string topicName = "testV2TopicProtobuf"; - Client client(lookupUrl); - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, "my-sub-name", consumer); - ASSERT_EQ(ResultOk, result); - - producer.close(); - consumer.close(); -} - -TEST(BasicEndToEndTest, testV2TopicHttp) { - std::string topicName = "testV2TopicHttp"; - Client client(adminUrl); - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, "my-sub-name", consumer); - ASSERT_EQ(ResultOk, result); - - producer.close(); - consumer.close(); -} - -TEST(BasicEndToEndTest, testSingleClientMultipleSubscriptions) { - std::string topicName = "testSingleClientMultipleSubscriptions"; - - Client client(lookupUrl); - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer1; - result = client.subscribe(topicName, "my-sub-name", consumer1); - ASSERT_EQ(ResultOk, result); - - Consumer consumer2; - result = client.subscribe(topicName, "my-sub-name", consumer2); - ASSERT_EQ(ResultConsumerBusy, result); - // at this point connection gets destroyed because this consumer creation fails -} - -TEST(BasicEndToEndTest, testMultipleClientsMultipleSubscriptions) { - std::string topicName = "testMultipleClientsMultipleSubscriptions"; - Client client1(lookupUrl); - Client client2(lookupUrl); - - Producer producer1; - Result result = client1.createProducer(topicName, producer1); - ASSERT_EQ(ResultOk, result); - - Consumer consumer1; - result = client1.subscribe(topicName, "my-sub-name", consumer1); - ASSERT_EQ(ResultOk, result); - - 
Consumer consumer2; - result = client2.subscribe(topicName, "my-sub-name", consumer2); - ASSERT_EQ(ResultConsumerBusy, result); - - ASSERT_EQ(ResultOk, producer1.close()); - ASSERT_EQ(ResultOk, consumer1.close()); - ASSERT_EQ(ResultAlreadyClosed, consumer1.close()); - ASSERT_EQ(ResultConsumerNotInitialized, consumer2.close()); - ASSERT_EQ(ResultOk, client1.close()); - - // 2 seconds - std::this_thread::sleep_for(std::chrono::microseconds(2 * 1000 * 1000)); - - ASSERT_EQ(ResultOk, client2.close()); -} - -TEST(BasicEndToEndTest, testProduceAndConsumeAfterClientClose) { - std::string topicName = "testProduceAndConsumeAfterClientClose"; - Client client(lookupUrl); - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, "my-sub-name", consumer); - - // Clean dangling subscription - consumer.unsubscribe(); - result = client.subscribe(topicName, "my-sub-name", consumer); - - ASSERT_EQ(ResultOk, result); - - // Send 10 messages synchronously - std::string msgContent = "msg-content"; - LOG_INFO("Publishing 10 messages synchronously"); - int numMsg = 0; - for (; numMsg < 10; numMsg++) { - Message msg = - MessageBuilder().setContent(msgContent).setProperty("msgIndex", std::to_string(numMsg)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - LOG_INFO("Trying to receive 10 messages"); - Message msgReceived; - for (int i = 0; i < 10; i++) { - consumer.receive(msgReceived, 1000); - LOG_DEBUG("Received message :" << msgReceived.getMessageId()); - ASSERT_EQ(msgContent, msgReceived.getDataAsString()); - ASSERT_EQ(std::to_string(i), msgReceived.getProperty("msgIndex")); - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(msgReceived)); - } - - LOG_INFO("Closing client"); - ASSERT_EQ(ResultOk, client.close()); - - LOG_INFO("Trying to publish a message after closing the client"); - Message msg = - 
MessageBuilder().setContent(msgContent).setProperty("msgIndex", std::to_string(numMsg)).build(); - - ASSERT_EQ(ResultAlreadyClosed, producer.send(msg)); - - LOG_INFO("Trying to consume a message after closing the client"); - ASSERT_EQ(ResultAlreadyClosed, consumer.receive(msgReceived)); -} - -TEST(BasicEndToEndTest, testIamSoFancyCharactersInTopicName) { - Client client(lookupUrl); - Producer producer; - Result result = client.createProducer("persistent://public/default/topic@%*)(&!%$#@#$> nameSpaceName = NamespaceName::get("property", "bf1", "nameSpace"); - ASSERT_STREQ(nameSpaceName->getCluster().c_str(), "bf1"); - ASSERT_STREQ(nameSpaceName->getLocalName().c_str(), "nameSpace"); - ASSERT_STREQ(nameSpaceName->getProperty().c_str(), "property"); -} - -TEST(BasicEndToEndTest, testConsumerClose) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "testConsumerClose"; - std::string subName = "my-sub-name"; - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - ASSERT_EQ(consumer.close(), ResultOk); - ASSERT_EQ(consumer.close(), ResultAlreadyClosed); -} - -TEST(BasicEndToEndTest, testDuplicateConsumerCreationOnPartitionedTopic) { - Client client(lookupUrl); - std::string topicName = "partition-testDuplicateConsumerCreationOnPartitionedTopic"; - - // call admin api to make it partitioned - std::string url = - adminUrl + - "admin/v2/persistent/public/default/testDuplicateConsumerCreationOnPartitionedTopic/partitions"; - int res = makePutRequest(url, "5"); - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - std::this_thread::sleep_for(std::chrono::microseconds(2 * 1000 * 1000)); - - Producer producer; - ProducerConfiguration producerConfiguration; - producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::CustomPartition); - producerConfiguration.setMessageRouter(std::make_shared()); - - Result result = client.createProducer(topicName, producer); - 
ASSERT_EQ(ResultOk, result); - for (int i = 0; i < 10; i++) { - boost::posix_time::ptime t(boost::posix_time::microsec_clock::universal_time()); - long nanoSeconds = t.time_of_day().total_nanoseconds(); - std::stringstream ss; - ss << nanoSeconds; - Message msg = MessageBuilder().setContent(ss.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - LOG_INFO("Creating Subscriber"); - std::string consumerId = "CONSUMER"; - ConsumerConfiguration tempConsConfig; - tempConsConfig.setConsumerType(ConsumerExclusive); - ConsumerConfiguration consConfig = tempConsConfig; - ASSERT_EQ(consConfig.getConsumerType(), ConsumerExclusive); - Consumer consumer; - Result subscribeResult = client.subscribe(topicName, consumerId, consConfig, consumer); - ASSERT_EQ(ResultOk, subscribeResult); - - LOG_INFO("Creating Another Subscriber"); - Consumer consumer2; - ASSERT_EQ(consumer2.getSubscriptionName(), ""); - subscribeResult = client.subscribe(topicName, consumerId, consConfig, consumer2); - ASSERT_EQ(ResultConsumerBusy, subscribeResult); - consumer.close(); - producer.close(); -} - -TEST(BasicEndToEndTest, testRoundRobinRoutingPolicy) { - Client client(lookupUrl); - std::string topicName = "persistent://public/default/partition-testRoundRobinRoutingPolicy"; - // call admin api to make it partitioned - std::string url = - adminUrl + "admin/v2/persistent/public/default/partition-testRoundRobinRoutingPolicy/partitions"; - int res = makePutRequest(url, "5"); - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer; - ProducerConfiguration tempProducerConfiguration; - tempProducerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - tempProducerConfiguration.setMessageRouter(std::make_shared()); - ProducerConfiguration producerConfiguration = tempProducerConfiguration; - Result result = client.createProducer(topicName, producerConfiguration, producer); - ASSERT_EQ(ResultOk, result); - 
ASSERT_EQ(producer.getTopic(), topicName); - - // Topic is partitioned into 5 partitions so each partition will receive two messages - LOG_INFO("Creating Subscriber"); - std::string consumerId = "CONSUMER"; - ConsumerConfiguration consConfig; - consConfig.setConsumerType(ConsumerExclusive); - consConfig.setReceiverQueueSize(2); - ASSERT_FALSE(consConfig.hasMessageListener()); - Consumer consumer[5]; - Result subscribeResult; - for (int i = 0; i < 5; i++) { - std::stringstream partitionedTopicName; - partitionedTopicName << topicName << "-partition-" << i; - - std::stringstream partitionedConsumerId; - partitionedConsumerId << consumerId << i; - subscribeResult = client.subscribe(partitionedTopicName.str(), partitionedConsumerId.str(), - consConfig, consumer[i]); - - ASSERT_EQ(ResultOk, subscribeResult); - ASSERT_EQ(consumer[i].getTopic(), partitionedTopicName.str()); - } - - for (int i = 0; i < 10; i++) { - boost::posix_time::ptime t(boost::posix_time::microsec_clock::universal_time()); - long nanoSeconds = t.time_of_day().total_nanoseconds(); - std::stringstream ss; - ss << nanoSeconds; - Message msg = MessageBuilder().setContent(ss.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - Message m; - for (int i = 0; i < 2; i++) { - for (int partitionIndex = 0; partitionIndex < 5; partitionIndex++) { - ASSERT_EQ(ResultOk, consumer[partitionIndex].receive(m)); - ASSERT_EQ(ResultOk, consumer[partitionIndex].acknowledge(m)); - } - } - - for (int partitionIndex = 0; partitionIndex < 5; partitionIndex++) { - consumer[partitionIndex].close(); - } - producer.close(); - client.shutdown(); -} - -TEST(BasicEndToEndTest, testMessageListener) { - Client client(lookupUrl); - std::string topicName = "partition-testMessageListener"; - // call admin api to make it partitioned - std::string url = - adminUrl + "admin/v2/persistent/public/default/partition-testMessageListener/partitions"; - int res = makePutRequest(url, "5"); - - LOG_INFO("res = " << res); - 
ASSERT_FALSE(res != 204 && res != 409); - - Producer producer; - ProducerConfiguration producerConfiguration; - producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition); - Result result = client.createProducer(topicName, producerConfiguration, producer); - - // Initializing global Count - globalCount = 0; - - ConsumerConfiguration consumerConfig; - consumerConfig.setMessageListener( - std::bind(messageListenerFunction, std::placeholders::_1, std::placeholders::_2)); - Consumer consumer; - result = client.subscribe(topicName, "subscription-A", consumerConfig, consumer); - - ASSERT_EQ(ResultOk, result); - for (int i = 0; i < 10; i++) { - boost::posix_time::ptime t(boost::posix_time::microsec_clock::universal_time()); - long nanoSeconds = t.time_of_day().total_nanoseconds(); - std::stringstream ss; - ss << nanoSeconds; - Message msg = MessageBuilder().setContent(ss.str()).setPartitionKey(ss.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - // Sleeping for 5 seconds - std::this_thread::sleep_for(std::chrono::microseconds(5 * 1000 * 1000)); - ASSERT_EQ(globalCount, 10); - consumer.close(); - producer.close(); - client.close(); -} - -TEST(BasicEndToEndTest, testMessageListenerPause) { - Client client(lookupUrl); - std::string topicName = "partition-testMessageListenerPause"; - - // call admin api to make it partitioned - std::string url = - adminUrl + "admin/v2/persistent/public/default/partition-testMessageListener-pauses/partitions"; - int res = makePutRequest(url, "5"); - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer; - ProducerConfiguration producerConfiguration; - producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition); - Result result = client.createProducer(topicName, producerConfiguration, producer); - - // Initializing global Count - globalCount = 0; - - ConsumerConfiguration consumerConfig; - consumerConfig.setMessageListener( - 
std::bind(messageListenerFunction, std::placeholders::_1, std::placeholders::_2)); - Consumer consumer; - // Removing dangling subscription from previous test failures - result = client.subscribe(topicName, "subscription-name", consumerConfig, consumer); - consumer.unsubscribe(); - - result = client.subscribe(topicName, "subscription-name", consumerConfig, consumer); - ASSERT_EQ(ResultOk, result); - int temp = 1000; - for (int i = 0; i < 10000; i++) { - if (i && i % 1000 == 0) { - std::this_thread::sleep_for(std::chrono::microseconds(2 * 1000 * 1000)); - ASSERT_EQ(globalCount, temp); - consumer.resumeMessageListener(); - std::this_thread::sleep_for(std::chrono::microseconds(2 * 1000 * 1000)); - ASSERT_EQ(globalCount, i); - temp = globalCount; - consumer.pauseMessageListener(); - } - Message msg = MessageBuilder().build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ASSERT_EQ(globalCount, temp); - consumer.resumeMessageListener(); - // Sleeping for 2 seconds - std::this_thread::sleep_for(std::chrono::microseconds(2 * 1000 * 1000)); - - ASSERT_EQ(globalCount, 10000); - consumer.close(); - producer.close(); - client.close(); -} - -TEST(BasicEndToEndTest, testResendViaSendCallback) { - ClientConfiguration clientConfiguration; - clientConfiguration.setIOThreads(1); - Client client(lookupUrl, clientConfiguration); - std::string topicName = "testResendViaListener"; - - Producer producer; - - Promise producerPromise; - ProducerConfiguration producerConfiguration; - - // Setting timeout of 1 ms - producerConfiguration.setSendTimeout(1); - client.createProducerAsync(topicName, producerConfiguration, - WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - // Send asynchronously for 3 seconds - // Expect timeouts since we have set timeout to 1 ms - // On receiving timeout send the message using the Pulsar client IO thread via cb function. 
- for (int i = 0; i < 10000; i++) { - producer.sendAsync(MessageBuilder().build(), - std::bind(resendMessage, std::placeholders::_1, std::placeholders::_2, producer)); - } - // 3 seconds - std::this_thread::sleep_for(std::chrono::microseconds(3 * 1000 * 1000)); - producer.close(); - Lock lock(mutex_); - ASSERT_GE(globalResendMessageCount, 3); -} - -TEST(BasicEndToEndTest, testStatsLatencies) { - ClientConfiguration config; - config.setIOThreads(1); - config.setMessageListenerThreads(1); - config.setStatsIntervalInSeconds(5); - Client client(lookupUrl, config); - std::string topicName = "persistent://public/default/testStatsLatencies"; - std::string subName = "subscription-name"; - Producer producer; - - // Start Producer and Consumer - int numOfMessages = 1000; - - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, subName, consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - ProducerStatsImplPtr producerStatsImplPtr = PulsarFriend::getProducerStatsPtr(producer); - - // Send Asynchronously - std::string prefix = "msg-stats-"; - int count = 0; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, std::bind(&sendCallBackWithDelay, std::placeholders::_1, - std::placeholders::_2, prefix, 15, 2 * 1e3, &count)); - LOG_DEBUG("sending message " << messageContent); - } - - // Wait for all messages to be acked by broker - while (PulsarFriend::sum(producerStatsImplPtr->getTotalSendMap()) < 
numOfMessages) { - std::this_thread::sleep_for(std::chrono::microseconds(1000)); // 1 ms - } - - // Get latencies - LatencyAccumulator totalLatencyAccumulator = producerStatsImplPtr->getTotalLatencyAccumulator(); - boost::accumulators::detail::extractor_result< - LatencyAccumulator, boost::accumulators::tag::extended_p_square>::type totalLatencies = - boost::accumulators::extended_p_square(totalLatencyAccumulator); - - LatencyAccumulator latencyAccumulator = producerStatsImplPtr->getLatencyAccumulator(); - boost::accumulators::detail::extractor_result< - LatencyAccumulator, boost::accumulators::tag::extended_p_square>::type latencies = - boost::accumulators::extended_p_square(latencyAccumulator); - - // Since 15% of the messages have a delay of - ASSERT_EQ((uint64_t)latencies[1], (uint64_t)totalLatencies[1]); - ASSERT_EQ((uint64_t)latencies[2], (uint64_t)totalLatencies[2]); - ASSERT_EQ((uint64_t)latencies[3], (uint64_t)totalLatencies[3]); - - ASSERT_GE((uint64_t)latencies[1], 20 * 100); - ASSERT_GE((uint64_t)latencies[2], 20 * 100); - ASSERT_GE((uint64_t)latencies[3], 20 * 100); - - ASSERT_GE((uint64_t)totalLatencies[1], 20 * 100); - ASSERT_GE((uint64_t)totalLatencies[2], 20 * 100); - ASSERT_GE((uint64_t)totalLatencies[3], 20 * 100); - - while (producerStatsImplPtr->getNumMsgsSent() != 0) { - std::this_thread::sleep_for(std::chrono::seconds(1)); // wait till stats flush - } - - std::this_thread::sleep_for(std::chrono::seconds(1)); // 1 second - - latencyAccumulator = producerStatsImplPtr->getLatencyAccumulator(); - latencies = boost::accumulators::extended_p_square(latencyAccumulator); - - totalLatencyAccumulator = producerStatsImplPtr->getTotalLatencyAccumulator(); - totalLatencies = boost::accumulators::extended_p_square(totalLatencyAccumulator); - - ASSERT_NE((uint64_t)latencies[1], (uint64_t)totalLatencies[1]); - ASSERT_NE((uint64_t)latencies[2], (uint64_t)totalLatencies[2]); - ASSERT_NE((uint64_t)latencies[3], (uint64_t)totalLatencies[3]); - - 
ASSERT_EQ((uint64_t)latencies[1], 0); - ASSERT_EQ((uint64_t)latencies[2], 0); - ASSERT_EQ((uint64_t)latencies[3], 0); - - ASSERT_GE((uint64_t)totalLatencies[1], 20 * 1000); - ASSERT_GE((uint64_t)totalLatencies[2], 20 * 1000); - ASSERT_GE((uint64_t)totalLatencies[3], 20 * 1000); - - Message receivedMsg; - int i = 0; - ConsumerStatsImplPtr consumerStatsImplPtr = PulsarFriend::getConsumerStatsPtr(consumer); - - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalReceivedMsgMap()), i); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalAckedMsgMap()), i - 1); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalAckedMsgMap()), i); - } - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -TEST(BasicEndToEndTest, testProduceMessageSize) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "testProduceMessageSize"; - std::string subName = "my-sub-name"; - Producer producer1; - Producer producer2; - - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer1); - ASSERT_EQ(ResultOk, result); - - Promise producerPromise2; - ProducerConfiguration conf; - conf.setCompressionType(CompressionLZ4); - conf.setBatchingEnabled(false); - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise2)); - producerFuture = producerPromise2.getFuture(); - result = 
producerFuture.get(producer2); - ASSERT_EQ(ResultOk, result); - - int size = ClientConnection::getMaxMessageSize() + 1000 * 100; - char *content = new char[size]; - memset(content, 0, size); - Message msg = MessageBuilder().setAllocatedContent(content, size).build(); - result = producer1.send(msg); - ASSERT_EQ(ResultMessageTooBig, result); - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - msg = MessageBuilder().setAllocatedContent(content, size).build(); - result = producer2.send(msg); - ASSERT_EQ(ResultOk, result); - - Message receivedMsg; - consumer.receive(receivedMsg); - ASSERT_EQ(size, receivedMsg.getDataAsString().length()); - - producer1.closeAsync(0); - producer2.closeAsync(0); - consumer.close(); - client.close(); - - delete[] content; -} - -TEST(BasicEndToEndTest, testBigMessageSizeBatching) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "testBigMessageSizeBatching"; - std::string subName = "my-sub-name"; - - ProducerConfiguration conf1; - conf1.setCompressionType(CompressionNone); - conf1.setBatchingEnabled(true); - - Producer producer1; - Result result = client.createProducer(topicName, conf1, producer1); - ASSERT_EQ(ResultOk, result); - - ProducerConfiguration conf2; - conf2.setCompressionType(CompressionLZ4); - conf2.setBatchingEnabled(true); - - Producer producer2; - result = client.createProducer(topicName, conf2, producer2); - ASSERT_EQ(ResultOk, result); - - int size = ClientConnection::getMaxMessageSize() + 1000 * 100; - char *content = new char[size]; - memset(content, 0, size); - Message msg = MessageBuilder().setAllocatedContent(content, size).build(); - result = producer1.send(msg); - ASSERT_EQ(ResultMessageTooBig, result); - - msg = MessageBuilder().setAllocatedContent(content, size).build(); 
- result = producer2.send(msg); - ASSERT_EQ(ResultOk, result); - - producer1.close(); - producer2.close(); - client.close(); - - delete[] content; -} - -TEST(BasicEndToEndTest, testHandlerReconnectionLogic) { - Client client(adminUrl); - std::string topicName = "testHandlerReconnectionLogic"; - - Producer producer; - Consumer consumer; - - ASSERT_EQ(client.subscribe(topicName, "my-sub", consumer), ResultOk); - ASSERT_EQ(client.createProducer(topicName, producer), ResultOk); - - std::vector oldConnections; - - int numOfMessages = 10; - std::string propertyName = "msgIndex"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = "msg-" + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty(propertyName, std::to_string(i)).build(); - if (i % 3 == 1) { - ProducerImpl &pImpl = PulsarFriend::getProducerImpl(producer); - ClientConnectionPtr clientConnectionPtr; - do { - ClientConnectionWeakPtr clientConnectionWeakPtr = PulsarFriend::getClientConnection(pImpl); - clientConnectionPtr = clientConnectionWeakPtr.lock(); - std::this_thread::sleep_for(std::chrono::seconds(1)); - } while (!clientConnectionPtr); - oldConnections.push_back(clientConnectionPtr); - clientConnectionPtr->close(); - } - LOG_INFO("checking message " << i); - ASSERT_EQ(producer.send(msg), ResultOk); - } - - std::set receivedMsgContent; - std::set receivedMsgIndex; - - Message msg; - while (consumer.receive(msg, 30000) == ResultOk) { - receivedMsgContent.insert(msg.getDataAsString()); - receivedMsgIndex.insert(msg.getProperty(propertyName)); - } - - ConsumerImpl &cImpl = PulsarFriend::getConsumerImpl(consumer); - ClientConnectionWeakPtr clientConnectionWeakPtr = PulsarFriend::getClientConnection(cImpl); - ClientConnectionPtr clientConnectionPtr = clientConnectionWeakPtr.lock(); - oldConnections.push_back(clientConnectionPtr); - clientConnectionPtr->close(); - - while (consumer.receive(msg, 30000) == ResultOk) { - consumer.acknowledge(msg); - 
receivedMsgContent.insert(msg.getDataAsString()); - receivedMsgIndex.insert(msg.getProperty(propertyName)); - } - - ASSERT_EQ(receivedMsgContent.size(), 10); - ASSERT_EQ(receivedMsgIndex.size(), 10); - - for (int i = 0; i < numOfMessages; i++) { - ASSERT_TRUE(receivedMsgContent.find("msg-" + std::to_string(i)) != receivedMsgContent.end()); - ASSERT_TRUE(receivedMsgIndex.find(std::to_string(i)) != receivedMsgIndex.end()); - } -} - -void testHandlerReconnectionPartitionProducers(bool lazyStartPartitionedProducers, bool batchingEnabled) { - Client client(adminUrl); - std::string uniqueChunk = unique_str(); - std::string topicName = "testHandlerReconnectionLogicLazyProducers" + uniqueChunk; - - std::string url = adminUrl + "admin/v2/persistent/public/default/" + topicName + "/partitions"; - int res = makePutRequest(url, "1"); - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - ProducerConfiguration producerConf; - producerConf.setSendTimeout(10000); - producerConf.setLazyStartPartitionedProducers(lazyStartPartitionedProducers); - producerConf.setBatchingEnabled(batchingEnabled); - Producer producer; - - ASSERT_EQ(client.createProducer(topicName, producerConf, producer), ResultOk); - - std::vector oldConnections; - - int numOfMessages = 10; - std::string propertyName = "msgIndex"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = "msg-" + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty(propertyName, std::to_string(i)).build(); - if (i % 3 == 1) { - ProducerImpl &pImpl = PulsarFriend::getInternalProducerImpl(producer, 0); - ClientConnectionPtr clientConnectionPtr; - do { - ClientConnectionWeakPtr clientConnectionWeakPtr = PulsarFriend::getClientConnection(pImpl); - clientConnectionPtr = clientConnectionWeakPtr.lock(); - std::this_thread::sleep_for(std::chrono::seconds(1)); - } while (!clientConnectionPtr); - oldConnections.push_back(clientConnectionPtr); - 
clientConnectionPtr->close(); - } - ASSERT_EQ(producer.send(msg), ResultOk); - } -} - -TEST(BasicEndToEndTest, testHandlerReconnectionPartitionedProducersWithoutBatching) { - testHandlerReconnectionPartitionProducers(false, false); -} - -TEST(BasicEndToEndTest, testHandlerReconnectionPartitionedProducersWithBatching) { - testHandlerReconnectionPartitionProducers(false, true); -} - -TEST(BasicEndToEndTest, testHandlerReconnectionLazyPartitionedProducersWithoutBatching) { - testHandlerReconnectionPartitionProducers(true, false); -} - -TEST(BasicEndToEndTest, testHandlerReconnectionLazyPartitionedProducersWithBatching) { - testHandlerReconnectionPartitionProducers(true, true); -} - -TEST(BasicEndToEndTest, testRSAEncryption) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicNames[] = {"my-rsaenctopic", "persistent://public/default-4/my-rsaenctopic"}; - std::string subName = "my-sub-name"; - Producer producer; - - std::string PUBLIC_CERT_FILE_PATH = - "../../pulsar-broker/src/test/resources/certificate/public-key.client-rsa.pem"; - - std::string PRIVATE_CERT_FILE_PATH = - "../../pulsar-broker/src/test/resources/certificate/private-key.client-rsa.pem"; - - std::shared_ptr keyReader = - std::make_shared(PUBLIC_CERT_FILE_PATH, PRIVATE_CERT_FILE_PATH); - ProducerConfiguration conf; - conf.setCompressionType(CompressionLZ4); - conf.addEncryptionKey("client-rsa.pem"); - conf.setCryptoKeyReader(keyReader); - - for (const auto &topicName : topicNames) { - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - ConsumerConfiguration consConfig; - consConfig.setCryptoKeyReader(keyReader); - // consConfig.setCryptoFailureAction(ConsumerCryptoFailureAction::CONSUME); - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, 
consConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // Send 1000 messages synchronously - std::string msgContent = "msg-content"; - LOG_INFO("Publishing 1000 messages synchronously"); - int msgNum = 0; - for (; msgNum < 1000; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - LOG_INFO("Trying to receive 1000 messages"); - Message msgReceived; - for (msgNum = 0; msgNum < 1000; msgNum++) { - consumer.receive(msgReceived, 1000); - LOG_DEBUG("Received message :" << msgReceived.getMessageId()); - std::stringstream expected; - expected << msgContent << msgNum; - ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); - } - - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - ASSERT_EQ(ResultAlreadyClosed, consumer.close()); - ASSERT_EQ(ResultOk, producer.close()); - } - ASSERT_EQ(ResultOk, client.close()); -} - -TEST(BasicEndToEndTest, testEncryptionFailure) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "my-rsaencfailtopic"; - std::string subName = "my-sub-name"; - Producer producer; - - std::string PUBLIC_CERT_FILE_PATH = - "../../pulsar-broker/src/test/resources/certificate/public-key.client-rsa-test.pem"; - - std::string PRIVATE_CERT_FILE_PATH = - "../../pulsar-broker/src/test/resources/certificate/private-key.client-rsa-test.pem"; - - std::shared_ptr keyReader = - std::make_shared(PUBLIC_CERT_FILE_PATH, PRIVATE_CERT_FILE_PATH); - - ConsumerConfiguration consConfig; - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consConfig, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = 
consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - std::string msgContent = "msg-content"; - int msgNum = 0; - int totalMsgs = 10; - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(msgContent).build(); - - // 1. Non existing key - - { - ProducerConfiguration prodConf; - prodConf.setCryptoKeyReader(keyReader); - prodConf.setBatchingEnabled(false); - prodConf.addEncryptionKey("client-non-existing-rsa.pem"); - - Promise producerPromise; - client.createProducerAsync(topicName, prodConf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - ASSERT_EQ(ResultCryptoError, producer.send(msg)); - } - - // 2. Add valid key - { - PUBLIC_CERT_FILE_PATH = - "../../pulsar-broker/src/test/resources/certificate/public-key.client-rsa.pem"; - - PRIVATE_CERT_FILE_PATH = - "../../pulsar-broker/src/test/resources/certificate/private-key.client-rsa.pem"; - keyReader = - std::make_shared(PUBLIC_CERT_FILE_PATH, PRIVATE_CERT_FILE_PATH); - ProducerConfiguration prodConf; - prodConf.setCryptoKeyReader(keyReader); - prodConf.setBatchingEnabled(false); - prodConf.addEncryptionKey("client-rsa.pem"); - - Promise producerPromise; - client.createProducerAsync(topicName, prodConf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - msgNum++; - for (; msgNum < totalMsgs; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - } - - // 3. Key reader is not set by consumer - Message msgReceived; - ASSERT_EQ(ResultTimeout, consumer.receive(msgReceived, 5000)); - ASSERT_EQ(ResultOk, consumer.close()); - - // 4. 
Set consumer config to consume even if decryption fails - consConfig.setCryptoFailureAction(ConsumerCryptoFailureAction::CONSUME); - - Promise consumerPromise2; - client.subscribeAsync(topicName, subName, consConfig, WaitForCallbackValue(consumerPromise2)); - consumerFuture = consumerPromise2.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, consumer.receive(msgReceived, 1000)); - - // Received message 0. Skip message comparision since its encrypted - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(ResultOk, consumer.close()); - - // 5. Set valid keyreader and consume messages - msgNum = 1; - consConfig.setCryptoKeyReader(keyReader); - consConfig.setCryptoFailureAction(ConsumerCryptoFailureAction::FAIL); - Promise consumerPromise3; - client.subscribeAsync(topicName, subName, consConfig, WaitForCallbackValue(consumerPromise3)); - consumerFuture = consumerPromise3.getFuture(); - result = consumerFuture.get(consumer); - - for (; msgNum < totalMsgs - 1; msgNum++) { - ASSERT_EQ(ResultOk, consumer.receive(msgReceived, 1000)); - LOG_DEBUG("Received message :" << msgReceived.getMessageId()); - std::stringstream expected; - expected << msgContent << msgNum; - ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); - } - ASSERT_EQ(ResultOk, consumer.close()); - - // 6. Discard message if decryption fails - ConsumerConfiguration consConfig2; - consConfig2.setCryptoFailureAction(ConsumerCryptoFailureAction::DISCARD); - - Promise consumerPromise4; - client.subscribeAsync(topicName, subName, consConfig2, WaitForCallbackValue(consumerPromise4)); - consumerFuture = consumerPromise4.getFuture(); - result = consumerFuture.get(consumer); - - // Since messag is discarded, no message will be received. 
- ASSERT_EQ(ResultTimeout, consumer.receive(msgReceived, 5000)); -} - -TEST(BasicEndToEndTest, testEventTime) { - ClientConfiguration config; - Client client(lookupUrl, config); - std::string topicName = "test-event-time"; - Producer producer; - ProducerConfiguration producerConf; - producerConf.setBatchingEnabled(true); - Result result = client.createProducer(topicName, producerConf, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribe(topicName, "sub", consumer); - ASSERT_EQ(ResultOk, result); - - producer.send(MessageBuilder().setContent("test").setEventTimestamp(5).build()); - - Message msg; - result = consumer.receive(msg); - ASSERT_EQ(ResultOk, result); - - ASSERT_EQ(msg.getEventTimestamp(), 5); - - consumer.close(); - producer.close(); -} - -TEST(BasicEndToEndTest, testSeek) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "persistent://public/default/testSeek"; - std::string subName = "sub-testSeek"; - Producer producer; - - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consConfig; - consConfig.setReceiverQueueSize(1); - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consConfig, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send 1000 messages synchronously - std::string msgContent = "msg-content"; - LOG_INFO("Publishing 100 messages synchronously"); - int msgNum = 0; - for (; msgNum < 100; msgNum++) { - 
std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - LOG_INFO("Trying to receive 100 messages"); - Message msgReceived; - for (msgNum = 0; msgNum < 100; msgNum++) { - consumer.receive(msgReceived, 3000); - LOG_DEBUG("Received message :" << msgReceived.getMessageId()); - std::stringstream expected; - expected << msgContent << msgNum; - ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); - } - - // seek to earliest, expected receive first message. - result = consumer.seek(MessageId::earliest()); - // Sleeping for 500ms to wait for consumer re-connect - std::this_thread::sleep_for(std::chrono::microseconds(500 * 1000)); - - ASSERT_EQ(ResultOk, result); - consumer.receive(msgReceived, 3000); - LOG_ERROR("Received message :" << msgReceived.getMessageId()); - std::stringstream expected; - msgNum = 0; - expected << msgContent << msgNum; - ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - ASSERT_EQ(ResultAlreadyClosed, consumer.close()); - ASSERT_EQ(ResultOk, producer.close()); - ASSERT_EQ(ResultOk, client.close()); -} - -TEST(BasicEndToEndTest, testSeekOnPartitionedTopic) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "persistent://public/default/testSeekOnPartitionedTopic"; - - std::string url = - adminUrl + "admin/v2/persistent/public/default/testSeekOnPartitionedTopic" + "/partitions"; - int res = makePutRequest(url, "3"); - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - std::string subName = "sub-testSeekOnPartitionedTopic"; - Producer producer; - - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); 
- Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consConfig; - consConfig.setReceiverQueueSize(1); - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consConfig, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - uint64_t timestampMillis = TimeUtils::currentTimeMillis(); - - // Send 100 messages synchronously - std::string msgContent = "msg-content"; - LOG_INFO("Publishing 100 messages synchronously"); - int msgNum = 0; - for (; msgNum < 100; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - LOG_INFO("Trying to receive 100 messages"); - Message msgReceived; - for (msgNum = 0; msgNum < 100; msgNum++) { - consumer.receive(msgReceived, 3000); - LOG_DEBUG("Received message :" << msgReceived.getMessageId()); - std::stringstream expected; - expected << msgContent << msgNum; - ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); - } - - // seek to the time before sending messages, expected receive first message. 
- result = consumer.seek(timestampMillis); - // Sleeping for 500ms to wait for consumer re-connect - std::this_thread::sleep_for(std::chrono::microseconds(500 * 1000)); - - ASSERT_EQ(ResultOk, result); - consumer.receive(msgReceived, 3000); - LOG_ERROR("Received message :" << msgReceived.getMessageId()); - std::stringstream expected; - msgNum = 0; - expected << msgContent << msgNum; - ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - ASSERT_EQ(ResultAlreadyClosed, consumer.close()); - ASSERT_EQ(ResultOk, producer.close()); - ASSERT_EQ(ResultOk, client.close()); -} - -TEST(BasicEndToEndTest, testUnAckedMessageTimeout) { - Client client(lookupUrl); - std::string topicName = "testUnAckedMessageTimeout"; - std::string subName = "my-sub-name"; - std::string content = "msg-content"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consConfig; - consConfig.setUnAckedMessagesTimeoutMs(10 * 1000); - result = client.subscribe(topicName, subName, consConfig, consumer); - ASSERT_EQ(ResultOk, result); - - Message msg = MessageBuilder().setContent(content).build(); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - - Message receivedMsg1; - MessageId msgId1; - consumer.receive(receivedMsg1); - msgId1 = receivedMsg1.getMessageId(); - ASSERT_EQ(content, receivedMsg1.getDataAsString()); - - Message receivedMsg2; - MessageId msgId2; - consumer.receive(receivedMsg2, 30 * 1000); - msgId2 = receivedMsg2.getMessageId(); - ASSERT_EQ(content, receivedMsg2.getDataAsString()); - - ASSERT_EQ(msgId1, msgId2); - - consumer.unsubscribe(); - consumer.close(); - producer.close(); - client.close(); -} - -static long messagesReceived = 0; - -static void unackMessageListenerFunction(Consumer consumer, const Message &msg) { messagesReceived++; } - 
-TEST(BasicEndToEndTest, testPartitionTopicUnAckedMessageTimeout) { - Client client(lookupUrl); - long unAckedMessagesTimeoutMs = 10000; - - std::string uniqueChunk = unique_str(); - std::string topicName = - "persistent://public/default/testPartitionTopicUnAckedMessageTimeout" + uniqueChunk; - - // call admin api to make it partitioned - std::string url = adminUrl + - "admin/v2/persistent/public/default/testPartitionTopicUnAckedMessageTimeout" + - uniqueChunk + "/partitions"; - int res = makePutRequest(url, "3"); - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - std::string subName = "my-sub-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consConfig; - consConfig.setMessageListener( - std::bind(unackMessageListenerFunction, std::placeholders::_1, std::placeholders::_2)); - consConfig.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - result = client.subscribe(topicName, subName, consConfig, consumer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - for (int i = 0; i < 10; i++) { - Message msg = MessageBuilder().setContent("test-" + std::to_string(i)).build(); - producer.sendAsync(msg, nullptr); - } - - producer.flush(); - long timeWaited = 0; - while (true) { - // maximum wait time - ASSERT_LE(timeWaited, unAckedMessagesTimeoutMs * 3); - if (messagesReceived >= 10 * 2) { - break; - } - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - timeWaited += 500; - } - - client.close(); -} - -TEST(BasicEndToEndTest, testUnAckedMessageTimeoutListener) { - Client client(lookupUrl); - std::string topicName = "testUnAckedMessageTimeoutListener"; - std::string subName = "my-sub-name"; - std::string content = "msg-content"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - 
ConsumerConfiguration consConfig; - consConfig.setUnAckedMessagesTimeoutMs(10 * 1000); - Latch latch(2); - consConfig.setMessageListener(std::bind(messageListenerFunctionWithoutAck, std::placeholders::_1, - std::placeholders::_2, latch, content)); - result = client.subscribe(topicName, subName, consConfig, consumer); - ASSERT_EQ(ResultOk, result); - - globalCount = 0; - - Message msg = MessageBuilder().setContent(content).build(); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - - ASSERT_TRUE(latch.wait(std::chrono::seconds(30))); - ASSERT_GE(globalCount, 2); - - consumer.unsubscribe(); - consumer.close(); - producer.close(); - client.close(); -} - -TEST(BasicEndToEndTest, testMultiTopicsConsumerTopicNameInvalid) { - Client client(lookupUrl); - std::vector topicNames; - topicNames.reserve(3); - std::string subName = "testMultiTopicsTopicNameInvalid"; - // cluster empty - std::string topicName1 = "persistent://tenant/testMultiTopicsTopicNameInvalid"; - - // empty topics - ASSERT_EQ(0, topicNames.size()); - ConsumerConfiguration consConfig; - consConfig.setConsumerType(ConsumerShared); - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicNames, subName, consConfig, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - LOG_INFO("subscribe on empty topics"); - consumer.close(); - - // Invalid topic names - Consumer consumer1; - std::string subName1 = "testMultiTopicsTopicNameInvalid"; - topicNames.push_back(topicName1); - Promise consumerPromise1; - client.subscribeAsync(topicNames, subName1, consConfig, WaitForCallbackValue(consumerPromise1)); - Future consumerFuture1 = consumerPromise1.getFuture(); - result = consumerFuture1.get(consumer1); - ASSERT_EQ(ResultInvalidTopicName, result); - LOG_INFO("subscribe on TopicName1 failed"); - consumer1.close(); - - client.shutdown(); -} - -TEST(BasicEndToEndTest, 
testMultiTopicsConsumerConnectError) { - Client client("pulsar://invalid-hostname:6650"); - std::vector topicNames; - topicNames.push_back("topic-1"); - topicNames.push_back("topic-2"); - - Consumer consumer; - Result res = client.subscribe(topicNames, "sub", consumer); - ASSERT_EQ(ResultConnectError, res); - - client.shutdown(); -} - -TEST(BasicEndToEndTest, testMultiTopicsConsumerDifferentNamespace) { - Client client(lookupUrl); - std::vector topicNames; - topicNames.reserve(3); - std::string subName = "testMultiTopicsDifferentNamespace"; - std::string topicName1 = "persistent://public/default/testMultiTopicsConsumerDifferentNamespace1"; - std::string topicName2 = "persistent://public/default-2/testMultiTopicsConsumerDifferentNamespace2"; - std::string topicName3 = "persistent://public/default-3/testMultiTopicsConsumerDifferentNamespace3"; - - topicNames.push_back(topicName1); - topicNames.push_back(topicName2); - topicNames.push_back(topicName3); - - // key: message value integer, value: a pair of (topic, message id) - using MessageInfo = std::pair; - std::map messageIndexToInfo; - int index = 0; - // Produce some messages for each topic - for (const auto &topic : topicNames) { - Producer producer; - ProducerConfiguration producerConfig; - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConfig, producer)); - - const auto message = MessageBuilder().setContent(std::to_string(index)).build(); - MessageId messageId; - ASSERT_EQ(ResultOk, producer.send(message, messageId)); - messageIndexToInfo[index] = std::make_pair(topic, messageId); - LOG_INFO("Send " << index << " to " << topic << ", " << messageId); - - ASSERT_EQ(ResultOk, producer.close()); - index++; - } - - ConsumerConfiguration consConfig; - consConfig.setSubscriptionInitialPosition(InitialPositionEarliest); - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicNames, subName, consConfig, consumer)); - - for (int i = 0; i < index; i++) { - Message message; - ASSERT_EQ(ResultOk, 
consumer.receive(message, 3000)); - ASSERT_EQ(ResultOk, consumer.acknowledge(message)); - const int index = std::stoi(message.getDataAsString()); - LOG_INFO("Receive " << index << " from " << message.getTopicName() << "," << message.getMessageId()); - ASSERT_EQ(messageIndexToInfo.count(index), 1); - ASSERT_EQ(messageIndexToInfo[index], std::make_pair(message.getTopicName(), message.getMessageId())); - } - - consumer.close(); - - client.shutdown(); -} - -// Test subscribe 3 topics using MultiTopicsConsumer -TEST(BasicEndToEndTest, testMultiTopicsConsumerPubSub) { - Client client(lookupUrl); - std::vector topicNames; - topicNames.reserve(3); - std::string subName = "testMultiTopicsConsumer"; - std::string topicName1 = "testMultiTopicsConsumer1"; - std::string topicName2 = "testMultiTopicsConsumer2"; - std::string topicName3 = "testMultiTopicsConsumer3"; - std::string topicName4 = "testMultiTopicsConsumer4"; - - topicNames.push_back(topicName1); - topicNames.push_back(topicName2); - topicNames.push_back(topicName3); - topicNames.push_back(topicName4); - - // call admin api to make topics partitioned - std::string url1 = adminUrl + "admin/v2/persistent/public/default/testMultiTopicsConsumer1/partitions"; - std::string url2 = adminUrl + "admin/v2/persistent/public/default/testMultiTopicsConsumer2/partitions"; - std::string url3 = adminUrl + "admin/v2/persistent/public/default/testMultiTopicsConsumer3/partitions"; - - int res = makePutRequest(url1, "2"); - ASSERT_FALSE(res != 204 && res != 409); - res = makePutRequest(url2, "3"); - ASSERT_FALSE(res != 204 && res != 409); - res = makePutRequest(url3, "4"); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer1; - Result result = client.createProducer(topicName1, producer1); - ASSERT_EQ(ResultOk, result); - Producer producer2; - result = client.createProducer(topicName2, producer2); - ASSERT_EQ(ResultOk, result); - Producer producer3; - result = client.createProducer(topicName3, producer3); - ASSERT_EQ(ResultOk, 
result); - - Producer producer4; - result = client.createProducer(topicName4, producer4); - ASSERT_EQ(ResultOk, result); - - LOG_INFO("created 4 producers"); - - int messageNumber = 100; - ConsumerConfiguration consConfig; - consConfig.setConsumerType(ConsumerShared); - consConfig.setReceiverQueueSize(10); // size for each sub-consumer - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicNames, subName, consConfig, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - LOG_INFO("created topics consumer on 4 topics"); - - std::string msgContent = "msg-content"; - LOG_INFO("Publishing 100 messages by producer 1 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer1.send(msg)); - } - - msgContent = "msg-content2"; - LOG_INFO("Publishing 100 messages by producer 2 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer2.send(msg)); - } - - msgContent = "msg-content3"; - LOG_INFO("Publishing 100 messages by producer 3 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer3.send(msg)); - } - - msgContent = "msg-content4"; - LOG_INFO("Publishing 100 messages by producer 4 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = 
MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer4.send(msg)); - } - - LOG_INFO("Consuming and acking 400 messages by multiTopicsConsumer"); - for (int i = 0; i < 4 * messageNumber; i++) { - Message m; - ASSERT_EQ(ResultOk, consumer.receive(m, 10000)); - ASSERT_EQ(ResultOk, consumer.acknowledge(m)); - } - - LOG_INFO("Consumed and acked 400 messages by multiTopicsConsumer"); - - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - - client.shutdown(); -} - -TEST(BasicEndToEndTest, testPatternTopicsConsumerInvalid) { - Client client(lookupUrl); - - // invalid namespace - std::string pattern = "invalidDomain://prop/unit/ns/patternMultiTopicsConsumerInvalid.*"; - std::string subName = "testPatternMultiTopicsConsumerInvalid"; - - Consumer consumer; - Promise consumerPromise; - client.subscribeWithRegexAsync(pattern, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = consumerFuture.get(consumer); - ASSERT_EQ(ResultInvalidTopicName, result); - - client.shutdown(); -} - -// create 4 topics, in which 3 topics match the pattern, -// verify PatternMultiTopicsConsumer subscribed matched topics, -// and only receive messages from matched topics. 
-TEST(BasicEndToEndTest, testPatternMultiTopicsConsumerPubSub) { - Client client(lookupUrl); - std::string pattern = "persistent://public/default/patternMultiTopicsConsumer.*"; - - std::string subName = "testPatternMultiTopicsConsumer"; - std::string topicName1 = "persistent://public/default/patternMultiTopicsConsumerPubSub1"; - std::string topicName2 = "persistent://public/default/patternMultiTopicsConsumerPubSub2"; - std::string topicName3 = "persistent://public/default/patternMultiTopicsConsumerPubSub3"; - // This will not match pattern - std::string topicName4 = "persistent://public/default/patternMultiTopicsNotMatchPubSub4"; - - // call admin api to make topics partitioned - std::string url1 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsConsumerPubSub1/partitions"; - std::string url2 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsConsumerPubSub2/partitions"; - std::string url3 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsConsumerPubSub3/partitions"; - std::string url4 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsNotMatchPubSub4/partitions"; - - makeDeleteRequest(url1); - int res = makePutRequest(url1, "2"); - ASSERT_FALSE(res != 204 && res != 409); - makeDeleteRequest(url2); - res = makePutRequest(url2, "3"); - ASSERT_FALSE(res != 204 && res != 409); - makeDeleteRequest(url3); - res = makePutRequest(url3, "4"); - ASSERT_FALSE(res != 204 && res != 409); - makeDeleteRequest(url4); - res = makePutRequest(url4, "4"); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer1; - Result result = client.createProducer(topicName1, producer1); - ASSERT_EQ(ResultOk, result); - Producer producer2; - result = client.createProducer(topicName2, producer2); - ASSERT_EQ(ResultOk, result); - Producer producer3; - result = client.createProducer(topicName3, producer3); - ASSERT_EQ(ResultOk, result); - Producer producer4; - result = client.createProducer(topicName4, producer4); - 
ASSERT_EQ(ResultOk, result); - - LOG_INFO("created 3 producers that match, with partitions: 2, 3, 4, and 1 producer not match"); - - int messageNumber = 100; - ConsumerConfiguration consConfig; - consConfig.setConsumerType(ConsumerShared); - consConfig.setReceiverQueueSize(10); // size for each sub-consumer - Consumer consumer; - Promise consumerPromise; - client.subscribeWithRegexAsync(pattern, subName, consConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - LOG_INFO("created topics consumer on a pattern that match 3 topics"); - - std::string msgContent = "msg-content"; - LOG_INFO("Publishing 100 messages by producer 1 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer1.send(msg)); - } - - msgContent = "msg-content2"; - LOG_INFO("Publishing 100 messages by producer 2 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer2.send(msg)); - } - - msgContent = "msg-content3"; - LOG_INFO("Publishing 100 messages by producer 3 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer3.send(msg)); - } - - msgContent = "msg-content4"; - LOG_INFO("Publishing 100 messages by producer 4 synchronously"); - for (int msgNum = 0; msgNum < messageNumber; msgNum++) { - std::stringstream stream; - stream << msgContent << msgNum; - Message msg = 
MessageBuilder().setContent(stream.str()).build(); - ASSERT_EQ(ResultOk, producer4.send(msg)); - } - - LOG_INFO("Consuming and acking 300 messages by multiTopicsConsumer"); - for (int i = 0; i < 3 * messageNumber; i++) { - Message m; - ASSERT_EQ(ResultOk, consumer.receive(m, 1000)); - ASSERT_EQ(ResultOk, consumer.acknowledge(m)); - } - LOG_INFO("Consumed and acked 300 messages by multiTopicsConsumer"); - - // verify no more to receive, because producer4 not match pattern - Message m; - ASSERT_EQ(ResultTimeout, consumer.receive(m, 1000)); - - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - - client.shutdown(); -} - -// User adminUrl to create client, to protect http related services -TEST(BasicEndToEndTest, testpatternMultiTopicsHttpConsumerPubSub) { - Client client(adminUrl); - std::string pattern = "persistent://public/default/patternMultiTopicsHttpConsumer.*"; - - std::string subName = "testpatternMultiTopicsHttpConsumer"; - std::string topicName1 = "persistent://public/default/patternMultiTopicsHttpConsumerPubSub1"; - std::string topicName2 = "persistent://public/default/patternMultiTopicsHttpConsumerPubSub2"; - std::string topicName3 = "persistent://public/default/patternMultiTopicsHttpConsumerPubSub3"; - - // call admin api to make topics partitioned - std::string url1 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsHttpConsumerPubSub1/partitions"; - std::string url2 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsHttpConsumerPubSub2/partitions"; - std::string url3 = - adminUrl + "admin/v2/persistent/public/default/patternMultiTopicsHttpConsumerPubSub3/partitions"; - - makeDeleteRequest(url1); - int res = makePutRequest(url1, "2"); - ASSERT_FALSE(res != 204 && res != 409); - makeDeleteRequest(url2); - res = makePutRequest(url2, "3"); - ASSERT_FALSE(res != 204 && res != 409); - makeDeleteRequest(url3); - res = makePutRequest(url3, "4"); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer1; - Result result = 
client.createProducer(topicName1, producer1);
    ASSERT_EQ(ResultOk, result);
    Producer producer2;
    result = client.createProducer(topicName2, producer2);
    ASSERT_EQ(ResultOk, result);
    Producer producer3;
    result = client.createProducer(topicName3, producer3);
    ASSERT_EQ(ResultOk, result);

    LOG_INFO("created 3 producers that match, with partitions: 2, 3, 4");

    int messageNumber = 100;
    ConsumerConfiguration consConfig;
    consConfig.setConsumerType(ConsumerShared);
    consConfig.setReceiverQueueSize(10);  // size for each sub-consumer
    Consumer consumer;
    // NOTE(review): template args on Promise/Future/WaitForCallbackValue appear stripped
    // by extraction — restore against upstream.
    Promise consumerPromise;
    client.subscribeWithRegexAsync(pattern, subName, consConfig,
                                   WaitForCallbackValue(consumerPromise));
    Future consumerFuture = consumerPromise.getFuture();
    result = consumerFuture.get(consumer);
    ASSERT_EQ(ResultOk, result);
    ASSERT_EQ(consumer.getSubscriptionName(), subName);
    LOG_INFO("created topics consumer on a pattern that match 3 topics");

    std::string msgContent = "msg-content";
    LOG_INFO("Publishing 100 messages by producer 1 synchronously");
    for (int msgNum = 0; msgNum < messageNumber; msgNum++) {
        std::stringstream stream;
        stream << msgContent << msgNum;
        Message msg = MessageBuilder().setContent(stream.str()).build();
        ASSERT_EQ(ResultOk, producer1.send(msg));
    }

    msgContent = "msg-content2";
    LOG_INFO("Publishing 100 messages by producer 2 synchronously");
    for (int msgNum = 0; msgNum < messageNumber; msgNum++) {
        std::stringstream stream;
        stream << msgContent << msgNum;
        Message msg = MessageBuilder().setContent(stream.str()).build();
        ASSERT_EQ(ResultOk, producer2.send(msg));
    }

    msgContent = "msg-content3";
    LOG_INFO("Publishing 100 messages by producer 3 synchronously");
    for (int msgNum = 0; msgNum < messageNumber; msgNum++) {
        std::stringstream stream;
        stream << msgContent << msgNum;
        Message msg = MessageBuilder().setContent(stream.str()).build();
        ASSERT_EQ(ResultOk, producer3.send(msg));
    }

    // All 3 topics match here, so all 300 messages must be delivered.
    LOG_INFO("Consuming and acking 300 messages by multiTopicsConsumer");
    for (int i = 0; i < 3 * messageNumber; i++) {
        Message m;
        ASSERT_EQ(ResultOk, consumer.receive(m, 1000));
        ASSERT_EQ(ResultOk, consumer.acknowledge(m));
    }
    LOG_INFO("Consumed and acked 300 messages by multiTopicsConsumer");

    // verify no more to receive
    Message m;
    ASSERT_EQ(ResultTimeout, consumer.receive(m, 1000));

    ASSERT_EQ(ResultOk, consumer.unsubscribe());

    client.shutdown();
}

// Subscribing with a regex that matches zero topics must still succeed and
// allow a clean unsubscribe.
TEST(BasicEndToEndTest, testPatternEmptyUnsubscribe) {
    Client client(lookupUrl);
    std::string pattern = "persistent://public/default/patternEmptyUnsubscribe.*";

    std::string subName = "testPatternMultiTopicsConsumer";

    ConsumerConfiguration consConfig;
    Consumer consumer;
    Result result = client.subscribeWithRegex(pattern, subName, consConfig, consumer);
    ASSERT_EQ(ResultOk, result);
    ASSERT_EQ(consumer.getSubscriptionName(), subName);
    LOG_INFO("created topics consumer on a pattern that match 0 topics");

    result = consumer.unsubscribe();
    LOG_INFO("unsubscribed topics consumer : " << result);
    ASSERT_EQ(ResultOk, result) << "expected " << ResultOk << " but found " << result;

    // TODO: flaky test
    // client.shutdown();
}

// create a pattern consumer, which contains no match topics at beginning.
// create 4 topics, in which 3 topics match the pattern.
// verify PatternMultiTopicsConsumer subscribed matched topics, after a while,
// and only receive messages from matched topics.
TEST(BasicEndToEndTest, testPatternMultiTopicsConsumerAutoDiscovery) {
    Client client(lookupUrl);
    std::string pattern = "persistent://public/default/patternTopicsAutoConsumer.*";
    Result result;
    std::string subName = "testPatternTopicsAutoConsumer";

    // 1. create a pattern consumer, which contains no match topics at beginning.
- ConsumerConfiguration consConfig; - consConfig.setConsumerType(ConsumerShared); - consConfig.setReceiverQueueSize(10); // size for each sub-consumer - consConfig.setPatternAutoDiscoveryPeriod(1); // set waiting time for auto discovery - Consumer consumer; - Promise consumerPromise; - client.subscribeWithRegexAsync(pattern, subName, consConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - LOG_INFO("created pattern consumer with not match topics at beginning"); - - auto createProducer = [&client](Producer &producer, const std::string &topic, int numPartitions) { - if (numPartitions > 0) { - const std::string url = adminUrl + "admin/v2/persistent/public/default/" + topic + "/partitions"; - makeDeleteRequest(url); - int res = makePutRequest(url, std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409); - } - - const std::string fullTopicName = "persistent://public/default/" + topic; - Result result = client.createProducer(fullTopicName, producer); - ASSERT_EQ(ResultOk, result); - }; - - // 2. create 4 topics, in which 3 match the pattern. 
// NOTE(review): originally std::vector<Producer>; template args stripped by extraction.
std::vector producers(4);
    createProducer(producers[0], "patternTopicsAutoConsumerPubSub1", 2);
    createProducer(producers[1], "patternTopicsAutoConsumerPubSub2", 3);
    createProducer(producers[2], "patternTopicsAutoConsumerPubSub3", 4);
    // This will not match pattern
    createProducer(producers[3], "notMatchPatternTopicsAutoConsumerPubSub4", 4);

    constexpr int messageNumber = 100;

    // Consume on a separate thread so receive() is already blocking while
    // topic auto-discovery kicks in.
    std::thread consumeThread([&consumer] {
        LOG_INFO("Consuming and acking 300 messages by pattern topics consumer");
        for (int i = 0; i < 3 * messageNumber; i++) {
            Message m;
            // Ensure new topics can be discovered when the consumer is blocked by receive(Message&, int)
            ASSERT_EQ(ResultOk, consumer.receive(m, 30000));
            ASSERT_EQ(ResultOk, consumer.acknowledge(m));
        }
        // 5. pattern consumer already subscribed 3 topics
        LOG_INFO("Consumed and acked 300 messages by pattern topics consumer");

        // verify no more to receive, because producers[3] not match pattern
        Message m;
        ASSERT_EQ(ResultTimeout, consumer.receive(m, 1000));
    });

    // 3. wait enough time to trigger auto discovery
    std::this_thread::sleep_for(std::chrono::seconds(2));

    // 4. produce data.
    for (size_t i = 0; i < producers.size(); i++) {
        const std::string msgContent = "msg-content" + std::to_string(i);
        LOG_INFO("Publishing " << messageNumber << " messages by producer " << i << " synchronously");
        for (int j = 0; j < messageNumber; j++) {
            Message msg = MessageBuilder().setContent(msgContent).build();
            ASSERT_EQ(ResultOk, producers[i].send(msg));
        }
    }

    consumeThread.join();

    consumeThread = std::thread([&consumer] {
        LOG_INFO("Consuming and acking 100 messages by pattern topics consumer");
        for (int i = 0; i < messageNumber; i++) {
            Message m;
            // Ensure new topics can be discovered when the consumer is blocked by receive(Message&)
            ASSERT_EQ(ResultOk, consumer.receive(m));
            ASSERT_EQ(ResultOk, consumer.acknowledge(m));
        }
        // 9. pattern consumer subscribed a new topic
        LOG_INFO("Consumed and acked 100 messages by pattern topics consumer");

        // verify no more to receive
        Message m;
        ASSERT_EQ(ResultTimeout, consumer.receive(m, 1000));
    });
    // 6. Create a producer to a new topic
    createProducer(producers[0], "patternTopicsAutoConsumerPubSub5", 4);

    // 7. wait enough time to trigger auto discovery
    std::this_thread::sleep_for(std::chrono::seconds(2));

    // 8. produce data
    for (int i = 0; i < messageNumber; i++) {
        Message msg = MessageBuilder().setContent("msg-content-5").build();
        ASSERT_EQ(ResultOk, producers[0].send(msg));
    }

    consumeThread.join();
    ASSERT_EQ(ResultOk, consumer.unsubscribe());
    client.shutdown();
}

// Verifies that a full batch (batchingMaxMessages reached) is flushed to the broker
// even though the max publish delay (60s) has not elapsed.
TEST(BasicEndToEndTest, testSyncFlushBatchMessages) {
    ClientConfiguration config;
    Client client(lookupUrl);
    std::string topicName = "test-flush-batch-messages-" + std::to_string(time(NULL));
    std::string subName = "subscription-name";
    Producer producer;

    int numOfMessages = 10;

    ProducerConfiguration conf;

    conf.setBatchingEnabled(true);
    // set batch message number numOfMessages, and max delay 60s
    conf.setBatchingMaxMessages(numOfMessages);
    conf.setBatchingMaxPublishDelayMs(60000);

    conf.setBlockIfQueueFull(true);
    conf.setProperty("producer-name", "test-producer-name");
    conf.setProperty("producer-id", "test-producer-id");

    Promise producerPromise;
    client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise));
    Future producerFuture = producerPromise.getFuture();
    Result result = producerFuture.get(producer);
    ASSERT_EQ(ResultOk, result);

    Consumer consumer;
    ConsumerConfiguration consumerConfig;
    consumerConfig.setProperty("consumer-name", "test-consumer-name");
    consumerConfig.setProperty("consumer-id", "test-consumer-id");
    Promise consumerPromise;
    client.subscribeAsync(topicName, subName, consumerConfig,
                          WaitForCallbackValue(consumerPromise));
    Future consumerFuture =
consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // Send Asynchronously of half the messages - std::string prefix = "msg-batch-async"; - int msgCount = 0; - for (int i = 0; i < numOfMessages / 2; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync( - msg, std::bind(&sendCallBack, std::placeholders::_1, std::placeholders::_2, prefix, &msgCount)); - LOG_DEBUG("async sending message " << messageContent); - } - LOG_INFO("sending first half messages in async, should timeout to receive"); - - // message not reached max batch number, should not receive any data. - Message receivedMsg; - ASSERT_EQ(ResultTimeout, consumer.receive(receivedMsg, 1000)); - - // Send Asynchronously of the other half the messages - for (int i = numOfMessages / 2; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync( - msg, std::bind(&sendCallBack, std::placeholders::_1, std::placeholders::_2, prefix, &msgCount)); - LOG_DEBUG("async sending message " << messageContent); - } - LOG_INFO("sending the other half messages in async, should able to receive"); - // message not reached max batch number, should received the messages - ASSERT_EQ(ResultOk, consumer.receive(receivedMsg, 2000)); - - LOG_INFO("Receive all messages"); - // receive all the messages. 
- int i = 1; - while (consumer.receive(receivedMsg, 1000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_INFO("Received Message with [ content - " - << receivedMsg.getDataAsString() << "] [ messageID = " << receivedMsg.getMessageId() << "]" - << "property = " << receivedMsg.getProperty("msgIndex")); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - - LOG_INFO("Last sync send round"); - // Send sync of half the messages, this will triggerFlush, and could get the messages. - prefix = "msg-batch-sync"; - for (int i = 0; i < numOfMessages / 2; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.send(msg); - LOG_INFO("sync sending message " << messageContent); - } - // message not reached max batch number, should received the messages, and not timeout - ASSERT_EQ(ResultOk, consumer.receive(receivedMsg, 1000)); - - producer.close(); - client.shutdown(); -} - -// for partitioned reason, it may hard to verify message id. 
-static void simpleCallback(Result code, const MessageId &msgId) { - LOG_INFO("Received code: " << code << " -- MsgID: " << msgId); -} - -void testSyncFlushBatchMessagesPartitionedTopic(bool lazyStartPartitionedProducers) { - Client client(lookupUrl); - std::string uniqueChunk = unique_str(); - std::string topicName = "persistent://public/default/partition-testSyncFlushBatchMessages" + uniqueChunk; - // call admin api to make it partitioned - std::string url = adminUrl + "admin/v2/persistent/public/default/partition-testSyncFlushBatchMessages" + - uniqueChunk + "/partitions"; - int res = makePutRequest(url, "5"); - const int numberOfPartitions = 5; - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer; - int numOfMessages = 20; - // lazy partitioned producers make a single call to the message router during createProducer - int initPart = lazyStartPartitionedProducers ? 1 : 0; - ProducerConfiguration tempProducerConfiguration; - tempProducerConfiguration.setMessageRouter(std::make_shared()); - ProducerConfiguration producerConfiguration = tempProducerConfiguration; - producerConfiguration.setBatchingEnabled(true); - // set batch message number numOfMessages, and max delay 60s - producerConfiguration.setBatchingMaxMessages(numOfMessages / numberOfPartitions); - producerConfiguration.setBatchingMaxPublishDelayMs(60000); - producerConfiguration.setLazyStartPartitionedProducers(lazyStartPartitionedProducers); - - Result result = client.createProducer(topicName, producerConfiguration, producer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(producer.getTopic(), topicName); - - // Topic is partitioned into 5 partitions so each partition will receive two messages - LOG_INFO("Creating Subscriber"); - std::string consumerId = "CONSUMER"; - ConsumerConfiguration consConfig; - consConfig.setConsumerType(ConsumerExclusive); - consConfig.setReceiverQueueSize(2); - ASSERT_FALSE(consConfig.hasMessageListener()); - std::vector 
consumer(numberOfPartitions);
    Result subscribeResult;
    // One exclusive consumer per physical partition ("<topic>-partition-<i>").
    // Subscribe/unsubscribe once first to clear any dangling subscription.
    for (int i = 0; i < numberOfPartitions; i++) {
        std::stringstream partitionedTopicName;
        partitionedTopicName << topicName << "-partition-" << i;

        std::stringstream partitionedConsumerId;
        partitionedConsumerId << consumerId << i;
        client.subscribe(partitionedTopicName.str(), partitionedConsumerId.str(), consConfig, consumer[i]);
        consumer[i].unsubscribe();
        subscribeResult = client.subscribe(partitionedTopicName.str(), partitionedConsumerId.str(),
                                           consConfig, consumer[i]);

        ASSERT_EQ(ResultOk, subscribeResult);
        ASSERT_EQ(consumer[i].getTopic(), partitionedTopicName.str());
    }

    // Send asynchronously of first part the messages
    std::string prefix = "msg-batch-async";
    for (int i = 0; i < numOfMessages / numberOfPartitions / 2; i++) {
        std::string messageContent = prefix + std::to_string(i);
        Message msg =
            MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build();
        producer.sendAsync(msg, simpleCallback);
        LOG_DEBUG("async sending message " << messageContent);
    }
    LOG_INFO("sending first part messages in async, should timeout to receive");

    Message m;
    // Batch not yet full on any partition -> nothing delivered.
    ASSERT_EQ(ResultTimeout, consumer[initPart].receive(m, 5000));

    for (int i = numOfMessages / numberOfPartitions / 2; i < numOfMessages; i++) {
        std::string messageContent = prefix + std::to_string(i);
        Message msg =
            MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build();
        producer.sendAsync(msg, simpleCallback);
        LOG_DEBUG("async sending message " << messageContent);
    }
    LOG_INFO("sending second part messages in async, should be able to receive");

    for (int i = 0; i < numOfMessages / numberOfPartitions; i++) {
        for (int partitionIndex = 0; partitionIndex < numberOfPartitions; partitionIndex++) {
            ASSERT_EQ(ResultOk, consumer[partitionIndex].receive(m));
            ASSERT_EQ(ResultOk, consumer[partitionIndex].acknowledge(m));
        }
    }

    // Sync send of first part of the messages, this will triggerFlush, and could get the messages.
    prefix = "msg-batch-sync";
    for (int i = 0; i < numOfMessages / numberOfPartitions / 2; i++) {
        std::string messageContent = prefix + std::to_string(i);
        Message msg =
            MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build();
        ASSERT_EQ(ResultOk, producer.send(msg));
        LOG_DEBUG("sync sending message " << messageContent);
    }

    LOG_INFO("sending first part messages in sync, should not timeout to receive");
    ASSERT_EQ(ResultOk, consumer[initPart].receive(m, 10000));

    producer.close();
    client.shutdown();
}

TEST(BasicEndToEndTest, testSyncFlushBatchMessagesPartitionedTopic) {
    testSyncFlushBatchMessagesPartitionedTopic(false);
}

TEST(BasicEndToEndTest, testSyncFlushBatchMessagesPartitionedTopicLazyProducers) {
    testSyncFlushBatchMessagesPartitionedTopic(true);
}

// getPartitionsForTopic: a 3-partition topic yields its three "-partition-N" names;
// a non-partitioned topic yields itself as the single entry.
TEST(BasicEndToEndTest, testGetTopicPartitions) {
    Client client(lookupUrl);
    std::string topicName = "persistent://public/default/testGetPartitions";

    // call admin api to make it partitioned
    std::string url = adminUrl + "admin/v2/persistent/public/default/testGetPartitions/partitions";
    int res = makePutRequest(url, "3");

    LOG_INFO("res = " << res);
    ASSERT_FALSE(res != 204 && res != 409);
    // NOTE(review): originally std::vector<std::string>; template args stripped by extraction.
    std::vector partitionsList;
    Result result = client.getPartitionsForTopic(topicName, partitionsList);
    ASSERT_EQ(ResultOk, result);
    ASSERT_EQ(3, partitionsList.size());
    ASSERT_EQ(topicName + "-partition-0", partitionsList[0]);
    ASSERT_EQ(topicName + "-partition-1", partitionsList[1]);
    ASSERT_EQ(topicName + "-partition-2", partitionsList[2]);

    std::vector partitionsList2;
    result = client.getPartitionsForTopic("persistent://public/default/testGetPartitions-non-partitioned",
                                          partitionsList2);
    ASSERT_EQ(ResultOk, result);
    ASSERT_EQ(1, partitionsList2.size());
    ASSERT_EQ(partitionsList2[0],
"persistent://public/default/testGetPartitions-non-partitioned"); - - client.shutdown(); -} - -TEST(BasicEndToEndTest, testFlushInProducer) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "test-flush-in-producer"; - std::string subName = "subscription-name"; - Producer producer; - int numOfMessages = 10; - - ProducerConfiguration conf; - conf.setBatchingEnabled(true); - // set batch message number numOfMessages, and max delay 60s - conf.setBatchingMaxMessages(numOfMessages); - conf.setBatchingMaxPublishDelayMs(60000); - - conf.setBlockIfQueueFull(true); - conf.setProperty("producer-name", "test-producer-name"); - conf.setProperty("producer-id", "test-producer-id"); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setProperty("consumer-name", "test-consumer-name"); - consumerConfig.setProperty("consumer-id", "test-consumer-id"); - Promise consumerPromise; - client.subscribe(topicName, subName, consumerConfig, consumer); - consumer.unsubscribe(); - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // Send Asynchronously of half the messages - std::string prefix = "msg-batch-async"; - int msgCount = 0; - for (int i = 0; i < numOfMessages / 2; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync( - msg, std::bind(&sendCallBack, std::placeholders::_1, std::placeholders::_2, prefix, &msgCount)); - LOG_DEBUG("async sending message " << 
messageContent); - } - LOG_INFO("sending half of messages in async, should timeout to receive"); - - // message not reached max batch number, should not receive any data. - Message receivedMsg; - ASSERT_EQ(ResultTimeout, consumer.receive(receivedMsg, 2000)); - - // After flush, it should get the message - producer.flush(); - ASSERT_EQ(ResultOk, consumer.receive(receivedMsg, 2000)); - - // receive all the messages. - while (consumer.receive(receivedMsg, 2000) == ResultOk) { - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - - // Send Asynchronously of another round of the messages - for (int i = numOfMessages / 2; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync( - msg, std::bind(&sendCallBack, std::placeholders::_1, std::placeholders::_2, prefix, &msgCount)); - LOG_DEBUG("async sending message " << messageContent); - } - LOG_INFO( - "sending the other half messages in async, should still timeout, since first half already flushed"); - ASSERT_EQ(ResultTimeout, consumer.receive(receivedMsg, 2000)); - - // After flush async, it should get the message - Promise promise; - producer.flushAsync(WaitForCallback(promise)); - Promise promise1; - producer.flushAsync(WaitForCallback(promise1)); - promise.getFuture().get(result); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(ResultOk, consumer.receive(receivedMsg, 2000)); - - producer.close(); - client.shutdown(); -} - -void testFlushInPartitionedProducer(bool lazyStartPartitionedProducers) { - Client client(lookupUrl); - std::string uniqueChunk = unique_str(); - std::string topicName = - "persistent://public/default/partition-testFlushInPartitionedProducer" + uniqueChunk; - // call admin api to make it partitioned - std::string url = adminUrl + - "admin/v2/persistent/public/default/partition-testFlushInPartitionedProducer" + - uniqueChunk + 
"/partitions";
    int res = makePutRequest(url, "5");
    const int numberOfPartitions = 5;

    LOG_INFO("res = " << res);
    ASSERT_FALSE(res != 204 && res != 409);

    Producer producer;
    int numOfMessages = 10;
    ProducerConfiguration tempProducerConfiguration;
    tempProducerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution);
    ProducerConfiguration producerConfiguration = tempProducerConfiguration;
    producerConfiguration.setBatchingEnabled(true);
    // set batch message number numOfMessages, and max delay 60s
    producerConfiguration.setBatchingMaxMessages(numOfMessages / numberOfPartitions);
    producerConfiguration.setBatchingMaxPublishDelayMs(60000);
    // NOTE(review): make_shared's template argument (routing-policy type) stripped by extraction.
    producerConfiguration.setMessageRouter(std::make_shared());
    producerConfiguration.setLazyStartPartitionedProducers(lazyStartPartitionedProducers);

    Result result = client.createProducer(topicName, producerConfiguration, producer);
    ASSERT_EQ(ResultOk, result);
    ASSERT_EQ(producer.getTopic(), topicName);

    LOG_INFO("Creating Subscriber");
    std::string consumerId = "CONSUMER";
    ConsumerConfiguration consConfig;
    consConfig.setConsumerType(ConsumerExclusive);
    consConfig.setReceiverQueueSize(2);
    ASSERT_FALSE(consConfig.hasMessageListener());
    std::vector consumer(numberOfPartitions);
    Result subscribeResult;
    // One exclusive consumer per physical partition; subscribe/unsubscribe once first
    // to clear any dangling subscription.
    for (int i = 0; i < numberOfPartitions; i++) {
        std::stringstream partitionedTopicName;
        partitionedTopicName << topicName << "-partition-" << i;

        std::stringstream partitionedConsumerId;
        partitionedConsumerId << consumerId << i;
        subscribeResult = client.subscribe(partitionedTopicName.str(), partitionedConsumerId.str(),
                                           consConfig, consumer[i]);
        consumer[i].unsubscribe();
        subscribeResult = client.subscribe(partitionedTopicName.str(), partitionedConsumerId.str(),
                                           consConfig, consumer[i]);
        ASSERT_EQ(ResultOk, subscribeResult);
        ASSERT_EQ(consumer[i].getTopic(), partitionedTopicName.str());
    }

    // Send asynchronously of first part the messages
    std::string prefix = "msg-batch-async";
    for (int i = 0; i < numOfMessages / 2; i++) {
        std::string messageContent = prefix + std::to_string(i);
        Message msg =
            MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build();
        producer.sendAsync(msg, simpleCallback);
        LOG_DEBUG("async sending message " << messageContent);
    }

    LOG_INFO("sending first part messages in async, should timeout to receive");
    Message m;
    ASSERT_EQ(ResultTimeout, consumer[0].receive(m, 2000));

    // After flush, should be able to consume.
    producer.flush();
    LOG_INFO("After flush, should be able to receive");
    ASSERT_EQ(ResultOk, consumer[0].receive(m, 2000));

    LOG_INFO("Receive all messages.");
    // receive all the messages.
    for (int partitionIndex = 0; partitionIndex < numberOfPartitions; partitionIndex++) {
        while (consumer[partitionIndex].receive(m, 2000) == ResultOk) {
            // ASSERT_EQ(ResultOk, consumer[partitionIndex].acknowledge(m));
            ASSERT_EQ(ResultOk, consumer[partitionIndex].acknowledge(m));
        }
    }

    // send message again.
- for (int i = numOfMessages / 2; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, simpleCallback); - LOG_DEBUG("async sending message " << messageContent); - } - - // After flush async, it should get the message - Promise promise; - producer.flushAsync(WaitForCallback(promise)); - Promise promise1; - producer.flushAsync(WaitForCallback(promise1)); - promise.getFuture().get(result); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(ResultOk, consumer[0].receive(m, 2000)); - - producer.close(); - client.shutdown(); -} - -TEST(BasicEndToEndTest, testFlushInPartitionedProducer) { testFlushInPartitionedProducer(false); } - -TEST(BasicEndToEndTest, testFlushInLazyPartitionedProducer) { testFlushInPartitionedProducer(true); } - -TEST(BasicEndToEndTest, testReceiveAsync) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "persistent://public/default/receiveAsync"; - std::string subName = "my-sub-name"; - Producer producer; - - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - std::string content = "msg-1-content"; - int count = 0; - int totalMsgs = 5; - bool isFailed = false; - for (int i = 0; i < totalMsgs; i++) { - 
consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2,
                                        content, true, &isFailed, &count));
    }
    // Send synchronously
    for (int i = 0; i < totalMsgs; i++) {
        Message msg = MessageBuilder().setContent(content).build();
        result = producer.send(msg);
        ASSERT_EQ(ResultOk, result);
    }

    // check strategically: poll up to 3 times, 1s apart, for all callbacks to fire
    for (int i = 0; i < 3; i++) {
        if (count == totalMsgs) {
            break;
        }
        std::this_thread::sleep_for(std::chrono::microseconds(1 * 1000 * 1000));
    }
    ASSERT_FALSE(isFailed);
    ASSERT_EQ(count, totalMsgs);
    client.shutdown();
}

// Same receiveAsync flow on a 3-partition topic, routing by a per-message partition key.
TEST(BasicEndToEndTest, testPartitionedReceiveAsync) {
    Client client(lookupUrl);
    std::string topicName = "persistent://public/default/receiveAsync-partition";

    // call admin api to make it partitioned
    std::string url = adminUrl + "admin/v2/persistent/public/default/receiveAsync-partition/partitions";
    int res = makePutRequest(url, "3");

    LOG_INFO("res = " << res);
    ASSERT_FALSE(res != 204 && res != 409);

    Producer producer;
    Result result = client.createProducer(topicName, producer);
    ASSERT_EQ(ResultOk, result);

    Consumer consumer;
    result = client.subscribe(topicName, "subscription-A", consumer);
    ASSERT_EQ(ResultOk, result);

    int totalMsgs = 10;
    std::string content;
    int count = 0;
    bool isFailed = false;
    for (int i = 0; i < totalMsgs; i++) {
        consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2,
                                        content, false, &isFailed, &count));
    }

    // Use the current wall-clock nanoseconds as both payload and partition key.
    for (int i = 0; i < totalMsgs; i++) {
        boost::posix_time::ptime t(boost::posix_time::microsec_clock::universal_time());
        long nanoSeconds = t.time_of_day().total_nanoseconds();
        std::stringstream ss;
        ss << nanoSeconds;
        Message msg = MessageBuilder().setContent(ss.str()).setPartitionKey(ss.str()).build();
        ASSERT_EQ(ResultOk, producer.send(msg));
        LOG_DEBUG("Message Timestamp is " << msg.getPublishTimestamp());
        LOG_DEBUG("Message is " << msg);
    }

    // check strategically: poll up to 3 times, 1s apart, for all callbacks to fire
    for (int i = 0; i < 3; i++) {
        if (count == totalMsgs) {
            break;
        }
        std::this_thread::sleep_for(std::chrono::microseconds(1 * 1000 * 1000));
    }
    ASSERT_FALSE(isFailed);
    ASSERT_EQ(count, totalMsgs);
    client.shutdown();
}

// receiveAsync with a batching (LZ4-compressed) producer: all 100 callbacks must fire.
TEST(BasicEndToEndTest, testBatchMessagesReceiveAsync) {
    ClientConfiguration config;
    Client client(lookupUrl);
    std::string topicName = "persistent://public/default/receiveAsync-batch";
    std::string subName = "subscription-name";
    Producer producer;

    // Enable batching on producer side
    int batchSize = 2;
    int numOfMessages = 100;

    ProducerConfiguration conf;
    conf.setCompressionType(CompressionLZ4);
    conf.setBatchingMaxMessages(batchSize);
    conf.setBatchingEnabled(true);
    conf.setBlockIfQueueFull(true);

    Promise producerPromise;
    client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise));
    Future producerFuture = producerPromise.getFuture();
    Result result = producerFuture.get(producer);
    ASSERT_EQ(ResultOk, result);

    Consumer consumer;
    Promise consumerPromise;
    client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise));
    Future consumerFuture = consumerPromise.getFuture();
    result = consumerFuture.get(consumer);
    ASSERT_EQ(ResultOk, result);

    // handling dangling subscriptions
    consumer.unsubscribe();
    client.subscribe(topicName, subName, consumer);

    std::string temp = producer.getTopic();
    ASSERT_EQ(temp, topicName);
    temp = consumer.getTopic();
    ASSERT_EQ(temp, topicName);
    ASSERT_EQ(consumer.getSubscriptionName(), subName);

    std::string content;
    int count = 0;
    bool isFailed = false;
    for (int i = 0; i < numOfMessages; i++) {
        consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2,
                                        content, false, &isFailed, &count));
    }

    // Send Asynchronously
    std::string prefix = "msg-batch-";
    int msgCount = 0;
    for (int i = 0; i < numOfMessages; i++) {
        std::string
messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync( - msg, std::bind(&sendCallBack, std::placeholders::_1, std::placeholders::_2, prefix, &msgCount)); - LOG_DEBUG("sending message " << messageContent); - } - - // check strategically - for (int i = 0; i < 3; i++) { - if (count == numOfMessages) { - break; - } - std::this_thread::sleep_for(std::chrono::microseconds(1 * 1000 * 1000)); - } - ASSERT_FALSE(isFailed); - ASSERT_EQ(count, numOfMessages); -} - -TEST(BasicEndToEndTest, testReceiveAsyncFailedConsumer) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "persistent://public/default/receiveAsync-failed"; - std::string subName = "my-sub-name"; - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - bool isFailedOnConsumerClosing = false; - std::string content; - int closingCunt = 0; - // callback should immediately fail - consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2, content, - false, &isFailedOnConsumerClosing, &closingCunt)); - - // close consumer - consumer.close(); - bool isFailedOnConsumerClosed = false; - int count = 0; - // callback should immediately fail - consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2, content, - false, &isFailedOnConsumerClosed, &count)); - - // check strategically - for (int i = 0; i < 3; i++) { - if (isFailedOnConsumerClosing && isFailedOnConsumerClosed) { - break; - } - std::this_thread::sleep_for(std::chrono::microseconds(1 * 1000 * 1000)); - } - - ASSERT_TRUE(isFailedOnConsumerClosing); - ASSERT_TRUE(isFailedOnConsumerClosed); - ASSERT_EQ(count, 0); - - 
client.shutdown(); -} - -TEST(BasicEndToEndTest, testPartitionedReceiveAsyncFailedConsumer) { - Client client(lookupUrl); - std::string topicName = "persistent://public/default/receiveAsync-fail-partition"; - - // call admin api to make it partitioned - std::string url = adminUrl + "admin/v2/persistent/public/default/receiveAsync-fail-partition/partitions"; - int res = makePutRequest(url, "3"); - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - Consumer consumer; - Result result = client.subscribe(topicName, "subscription-A", consumer); - ASSERT_EQ(ResultOk, result); - - bool isFailedOnConsumerClosing = false; - std::string content; - int closingCunt = 0; - // callback should immediately fail - consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2, content, - false, &isFailedOnConsumerClosing, &closingCunt)); - // close consumer - consumer.close(); - - int count = 0; - bool isFailedOnConsumerClosed = false; - consumer.receiveAsync(std::bind(&receiveCallBack, std::placeholders::_1, std::placeholders::_2, content, - false, &isFailedOnConsumerClosed, &count)); - - // check strategically - for (int i = 0; i < 3; i++) { - if (isFailedOnConsumerClosing && isFailedOnConsumerClosed) { - break; - } - std::this_thread::sleep_for(std::chrono::microseconds(1 * 1000 * 1000)); - } - - ASSERT_TRUE(isFailedOnConsumerClosing); - ASSERT_TRUE(isFailedOnConsumerClosed); - ASSERT_EQ(count, 0); - client.shutdown(); -} - -static void expectTimeoutOnRecv(Consumer &consumer) { - Message msg; - Result res = consumer.receive(msg, 100); - if (res != ResultTimeout) { - LOG_ERROR("Received a msg when not expecting to id(" << msg.getMessageId() << ") " - << msg.getDataAsString()); - } - ASSERT_EQ(ResultTimeout, res); -} - -void testNegativeAcks(const std::string &topic, bool batchingEnabled) { - Client client(lookupUrl); - Consumer consumer; - ConsumerConfiguration conf; - conf.setNegativeAckRedeliveryDelayMs(100); - Result 
result = client.subscribe(topic, "test", conf, consumer); - ASSERT_EQ(ResultOk, result); - - Producer producer; - ProducerConfiguration producerConf; - producerConf.setBatchingEnabled(batchingEnabled); - result = client.createProducer(topic, producerConf, producer); - ASSERT_EQ(ResultOk, result); - - for (int i = 0; i < 10; i++) { - Message msg = MessageBuilder().setContent("test-" + std::to_string(i)).build(); - producer.sendAsync(msg, nullptr); - } - - producer.flush(); - - std::vector toNeg; - for (int i = 0; i < 10; i++) { - Message msg; - consumer.receive(msg); - - LOG_INFO("Received message " << msg.getDataAsString()); - ASSERT_EQ(msg.getDataAsString(), "test-" + std::to_string(i)); - toNeg.push_back(msg.getMessageId()); - } - // No more messages expected - expectTimeoutOnRecv(consumer); - - PulsarFriend::setNegativeAckEnabled(consumer, false); - // negatively acknowledge all at once - for (auto &&msgId : toNeg) { - consumer.negativeAcknowledge(msgId); - } - PulsarFriend::setNegativeAckEnabled(consumer, true); - - for (int i = 0; i < 10; i++) { - Message msg; - consumer.receive(msg); - LOG_INFO("-- Redelivery -- Received message " << msg.getDataAsString()); - - ASSERT_EQ(msg.getDataAsString(), "test-" + std::to_string(i)); - - consumer.acknowledge(msg); - } - - // No more messages expected - expectTimeoutOnRecv(consumer); - - client.shutdown(); -} - -TEST(BasicEndToEndTest, testNegativeAcks) { - testNegativeAcks("testNegativeAcks-" + std::to_string(time(nullptr)), false); -} - -TEST(BasicEndToEndTest, testNegativeAcksWithBatching) { - testNegativeAcks("testNegativeAcksWithBatching-" + std::to_string(time(nullptr)), true); -} - -TEST(BasicEndToEndTest, testNegativeAcksWithPartitions) { - std::string topicName = "testNegativeAcksWithPartitions-" + std::to_string(time(nullptr)); - - // call admin api to make it partitioned - std::string url = adminUrl + "admin/v2/persistent/public/default/" + topicName + "/partitions"; - int res = makePutRequest(url, "3"); - - 
LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - testNegativeAcks(topicName, true); -} - -static long regexTestMessagesReceived = 0; - -static void regexMessageListenerFunction(Consumer consumer, const Message &msg) { - regexTestMessagesReceived++; -} - -TEST(BasicEndToEndTest, testRegexTopicsWithMessageListener) { - ClientConfiguration config; - Client client(lookupUrl); - long unAckedMessagesTimeoutMs = 10000; - std::string subsName = "testRegexTopicsWithMessageListener-sub"; - std::string pattern = "persistent://public/default/testRegexTopicsWithMessageListenerTopic-.*"; - ConsumerConfiguration consumerConf; - consumerConf.setConsumerType(ConsumerShared); - consumerConf.setMessageListener( - std::bind(regexMessageListenerFunction, std::placeholders::_1, std::placeholders::_2)); - consumerConf.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - - Producer producer; - ProducerConfiguration producerConf; - Result result = client.createProducer( - "persistent://public/default/testRegexTopicsWithMessageListenerTopic-1", producerConf, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - result = client.subscribeWithRegex(pattern, subsName, consumerConf, consumer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(consumer.getSubscriptionName(), subsName); - - for (int i = 0; i < 10; i++) { - Message msg = MessageBuilder().setContent("test-" + std::to_string(i)).build(); - producer.sendAsync(msg, nullptr); - } - - producer.flush(); - long timeWaited = 0; - while (true) { - // maximum wait time - ASSERT_LE(timeWaited, unAckedMessagesTimeoutMs * 3); - if (regexTestMessagesReceived >= 10 * 2) { - break; - } - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - timeWaited += 500; - } -} - -TEST(BasicEndToEndTest, testRegexTopicsWithInitialPosition) { - ClientConfiguration config; - Client client(lookupUrl); - - std::string topicName = - "persistent://public/default/test-regex-initial-position-" + std::to_string(time(NULL)); - 
- Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - for (int i = 0; i < 10; i++) { - producer.send(MessageBuilder().setContent("test-" + std::to_string(i)).build()); - } - - std::string subsName = "testRegexTopicsWithMessageListener-sub"; - std::string pattern = topicName + ".*"; - - // Subscription gets created after messages are produced but it will start from the beginning of the topic - ConsumerConfiguration consumerConf; - consumerConf.setSubscriptionInitialPosition(InitialPositionEarliest); - - Consumer consumer; - result = client.subscribeWithRegex(pattern, subsName, consumerConf, consumer); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(consumer.getSubscriptionName(), subsName); - - for (int i = 0; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg)); - } - - client.close(); -} - -TEST(BasicEndToEndTest, testPartitionedTopicWithOnePartition) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "testPartitionedTopicWithOnePartition" + unique_str(); - std::string subsName = topicName + "-sub-"; - - // call admin api to make 1 partition - std::string url = adminUrl + "admin/v2/persistent/public/default/" + topicName + "/partitions"; - int putRes = makePutRequest(url, "1"); - LOG_INFO("res = " << putRes); - ASSERT_FALSE(putRes != 204 && putRes != 409); - - Consumer consumer1; - ConsumerConfiguration conf; - Result result = client.subscribe(topicName, subsName + "1", consumer1); - ASSERT_EQ(ResultOk, result); - - Consumer consumer2; - result = client.subscribe(topicName + "-partition-0", subsName + "2", consumer2); - ASSERT_EQ(ResultOk, result); - - LOG_INFO("created 2 consumer"); - - Producer producer1; - ProducerConfiguration producerConf; - producerConf.setBatchingEnabled(false); - result = client.createProducer(topicName, producerConf, producer1); - ASSERT_EQ(ResultOk, result); - - Producer producer2; - result = client.createProducer(topicName 
+ "-partition-0", producerConf, producer2); - ASSERT_EQ(ResultOk, result); - - LOG_INFO("created 2 producer"); - - // create messages - int numMessages = 10; - for (int i = 0; i < numMessages; i++) { - Message msg = MessageBuilder().setContent("test-producer1-" + topicName + std::to_string(i)).build(); - producer1.send(msg); - msg = MessageBuilder().setContent("test-producer2-" + topicName + std::to_string(i)).build(); - producer2.send(msg); - } - - // produced 10 messages by each producer. - // expected receive 20 messages by each consumer. - for (int i = 0; i < numMessages * 2; i++) { - LOG_INFO("begin to receive message " << i); - - Message msg; - Result res = consumer1.receive(msg, 3000); - ASSERT_EQ(ResultOk, res); - consumer1.acknowledge(msg); - - res = consumer2.receive(msg, 3000); - ASSERT_EQ(ResultOk, res); - consumer2.acknowledge(msg); - } - - // No more messages expected - Message msg; - Result res = consumer1.receive(msg, 100); - ASSERT_EQ(ResultTimeout, res); - - res = consumer2.receive(msg, 100); - ASSERT_EQ(ResultTimeout, res); - client.shutdown(); -} - -TEST(BasicEndToEndTest, testDelayedMessages) { - std::string topicName = "testDelayedMessages-" + std::to_string(TimeUtils::currentTimeMillis()); - Client client(lookupUrl); - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConf; - consumerConf.setConsumerType(ConsumerShared); - result = client.subscribe(topicName, "my-sub-name", consumerConf, consumer); - ASSERT_EQ(ResultOk, result); - - Message msg1 = - MessageBuilder().setContent("msg-1").setDeliverAfter(std::chrono::milliseconds(5000)).build(); - ASSERT_EQ(ResultOk, producer.send(msg1)); - - // 2nd message without delay - Message msg2 = MessageBuilder().setContent("msg-2").build(); - ASSERT_EQ(ResultOk, producer.send(msg2)); - - Message msgReceived; - result = consumer.receive(msgReceived); - ASSERT_EQ(ResultOk, result); - 
ASSERT_EQ("msg-2", msgReceived.getDataAsString()); - - auto result1 = client.close(); - std::cout << "closed with " << result1 << std::endl; - ASSERT_EQ(ResultOk, result1); -} - -TEST(BasicEndToEndTest, testCumulativeAcknowledgeNotAllowed) { - ClientConfiguration config; - Client client(lookupUrl); - std::string topicName = "testCumulativeAcknowledgeNotAllowed"; - std::string subsName = topicName + "-sub-"; - - Consumer consumer1; - ConsumerConfiguration consumerConfiguration1; - consumerConfiguration1.setConsumerType(ConsumerShared); - - Result result = client.subscribe(topicName, subsName + "1", consumerConfiguration1, consumer1); - ASSERT_EQ(ResultOk, result); - - Consumer consumer2; - ConsumerConfiguration consumerConfiguration2; - consumerConfiguration2.setConsumerType(ConsumerKeyShared); - - result = client.subscribe(topicName, subsName + "2", consumerConfiguration2, consumer2); - ASSERT_EQ(ResultOk, result); - - Producer producer; - result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - // publish messages - int numMessages = 10; - for (int i = 0; i < numMessages; i++) { - Message msg = MessageBuilder().setContent("test-producer-" + topicName + std::to_string(i)).build(); - producer.send(msg); - } - - // test cannot use acknowledgeCumulative on Shared subscription - for (int i = 0; i < numMessages; i++) { - Message msg; - Result res = consumer1.receive(msg, 3000); - ASSERT_EQ(ResultOk, res); - if (i == 9) { - res = consumer1.acknowledgeCumulative(msg); - ASSERT_EQ(ResultCumulativeAcknowledgementNotAllowedError, res); - } - } - - // test cannot use acknowledgeCumulative on Key_Shared subscription - for (int i = 0; i < numMessages; i++) { - Message msg; - Result res = consumer2.receive(msg, 3000); - ASSERT_EQ(ResultOk, res); - if (i == 9) { - res = consumer2.acknowledgeCumulative(msg); - ASSERT_EQ(ResultCumulativeAcknowledgementNotAllowedError, res); - } - } - client.shutdown(); -} - -TEST(BasicEndToEndTest, testSendCallback) { 
- const std::string topicName = "persistent://public/default/BasicEndToEndTest-testSendCallback"; - - Client client(lookupUrl); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, "SubscriptionName", consumer)); - - Latch latch(100); - std::set sentIdSet; - for (int i = 0; i < 100; i++) { - const auto msg = MessageBuilder().setContent("a").build(); - producer.sendAsync(msg, [&sentIdSet, &latch](Result result, const MessageId &id) { - ASSERT_EQ(ResultOk, result); - sentIdSet.emplace(id); - latch.countdown(); - }); - } - - std::set receivedIdSet; - for (int i = 0; i < 100; i++) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg)); - receivedIdSet.emplace(msg.getMessageId()); - consumer.acknowledge(msg); - } - - latch.wait(); - ASSERT_EQ(sentIdSet, receivedIdSet); - - consumer.close(); - producer.close(); - - const std::string partitionedTopicName = topicName + "-" + std::to_string(time(nullptr)); - const std::string url = adminUrl + "admin/v2/persistent/" + - partitionedTopicName.substr(partitionedTopicName.find("://") + 3) + "/partitions"; - const int numPartitions = 3; - - int res = makePutRequest(url, std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - std::this_thread::sleep_for(std::chrono::seconds(2)); - - ProducerConfiguration producerConfig; - producerConfig.setBatchingEnabled(false); - producerConfig.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopicName, producerConfig, producer)); - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopicName, "SubscriptionName", consumer)); - - sentIdSet.clear(); - receivedIdSet.clear(); - - const int numMessages = numPartitions * 2; - latch = Latch(numMessages); - for (int i = 0; i < numMessages; i++) { - const auto msg = MessageBuilder().setContent("a").build(); - 
producer.sendAsync(msg, [&sentIdSet, &latch](Result result, const MessageId &id) { - ASSERT_EQ(ResultOk, result); - sentIdSet.emplace(id); - latch.countdown(); - }); - } - - for (int i = 0; i < numMessages; i++) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg)); - receivedIdSet.emplace(msg.getMessageId()); - consumer.acknowledge(msg); - } - - latch.wait(); - ASSERT_EQ(sentIdSet, receivedIdSet); - - std::set partitionIndexSet; - for (const auto &id : sentIdSet) { - partitionIndexSet.emplace(id.partition()); - } - std::set expectedPartitionIndexSet; - for (int i = 0; i < numPartitions; i++) { - expectedPartitionIndexSet.emplace(i); - } - ASSERT_EQ(sentIdSet, receivedIdSet); - - consumer.close(); - producer.close(); - client.close(); -} - -class AckGroupingTrackerMock : public AckGroupingTracker { - public: - explicit AckGroupingTrackerMock(bool mockAck) : mockAck_(mockAck) {} - - bool callDoImmediateAck(ClientConnectionWeakPtr connWeakPtr, uint64_t consumerId, const MessageId &msgId, - proto::CommandAck_AckType ackType) { - if (!this->mockAck_) { - // Not mocking ACK, expose this method. - return this->doImmediateAck(connWeakPtr, consumerId, msgId, ackType); - } else { - // Mocking ACK. - return true; - } - } - - bool callDoImmediateAck(ClientConnectionWeakPtr connWeakPtr, uint64_t consumerId, - const std::set &msgIds) { - if (!this->mockAck_) { - // Not mocking ACK, expose this method. - return this->doImmediateAck(connWeakPtr, consumerId, msgIds); - } else { - // Mocking ACK. 
- return true; - } - } - - private: - bool mockAck_; -}; // class AckGroupingTrackerMock - -TEST(BasicEndToEndTest, testAckGroupingTrackerDefaultBehavior) { - ConsumerConfiguration configConsumer; - ASSERT_EQ(configConsumer.getAckGroupingTimeMs(), 100); - ASSERT_EQ(configConsumer.getAckGroupingMaxSize(), 1000); - - AckGroupingTracker tracker; - Message msg; - ASSERT_FALSE(tracker.isDuplicate(msg.getMessageId())); -} - -TEST(BasicEndToEndTest, testAckGroupingTrackerSingleAckBehavior) { - constexpr auto numMsg = 10; - const std::string topicName = "testAckGroupingTrackerSingleAckBehavior" + std::to_string(time(nullptr)); - const std::string subName = "sub-ack-grp-single-ack-behavior"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - - auto &consumerImpl = PulsarFriend::getConsumerImpl(consumer); - auto connWeakPtr = PulsarFriend::getClientConnection(consumerImpl); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - - // Send ACK. 
- AckGroupingTrackerMock tracker(false); - tracker.start(); - for (auto msgIdx = 0; msgIdx < numMsg; ++msgIdx) { - auto connPtr = connWeakPtr.lock(); - ASSERT_NE(connPtr, nullptr); - ASSERT_TRUE(tracker.callDoImmediateAck(connWeakPtr, consumerImpl.getConsumerId(), recvMsgId[msgIdx], - proto::CommandAck::Individual)); - } - Message msg; - ASSERT_EQ(ResultTimeout, consumer.receive(msg, 1000)); - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - ASSERT_EQ(ResultTimeout, consumer.receive(msg, 1000)); - consumer.close(); -} - -TEST(BasicEndToEndTest, testAckGroupingTrackerMultiAckBehavior) { - constexpr auto numMsg = 10; - const std::string topicName = "testAckGroupingTrackerMultiAckBehavior" + std::to_string(time(nullptr)); - const std::string subName = "sub-ack-grp-multi-ack-behavior"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - - auto &consumerImpl = PulsarFriend::getConsumerImpl(consumer); - auto connWeakPtr = PulsarFriend::getClientConnection(consumerImpl); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - - // Send ACK. 
- AckGroupingTrackerMock tracker(false); - tracker.start(); - std::set restMsgId(recvMsgId.begin(), recvMsgId.end()); - ASSERT_EQ(restMsgId.size(), numMsg); - ASSERT_TRUE(tracker.callDoImmediateAck(connWeakPtr, consumerImpl.getConsumerId(), restMsgId)); - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message: " << msg.getDataAsString(); - consumer.close(); -} - -TEST(BasicEndToEndTest, testAckGroupingTrackerDisabledIndividualAck) { - constexpr auto numMsg = 10; - const std::string topicName = - "testAckGroupingTrackerDisabledIndividualAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-ack-grp-disabled-ind-ack"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto &consumerImpl = PulsarFriend::getConsumerImpl(consumer); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - - // Send ACK. 
- AckGroupingTrackerDisabled tracker(consumerImpl, consumerImpl.getConsumerId()); - for (auto &msgId : recvMsgId) { - tracker.addAcknowledge(msgId); - } - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message: " << msg.getDataAsString(); - consumer.close(); -} - -TEST(BasicEndToEndTest, testAckGroupingTrackerDisabledCumulativeAck) { - constexpr auto numMsg = 10; - const std::string topicName = - "testAckGroupingTrackerDisabledCumulativeAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-ack-grp-disabled-cum-ack"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto &consumerImpl = PulsarFriend::getConsumerImpl(consumer); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - - // Send ACK. 
- AckGroupingTrackerDisabled tracker(consumerImpl, consumerImpl.getConsumerId()); - auto &latestMsgId = *std::max_element(recvMsgId.begin(), recvMsgId.end()); - tracker.addAcknowledgeCumulative(latestMsgId); - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message: " << msg.getDataAsString(); - consumer.close(); -} - -class AckGroupingTrackerEnabledMock : public AckGroupingTrackerEnabled { - public: - AckGroupingTrackerEnabledMock(ClientImplPtr clientPtr, const HandlerBasePtr &handlerPtr, - uint64_t consumerId, long ackGroupingTimeMs, long ackGroupingMaxSize) - : AckGroupingTrackerEnabled(clientPtr, handlerPtr, consumerId, ackGroupingTimeMs, - ackGroupingMaxSize) {} - const std::set &getPendingIndividualAcks() { return this->pendingIndividualAcks_; } - const long getAckGroupingTimeMs() { return this->ackGroupingTimeMs_; } - const long getAckGroupingMaxSize() { return this->ackGroupingMaxSize_; } - const MessageId getNextCumulativeAckMsgId() { return this->nextCumulativeAckMsgId_; } - const bool requireCumulativeAck() { return this->requireCumulativeAck_; } -}; // class AckGroupingTrackerEnabledMock - -TEST(BasicEndToEndTest, testAckGroupingTrackerEnabledIndividualAck) { - constexpr auto numMsg = 10; - constexpr auto ackGroupingTimeMs = 1000; - constexpr auto ackGroupingMaxSize = 5000; - const std::string topicName = - "testAckGroupingTrackerEnabledIndividualAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-ack-grp-enabled-ind-ack"; - - // Setup client, producer and consumer. 
- Client client(lookupUrl); - auto clientImplPtr = PulsarFriend::getClientImplPtr(client); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto consumerImpl = PulsarFriend::getConsumerImplPtr(consumer); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - - auto tracker = std::make_shared( - clientImplPtr, consumerImpl, consumerImpl->getConsumerId(), ackGroupingTimeMs, ackGroupingMaxSize); - tracker->start(); - ASSERT_EQ(tracker->getPendingIndividualAcks().size(), 0); - ASSERT_EQ(tracker->getAckGroupingTimeMs(), ackGroupingTimeMs); - ASSERT_EQ(tracker->getAckGroupingMaxSize(), ackGroupingMaxSize); - for (auto &msgId : recvMsgId) { - ASSERT_FALSE(tracker->isDuplicate(msgId)); - tracker->addAcknowledge(msgId); - ASSERT_TRUE(tracker->isDuplicate(msgId)); - } - ASSERT_EQ(tracker->getPendingIndividualAcks().size(), recvMsgId.size()); - - std::this_thread::sleep_for(std::chrono::seconds(2)); - ASSERT_EQ(tracker->getPendingIndividualAcks().size(), 0); - for (auto &msgId : recvMsgId) { - ASSERT_FALSE(tracker->isDuplicate(msgId)); - } - consumer.close(); - - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message: " << msg.getDataAsString(); -} - -TEST(BasicEndToEndTest, testAckGroupingTrackerEnabledCumulativeAck) { - constexpr auto numMsg 
= 10; - constexpr auto ackGroupingTimeMs = 1000; - constexpr auto ackGroupingMaxSize = 5000; - const std::string topicName = - "testAckGroupingTrackerEnabledCumulativeAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-ack-grp-enabled-cum-ack"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - auto clientImplPtr = PulsarFriend::getClientImplPtr(client); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto consumerImpl0 = PulsarFriend::getConsumerImplPtr(consumer); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - std::sort(recvMsgId.begin(), recvMsgId.end()); - - auto tracker0 = std::make_shared( - clientImplPtr, consumerImpl0, consumerImpl0->getConsumerId(), ackGroupingTimeMs, ackGroupingMaxSize); - tracker0->start(); - ASSERT_EQ(tracker0->getNextCumulativeAckMsgId(), MessageId::earliest()); - ASSERT_FALSE(tracker0->requireCumulativeAck()); - - auto targetMsgId = recvMsgId[numMsg / 2]; - for (auto idx = 0; idx <= numMsg / 2; ++idx) { - ASSERT_FALSE(tracker0->isDuplicate(recvMsgId[idx])); - } - tracker0->addAcknowledgeCumulative(targetMsgId); - for (auto idx = 0; idx <= numMsg / 2; ++idx) { - ASSERT_TRUE(tracker0->isDuplicate(recvMsgId[idx])); - } - ASSERT_EQ(tracker0->getNextCumulativeAckMsgId(), targetMsgId); - ASSERT_TRUE(tracker0->requireCumulativeAck()); - - 
std::this_thread::sleep_for(std::chrono::seconds(2)); - ASSERT_FALSE(tracker0->requireCumulativeAck()); - for (auto idx = 0; idx <= numMsg / 2; ++idx) { - ASSERT_TRUE(tracker0->isDuplicate(recvMsgId[idx])); - } - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto consumerImpl1 = PulsarFriend::getConsumerImplPtr(consumer); - std::set restMsgId(recvMsgId.begin() + numMsg / 2 + 1, recvMsgId.end()); - for (auto count = numMsg / 2 + 1; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - ASSERT_EQ(restMsgId.count(msg.getMessageId()), 1); - } - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message: " << msg.getDataAsString(); - auto tracker1 = std::make_shared( - clientImplPtr, consumerImpl1, consumerImpl1->getConsumerId(), ackGroupingTimeMs, ackGroupingMaxSize); - tracker1->start(); - tracker1->addAcknowledgeCumulative(recvMsgId[numMsg - 1]); - tracker1->close(); - consumer.close(); - - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message ID: " << msg.getMessageId(); -} - -class UnAckedMessageTrackerEnabledMock : public UnAckedMessageTrackerEnabled { - public: - UnAckedMessageTrackerEnabledMock(long timeoutMs, const ClientImplPtr client, ConsumerImplBase &consumer) - : UnAckedMessageTrackerEnabled(timeoutMs, timeoutMs, client, consumer) {} - const long getUnAckedMessagesTimeoutMs() { return this->timeoutMs_; } - const long getTickDurationInMs() { return this->tickDurationInMs_; } - bool isEmpty() { return UnAckedMessageTrackerEnabled::isEmpty(); } - long size() { return UnAckedMessageTrackerEnabled::size(); } -}; // class UnAckedMessageTrackerEnabledMock - -TEST(BasicEndToEndTest, testUnAckedMessageTrackerDefaultBehavior) { - ConsumerConfiguration 
configConsumer; - ASSERT_EQ(configConsumer.getUnAckedMessagesTimeoutMs(), 0); - ASSERT_EQ(configConsumer.getTickDurationInMs(), 1000); - - UnAckedMessageTrackerDisabled tracker; - Message msg; - ASSERT_FALSE(tracker.add(msg.getMessageId())); - ASSERT_FALSE(tracker.remove(msg.getMessageId())); -} - -TEST(BasicEndToEndTest, testUnAckedMessageTrackerDisabled) { - constexpr auto numMsg = 10; - const std::string topicName = - "testUnAckedMessageTrackerDisabledIndividualAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-un-acked-msg-disabled-ind-ack"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - - Producer producer; - ProducerConfiguration configProducer; - configProducer.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, configProducer, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - UnAckedMessageTrackerDisabled tracker; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - ASSERT_FALSE(tracker.add(msg.getMessageId())); - - consumer.acknowledge(msg.getMessageId()); - ASSERT_FALSE(tracker.remove(msg.getMessageId())); - } - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message: " << msg.getDataAsString(); - consumer.close(); - client.close(); -} - -TEST(BasicEndToEndTest, testUnAckedMessageTrackerEnabledIndividualAck) { - constexpr auto numMsg = 10; - constexpr auto unAckedMessagesTimeoutMs = 1000; - const std::string 
topicName = - "testUnAckedMessageTrackerEnabledIndividualAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-un-acked-msg-enabled-ind-ack"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - auto clientImplPtr = PulsarFriend::getClientImplPtr(client); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto &consumerImpl0 = PulsarFriend::getConsumerImpl(consumer); - - // Sending and receiving messages. - for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - - auto tracker0 = std::make_shared(unAckedMessagesTimeoutMs, - clientImplPtr, consumerImpl0); - ASSERT_EQ(tracker0->getUnAckedMessagesTimeoutMs(), unAckedMessagesTimeoutMs); - ASSERT_EQ(tracker0->getTickDurationInMs(), unAckedMessagesTimeoutMs); - - for (auto idx = 0; idx < numMsg; ++idx) { - ASSERT_TRUE(tracker0->add(recvMsgId[idx])); - } - ASSERT_EQ(numMsg, tracker0->size()); - ASSERT_FALSE(tracker0->isEmpty()); - - std::this_thread::sleep_for(std::chrono::seconds(4)); - ASSERT_EQ(0, tracker0->size()); - ASSERT_TRUE(tracker0->isEmpty()); - consumer.close(); - - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto &consumerImpl1 = PulsarFriend::getConsumerImpl(consumer); - std::set restMsgId(recvMsgId.begin(), recvMsgId.end()); - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - ASSERT_EQ(restMsgId.count(msg.getMessageId()), 1); - ASSERT_EQ(ResultOk, consumer.acknowledge(msg)); - } - - auto tracker1 = 
std::make_shared(unAckedMessagesTimeoutMs, - clientImplPtr, consumerImpl1); - for (auto idx = 0; idx < numMsg; ++idx) { - ASSERT_TRUE(tracker1->add(recvMsgId[idx])); - ASSERT_TRUE(tracker1->remove(recvMsgId[idx])); - } - ASSERT_EQ(0, tracker1->size()); - ASSERT_TRUE(tracker1->isEmpty()); - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(2)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message ID: " << msg.getMessageId(); - consumer.close(); - client.close(); -} - -TEST(BasicEndToEndTest, testUnAckedMessageTrackerEnabledCumulativeAck) { - constexpr auto numMsg = 10; - constexpr auto unAckedMessagesTimeoutMs = 1000; - const std::string topicName = - "testUnAckedMessageTrackerEnabledCumulativeAck" + std::to_string(time(nullptr)); - const std::string subName = "sub-un-acked-msg-enabled-cum-ack"; - - // Setup client, producer and consumer. - Client client(lookupUrl); - auto clientImplPtr = PulsarFriend::getClientImplPtr(client); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - auto &consumerImpl0 = PulsarFriend::getConsumerImpl(consumer); - - // Sending and receiving messages. 
- for (auto count = 0; count < numMsg; ++count) { - Message msg = MessageBuilder().setContent(std::string("MSG-") + std::to_string(count)).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - std::vector recvMsgId; - for (auto count = 0; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - recvMsgId.emplace_back(msg.getMessageId()); - } - auto tracker = std::make_shared(unAckedMessagesTimeoutMs, clientImplPtr, - consumerImpl0); - for (auto idx = 0; idx < numMsg; ++idx) { - ASSERT_TRUE(tracker->add(recvMsgId[idx])); - } - ASSERT_EQ(numMsg, tracker->size()); - ASSERT_FALSE(tracker->isEmpty()); - - std::sort(recvMsgId.begin(), recvMsgId.end()); - - auto targetMsgId = recvMsgId[numMsg / 2]; - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(targetMsgId)); - tracker->removeMessagesTill(targetMsgId); - ASSERT_EQ(numMsg - (numMsg / 2 + 1), tracker->size()); - ASSERT_FALSE(tracker->isEmpty()); - - std::this_thread::sleep_for(std::chrono::seconds(4)); - ASSERT_EQ(0, tracker->size()); - ASSERT_TRUE(tracker->isEmpty()); - consumer.close(); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(ResultOk, client.subscribe(topicName, subName, consumer)); - for (auto count = numMsg / 2 + 1; count < numMsg; ++count) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - ASSERT_EQ(ResultOk, consumer.acknowledge(msg.getMessageId())); - } - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message ID: " << msg.getMessageId(); - consumer.close(); - client.close(); -} diff --git a/pulsar-client-cpp/tests/BatchMessageTest.cc b/pulsar-client-cpp/tests/BatchMessageTest.cc deleted file mode 100644 index 62fd5fff25c2d..0000000000000 --- a/pulsar-client-cpp/tests/BatchMessageTest.cc +++ /dev/null @@ -1,1151 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ConsumerTest.h" -#include "CustomRoutingPolicy.h" -#include "HttpHelper.h" -#include "PulsarFriend.h" - -DECLARE_LOG_OBJECT(); - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; -static std::string adminUrl = "http://localhost:8080/"; - -// ecpoch time in seconds -const long epochTime = time(NULL); - -class MessageCountSendCallback { - public: - MessageCountSendCallback(std::atomic_int& numOfMessagesProduced) - : numOfMessagesProduced_(numOfMessagesProduced) {} - - void operator()(Result result, const MessageId&) { - ASSERT_EQ(result, ResultOk); - numOfMessagesProduced_++; - } - - private: - std::atomic_int& numOfMessagesProduced_; -}; - -static void sendFailCallBack(Result r, Result expect_result) { EXPECT_EQ(r, expect_result); } - -static int globalPublishCountSuccess = 0; -static int globalPublishCountQueueFull = 0; - -static void sendCallBackExpectingErrors(Result r, const MessageId& msgId) { - if (r == ResultProducerQueueIsFull) { - globalPublishCountQueueFull++; - } else if (r == ResultOk) { - globalPublishCountSuccess++; - } -} - -TEST(BatchMessageTest, 
testProducerConfig) { - ProducerConfiguration conf; - try { - conf.setBatchingMaxMessages(1); - FAIL(); - } catch (const std::exception&) { - // Ok - } - ASSERT_EQ(ProducerConfiguration::DefaultBatching, conf.getBatchingType()); - conf.setBatchingType(ProducerConfiguration::KeyBasedBatching); - ASSERT_EQ(ProducerConfiguration::KeyBasedBatching, conf.getBatchingType()); -} - -TEST(BatchMessageTest, testProducerTimeout) { - std::string testName = std::to_string(epochTime) + "testProducerTimeout"; - - ClientConfiguration clientConf; - clientConf.setStatsIntervalInSeconds(1); - - Client client(lookupUrl, clientConf); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 3; - int numOfMessages = 4; - int timeout = 4000; - ProducerConfiguration conf; - conf.setCompressionType(CompressionLZ4); - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingMaxPublishDelayMs(timeout); - conf.setBatchingEnabled(true); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - ProducerStatsImplPtr producerStatsImplPtr = PulsarFriend::getProducerStatsPtr(producer); - // Send Asynchronously - std::string 
prefix = "msg-batch-test-produce-timeout-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder() - .setContent(messageContent) - .setProperty("type", "batch") - .setProperty("msgIndex", std::to_string(i)) - .build(); - LOG_DEBUG("sending message " << messageContent); - clock_t start, end; - /* Start the timer */ - start = time(NULL); - LOG_DEBUG("start = " << start); - Promise promise; - producer.sendAsync(msg, WaitForCallbackValue(promise)); - MessageId mi; - promise.getFuture().get(mi); - /* End the timer */ - end = time(NULL); - LOG_DEBUG("end = " << end); - // Greater than or equal to since there may be delay in sending messaging - ASSERT_GE((double)(end - start), timeout / 1000.0); - ASSERT_EQ(producerStatsImplPtr->getTotalMsgsSent(), i + 1); - ASSERT_EQ(PulsarFriend::sum(producerStatsImplPtr->getTotalSendMap()), i + 1); - } - - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(receivedMsg.getProperty("type"), "batch"); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -TEST(BatchMessageTest, testBatchSizeInBytes) { - std::string testName = std::to_string(epochTime) + "testBatchSizeInBytes"; - - Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 1000; - int numOfMessages = 30; - ProducerConfiguration conf; - 
conf.setCompressionType(CompressionLZ4); - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingMaxAllowedSizeInBytes(20); - conf.setBatchingEnabled(true); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - ProducerStatsImplPtr producerStatsImplPtr = PulsarFriend::getProducerStatsPtr(producer); - // Send Asynchronously - std::atomic_int numOfMessagesProduced{0}; - std::string prefix = "12345678"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - ASSERT_EQ(producerStatsImplPtr->getNumMsgsSent(), i + 1); - ASSERT_LT(PulsarFriend::sum(producerStatsImplPtr->getSendMap()), i + 1); - ASSERT_EQ(producerStatsImplPtr->getTotalMsgsSent(), i + 1); - ASSERT_LT(PulsarFriend::sum(producerStatsImplPtr->getTotalSendMap()), i + 1); - LOG_DEBUG("sending message " << messageContent); - } - - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content 
- " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_LT(pulsar::PulsarFriend::getBatchIndex(receivedMsg.getMessageId()), 2); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - - // Check stats - ASSERT_EQ(PulsarFriend::sum(producerStatsImplPtr->getSendMap()), numOfMessages); - ASSERT_EQ(PulsarFriend::sum(producerStatsImplPtr->getTotalSendMap()), numOfMessages); - - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -TEST(BatchMessageTest, testSmallReceiverQueueSize) { - std::string testName = std::to_string(epochTime) + "testSmallReceiverQueueSize"; - - ClientConfiguration clientConf; - clientConf.setStatsIntervalInSeconds(20); - - Client client(lookupUrl, clientConf); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 10; - int numOfMessages = 1000; - ProducerConfiguration conf; - conf.setCompressionType(CompressionLZ4); - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingMaxPublishDelayMs(1); - conf.setBatchingEnabled(true); - conf.setMaxPendingMessages(numOfMessages + 1); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setReceiverQueueSize(41); - - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - 
result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - ProducerStatsImplPtr producerStatsImplPtr = PulsarFriend::getProducerStatsPtr(producer); - // Send Asynchronously - std::atomic_int numOfMessagesProduced{0}; - std::string prefix = testName; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - ASSERT_EQ(producerStatsImplPtr->getTotalMsgsSent(), i + 1); - ASSERT_LE(PulsarFriend::sum(producerStatsImplPtr->getTotalSendMap()), i + 1); - LOG_DEBUG("sending message " << messageContent); - } - - Message receivedMsg; - int i = 0; - for (i = 0; i < numOfMessages; i++) { - consumer.receive(receivedMsg); - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - - ConsumerStatsImplPtr consumerStatsImplPtr = PulsarFriend::getConsumerStatsPtr(consumer); - unsigned long t = consumerStatsImplPtr->getAckedMsgMap().at( - std::make_pair(ResultOk, proto::CommandAck_AckType_Individual)); - ASSERT_EQ(t, numOfMessages); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getAckedMsgMap()), numOfMessages); - 
ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalAckedMsgMap()), numOfMessages); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getReceivedMsgMap()), numOfMessages); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalReceivedMsgMap()), numOfMessages); - ASSERT_EQ(consumerStatsImplPtr->getTotalNumBytesRecieved(), consumerStatsImplPtr->getNumBytesRecieved()); - std::this_thread::sleep_for(std::chrono::seconds(20)); - ASSERT_NE(consumerStatsImplPtr->getTotalNumBytesRecieved(), consumerStatsImplPtr->getNumBytesRecieved()); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalAckedMsgMap()), numOfMessages); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getTotalReceivedMsgMap()), numOfMessages); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getAckedMsgMap()), 0); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getReceivedMsgMap()), 0); - - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -TEST(BatchMessageTest, testIndividualAck) { - std::string testName = std::to_string(epochTime) + "testIndividualAck"; - - ClientConfiguration clientConfig; - clientConfig.setStatsIntervalInSeconds(1); - - Client client(lookupUrl, clientConfig); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 5; - int numOfMessages = 10; - ProducerConfiguration conf; - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingEnabled(true); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setReceiverQueueSize(1); - - Promise 
consumerPromise; - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::atomic_int numOfMessagesProduced{0}; - std::string prefix = testName; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - LOG_DEBUG("sending message " << messageContent); - } - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Ack every 2nd message - if (i % 2 == 0) { - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - } - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); - - // Unsubscribe and resubscribe - // Expecting all messages to be sent again - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string 
expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Ack every first 5 and 10th message - if (i <= 5 || i == 10) { - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - } - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); - - // Unsubscribe and resubscribe - // Expecting only one batch message to be resent - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i + numOfMessages / 2); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++ + numOfMessages / 2)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Ack first 4 message only - if (i <= 4) { - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - } - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages / 2); - - // Unsubscribe and resubscribe - // Expecting only one batch message to be resent - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i + numOfMessages / 2); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++ + numOfMessages / 2)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Ack 
all - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages / 2); - - // Unsubscribe and resubscribe - // Expecting no batch message to be resent - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - // Number of messages consumed - ASSERT_NE(ResultOk, consumer.receive(receivedMsg, 5000)); - - consumer.close(); - client.close(); -} - -TEST(BatchMessageTest, testCumulativeAck) { - std::string testName = std::to_string(epochTime) + "testCumulativeAck"; - - ClientConfiguration clientConfig; - clientConfig.setStatsIntervalInSeconds(100); - - Client client(lookupUrl, clientConfig); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 5; - int numOfMessages = 15; - ProducerConfiguration conf; - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingEnabled(true); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setReceiverQueueSize(1); - - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - ProducerStatsImplPtr producerStatsImplPtr = 
PulsarFriend::getProducerStatsPtr(producer); - - // Send Asynchronously - std::atomic_int numOfMessagesProduced{0}; - std::string prefix = testName; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - LOG_DEBUG("sending message " << messageContent); - } - - Message receivedMsg; - int i = 0; - ConsumerStatsImplPtr consumerStatsImplPtr = PulsarFriend::getConsumerStatsPtr(consumer); - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Cumm. 
Ack 7th message - if (i == 7) { - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(receivedMsg)); - } - } - - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getReceivedMsgMap()), i + 1); - // Since last receive call times out - ASSERT_EQ(consumerStatsImplPtr->getReceivedMsgMap().at(ResultOk), i); - ASSERT_EQ(consumerStatsImplPtr->getReceivedMsgMap().at(ResultTimeout), 1); - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getAckedMsgMap()), 1); - ASSERT_EQ(producerStatsImplPtr->getNumBytesSent(), consumerStatsImplPtr->getNumBytesRecieved()); - unsigned long t = consumerStatsImplPtr->getAckedMsgMap().at( - std::make_pair(ResultOk, proto::CommandAck_AckType_Cumulative)); - ASSERT_EQ(t, 1); - - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); - - // Unsubscribe and resubscribe - // Expecting 10 messages to be sent again - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - consumerStatsImplPtr = PulsarFriend::getConsumerStatsPtr(consumer); - i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i + 5); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++ + 5)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Ack 10th message - if (i == 10) { - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(receivedMsg)); - } - } - - ASSERT_EQ(PulsarFriend::sum(consumerStatsImplPtr->getAckedMsgMap()), 1); - t = consumerStatsImplPtr->getAckedMsgMap().at( - std::make_pair(ResultOk, proto::CommandAck_AckType_Cumulative)); - ASSERT_EQ(t, 1); - - // Number of messages consumed - ASSERT_EQ(i, 10); - - // Unsubscribe and resubscribe - // Expecting no batch message to be resent - - 
consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - // Number of messages consumed - ASSERT_NE(ResultOk, consumer.receive(receivedMsg, 5000)); -} - -TEST(BatchMessageTest, testMixedAck) { - std::string testName = std::to_string(epochTime) + "testMixedAck"; - - Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 5; - int numOfMessages = 15; - ProducerConfiguration conf; - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingEnabled(true); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::atomic_int numOfMessagesProduced{0}; - std::string prefix = testName; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - LOG_DEBUG("sending message " << messageContent); - } - - Message 
receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Cumm. Ack 14th message - if (i == 14) { - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(receivedMsg)); - } - } - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); - - // Unsubscribe and resubscribe - // Expecting 5 messages to be sent again - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i + 10); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++ + 10)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - // Cumm Ack 9th message - if (i == 4) { - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(receivedMsg)); - } - } - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - - // Number of messages consumed - ASSERT_EQ(i, 5); - - // Unsubscribe and resubscribe - // Expecting no batch message to be resent - - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - // Number of messages consumed - ASSERT_NE(ResultOk, consumer.receive(receivedMsg, 5000)); -} - -// Also testing Cumulative Ack test case where greatestCumulativeAck returns -// MessageId() -TEST(BatchMessageTest, testPermits) { - std::string testName = std::to_string(epochTime) + 
"testPermits"; - - Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - // Enable batching on producer side - int batchSize = 10; - int numOfMessages = 75; - ProducerConfiguration conf; - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingMaxPublishDelayMs(5); - conf.setBatchingEnabled(true); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setReceiverQueueSize(5); - - Promise consumerPromise; - client.subscribeAsync(topicName, subName, consumerConfig, - WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.close(); - client.subscribe(topicName, subName, consumerConfig, consumer); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - // Send Asynchronously - std::atomic_int numOfMessagesProduced{0}; - std::string prefix = testName; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - LOG_DEBUG("sending message " << messageContent); - } - - std::this_thread::sleep_for(std::chrono::seconds(5)); - - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + 
std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(receivedMsg)); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - } - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); - - // Since all messages are acked - // Creating 25 new non batched message - conf.setBatchingEnabled(false); - - client.createProducer(topicName, conf, producer); - - numOfMessagesProduced = 0; - // Send Asynchronously - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, MessageCountSendCallback(numOfMessagesProduced)); - LOG_DEBUG("sending message " << messageContent); - } - std::this_thread::sleep_for(std::chrono::seconds(5)); - - ASSERT_LE(ConsumerTest::getNumOfMessagesInQueue(consumer), consumerConfig.getReceiverQueueSize()); - ASSERT_GE(ConsumerTest::getNumOfMessagesInQueue(consumer), consumerConfig.getReceiverQueueSize() / 2); - - i = 0; - while (consumer.receive(receivedMsg, 5000) == ResultOk) { - std::string expectedMessageContent = prefix + std::to_string(i); - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(receivedMsg.getProperty("msgIndex"), std::to_string(i++)); - ASSERT_EQ(expectedMessageContent, receivedMsg.getDataAsString()); - ASSERT_EQ(ResultOk, consumer.acknowledgeCumulative(receivedMsg)); - } - // Number of messages produced - ASSERT_EQ(numOfMessagesProduced.load(), 
numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages); -} - -TEST(BatchMessageTest, testPartitionedTopics) { - Client client(lookupUrl); - std::string topicName = - "persistent://public/default/test-partitioned-batch-messages-" + std::to_string(epochTime); - - // call admin api to make it partitioned - std::string url = adminUrl + "admin/v2/persistent/public/default/test-partitioned-batch-messages-" + - std::to_string(epochTime) + "/partitions"; - int res = makePutRequest(url, "7"); - - LOG_DEBUG("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - std::this_thread::sleep_for(std::chrono::seconds(2)); - - Producer producer; - // Enable batching on producer side - int batchSize = 100; - int numOfMessages = 10000; - ProducerConfiguration conf; - - conf.setCompressionType(CompressionZLib); - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingEnabled(true); - conf.setBatchingMaxPublishDelayMs(5); - conf.setBlockIfQueueFull(false); - conf.setMaxPendingMessages(10); - - Promise producerPromise; - client.createProducerAsync(topicName, conf, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - Result result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - std::string subName = "subscription-name"; - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - std::string temp = producer.getTopic(); - ASSERT_EQ(temp, topicName); - temp = consumer.getTopic(); - ASSERT_EQ(temp, topicName); - ASSERT_EQ(consumer.getSubscriptionName(), subName); - - globalPublishCountSuccess = 0; - globalPublishCountQueueFull = 0; - - // Send Asynchronously - std::string prefix = "msg-batch-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); 
- Message msg = - MessageBuilder().setContent(messageContent).setProperty("msgIndex", std::to_string(i)).build(); - producer.sendAsync(msg, &sendCallBackExpectingErrors); - LOG_DEBUG("sending message " << messageContent); - } - - Message receivedMsg; - int i = 0; - while (consumer.receive(receivedMsg, 30000) == ResultOk) { - LOG_DEBUG("Received Message with [ content - " << receivedMsg.getDataAsString() << "] [ messageID = " - << receivedMsg.getMessageId() << "]"); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMsg)); - i++; - } - - LOG_DEBUG("globalPublishCountQueueFull = " << globalPublishCountQueueFull); - LOG_DEBUG("globalPublishCountSuccess = " << globalPublishCountSuccess); - LOG_DEBUG("numOfMessages = " << numOfMessages); - - // Number of messages produced - ASSERT_EQ(globalPublishCountSuccess + globalPublishCountQueueFull, numOfMessages); - - // Number of messages consumed - ASSERT_EQ(i, numOfMessages - globalPublishCountQueueFull); -} - -TEST(BatchMessageTest, producerFailureResult) { - std::string testName = std::to_string(epochTime) + "testCumulativeAck"; - - ClientConfiguration clientConfig; - clientConfig.setStatsIntervalInSeconds(100); - - Client client(lookupUrl, clientConfig); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Producer producer; - - int batchSize = 100; - ProducerConfiguration conf; - - conf.setCompressionType(CompressionZLib); - conf.setBatchingMaxMessages(batchSize); - conf.setBatchingEnabled(true); - conf.setBatchingMaxPublishDelayMs(50000); - conf.setBlockIfQueueFull(false); - conf.setMaxPendingMessages(10); - - Result res = Result::ResultBrokerMetadataError; - - client.createProducer(topicName, conf, producer); - Message msg = MessageBuilder().setContent("test").build(); - producer.sendAsync(msg, std::bind(&sendFailCallBack, std::placeholders::_1, res)); - PulsarFriend::producerFailMessages(producer, res); -} - -TEST(BatchMessageTest, 
testPraseMessageBatchEntry) { - struct Case { - std::string content; - std::string propKey; - std::string propValue; - }; - std::vector cases; - cases.push_back(Case{"example1", "prop1", "value1"}); - cases.push_back(Case{"example2", "prop2", "value2"}); - - SharedBuffer payload = SharedBuffer::allocate(128); - for (auto it = cases.begin(); it != cases.end(); ++it) { - MessageBuilder msgBuilder; - const Message& message = - msgBuilder.setContent(it->content).setProperty(it->propKey, it->propValue).build(); - Commands::serializeSingleMessageInBatchWithPayload(message, payload, 1024); - } - - MessageBatch messageBatch; - MessageId fakeId(0, 5000, 10, -1); - messageBatch.withMessageId(fakeId).parseFrom(payload, static_cast(cases.size())); - const std::vector& messages = messageBatch.messages(); - - ASSERT_EQ(messages.size(), cases.size()); - for (int i = 0; i < cases.size(); ++i) { - const Message& message = messages[i]; - const Case& expected = cases[i]; - ASSERT_EQ(message.getMessageId().batchIndex(), i); - ASSERT_EQ(message.getMessageId().ledgerId(), 5000); - ASSERT_EQ(message.getDataAsString(), expected.content); - ASSERT_EQ(message.getProperty(expected.propKey), expected.propValue); - } -} - -TEST(BatchMessageTest, testSendCallback) { - const std::string topicName = "persistent://public/default/BasicMessageTest-testSendCallback"; - - Client client(lookupUrl); - - constexpr int numMessagesOfBatch = 3; - - ProducerConfiguration producerConfig; - producerConfig.setBatchingEnabled(true); - producerConfig.setBatchingMaxMessages(numMessagesOfBatch); - producerConfig.setBatchingMaxPublishDelayMs(1000); // 1 s, it's long enough for 3 messages batched - producerConfig.setMaxPendingMessages(5); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConfig, producer)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicName, "SubscriptionName", consumer)); - - Latch latch(numMessagesOfBatch); - std::set sentIdSet; - for (int i 
= 0; i < numMessagesOfBatch; i++) { - const auto msg = MessageBuilder().setContent("a").build(); - producer.sendAsync(msg, [&sentIdSet, i, &latch](Result result, const MessageId& id) { - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(i, id.batchIndex()); - sentIdSet.emplace(id); - LOG_INFO("id of batch " << i << ": " << id); - latch.countdown(); - }); - } - - std::set receivedIdSet; - for (int i = 0; i < numMessagesOfBatch; i++) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg)); - receivedIdSet.emplace(msg.getMessageId()); - consumer.acknowledge(msg); - } - - latch.wait(); - ASSERT_EQ(sentIdSet, receivedIdSet); - - consumer.close(); - producer.close(); - client.close(); -} - -TEST(BatchMessageTest, testProducerQueueWithBatches) { - std::string testName = std::to_string(epochTime) + "testProducerQueueWithBatches"; - - ClientConfiguration clientConf; - clientConf.setStatsIntervalInSeconds(0); - - Client client(lookupUrl, clientConf); - std::string topicName = "persistent://public/default/" + testName; - - // Enable batching on producer side - ProducerConfiguration conf; - conf.setBlockIfQueueFull(false); - conf.setMaxPendingMessages(10); - conf.setBatchingMaxMessages(10000); - conf.setBatchingMaxPublishDelayMs(1000); - conf.setBatchingEnabled(true); - - Producer producer; - Result result = client.createProducer(topicName, conf, producer); - ASSERT_EQ(ResultOk, result); - - std::string prefix = "msg-batch-test-produce-timeout-"; - int rejectedMessges = 0; - for (int i = 0; i < 20; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().setContent("hello").build(); - - producer.sendAsync(msg, [&rejectedMessges](Result result, const MessageId& id) { - if (result == ResultProducerQueueIsFull) { - ++rejectedMessges; - } - }); - } - - ASSERT_EQ(rejectedMessges, 10); -} - -TEST(BatchMessageTest, testSingleMessageMetadata) { - const auto topic = "BatchMessageTest-SingleMessageMetadata-" + std::to_string(time(nullptr)); - 
constexpr int numMessages = 3; - - Client client(lookupUrl); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer( - topic, ProducerConfiguration().setBatchingMaxMessages(numMessages), producer)); - - producer.sendAsync(MessageBuilder() - .setContent("msg-0") - .setPartitionKey("key-0") - .setOrderingKey("ordering-key-0") - .setEventTimestamp(10UL) - .setProperty("k0", "v0") - .setProperty("k1", "v1") - .build(), - nullptr); - producer.sendAsync(MessageBuilder() - .setContent("msg-1") - .setOrderingKey("ordering-key-1") - .setEventTimestamp(11UL) - .setProperty("k2", "v2") - .build(), - nullptr); - producer.sendAsync(MessageBuilder().setContent("msg-2").build(), nullptr); - ASSERT_EQ(ResultOk, producer.flush()); - - Message msgs[numMessages]; - for (int i = 0; i < numMessages; i++) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 3000)); - msgs[i] = msg; - LOG_INFO("message " << i << ": " << msg.getDataAsString() - << ", key: " << (msg.hasPartitionKey() ? msg.getPartitionKey() : "(null)") - << ", ordering key: " << (msg.hasOrderingKey() ? 
msg.getOrderingKey() : "(null)") - << ", event time: " << (msg.getEventTimestamp()) - << ", properties count: " << msg.getProperties().size() - << ", has schema version: " << msg.hasSchemaVersion()); - } - - ASSERT_EQ(msgs[0].getDataAsString(), "msg-0"); - ASSERT_TRUE(msgs[0].hasPartitionKey()); - ASSERT_EQ(msgs[0].getPartitionKey(), "key-0"); - ASSERT_TRUE(msgs[0].hasOrderingKey()); - ASSERT_EQ(msgs[0].getOrderingKey(), "ordering-key-0"); - ASSERT_EQ(msgs[0].getEventTimestamp(), 10UL); - ASSERT_EQ(msgs[0].getProperties().size(), 2); - ASSERT_TRUE(msgs[0].hasProperty("k0")); - ASSERT_EQ(msgs[0].getProperty("k0"), "v0"); - ASSERT_TRUE(msgs[0].hasProperty("k1")); - ASSERT_EQ(msgs[0].getProperty("k1"), "v1"); - ASSERT_FALSE(msgs[0].hasSchemaVersion()); - - ASSERT_EQ(msgs[1].getDataAsString(), "msg-1"); - ASSERT_FALSE(msgs[1].hasPartitionKey()); - ASSERT_TRUE(msgs[1].hasOrderingKey()); - ASSERT_EQ(msgs[1].getOrderingKey(), "ordering-key-1"); - ASSERT_EQ(msgs[1].getEventTimestamp(), 11UL); - ASSERT_EQ(msgs[1].getProperties().size(), 1); - ASSERT_TRUE(msgs[1].hasProperty("k2")); - ASSERT_EQ(msgs[1].getProperty("k2"), "v2"); - ASSERT_FALSE(msgs[1].hasSchemaVersion()); - - ASSERT_EQ(msgs[2].getDataAsString(), "msg-2"); - ASSERT_FALSE(msgs[2].hasPartitionKey()); - ASSERT_FALSE(msgs[2].hasOrderingKey()); - ASSERT_EQ(msgs[2].getEventTimestamp(), 0UL); - ASSERT_EQ(msgs[2].getProperties().size(), 0); - ASSERT_FALSE(msgs[2].hasSchemaVersion()); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/BlockingQueueTest.cc b/pulsar-client-cpp/tests/BlockingQueueTest.cc deleted file mode 100644 index 94b0a1bbdfdbc..0000000000000 --- a/pulsar-client-cpp/tests/BlockingQueueTest.cc +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include - -#include -#include -#include - -class ProducerWorker { - private: - std::thread producerThread_; - BlockingQueue& queue_; - - public: - ProducerWorker(BlockingQueue& queue) : queue_(queue) {} - - void produce(int number) { producerThread_ = std::thread(&ProducerWorker::pushNumbers, this, number); } - - void pushNumbers(int number) { - for (int i = 1; i <= number; i++) { - queue_.push(i); - } - } - - void join() { producerThread_.join(); } -}; - -class ConsumerWorker { - private: - std::thread consumerThread_; - BlockingQueue& queue_; - - public: - ConsumerWorker(BlockingQueue& queue) : queue_(queue) {} - - void consume(int number) { consumerThread_ = std::thread(&ConsumerWorker::popNumbers, this, number); } - - void popNumbers(int number) { - for (int i = 1; i <= number; i++) { - int poppedElement; - queue_.pop(poppedElement); - } - } - - void join() { consumerThread_.join(); } -}; - -TEST(BlockingQueueTest, testBasic) { - size_t size = 5; - BlockingQueue queue(size); - - ProducerWorker producerWorker(queue); - producerWorker.produce(5); - - ConsumerWorker consumerWorker(queue); - consumerWorker.consume(5); - - producerWorker.join(); - consumerWorker.join(); - - size_t zero = 0; - ASSERT_EQ(zero, queue.size()); -} - -TEST(BlockingQueueTest, testQueueOperations) { - size_t size = 5; - BlockingQueue queue(size); - for (size_t i = 1; i <= size; i++) { 
- queue.push(i); - } - ASSERT_EQ(queue.size(), size); - - int cnt = 1; - for (BlockingQueue::const_iterator it = queue.begin(); it != queue.end(); it++) { - ASSERT_EQ(cnt, *it); - ++cnt; - } - - cnt = 1; - for (BlockingQueue::iterator it = queue.begin(); it != queue.end(); it++) { - ASSERT_EQ(cnt, *it); - ++cnt; - } - - int poppedElement; - for (size_t i = 1; i <= size; i++) { - queue.pop(poppedElement); - } - - ASSERT_FALSE(queue.peek(poppedElement)); -} - -TEST(BlockingQueueTest, testBlockingProducer) { - size_t size = 5; - BlockingQueue queue(size); - - ProducerWorker producerWorker(queue); - producerWorker.produce(8); - - ConsumerWorker consumerWorker(queue); - consumerWorker.consume(5); - - producerWorker.join(); - consumerWorker.join(); - - size_t three = 3; - ASSERT_EQ(three, queue.size()); -} - -TEST(BlockingQueueTest, testBlockingConsumer) { - size_t size = 5; - BlockingQueue queue(size); - - ProducerWorker producerWorker(queue); - producerWorker.produce(5); - - ConsumerWorker consumerWorker(queue); - consumerWorker.consume(8); - - producerWorker.pushNumbers(3); - - producerWorker.join(); - consumerWorker.join(); - - size_t zero = 0; - ASSERT_EQ(zero, queue.size()); -} - -TEST(BlockingQueueTest, testTimeout) { - size_t size = 5; - BlockingQueue queue(size); - int value; - bool popReturn = queue.pop(value, std::chrono::seconds(1)); - std::this_thread::sleep_for(std::chrono::seconds(2)); - ASSERT_FALSE(popReturn); -} - -TEST(BlockingQueueTest, testPushPopRace) { - auto test_logic = []() { - size_t size = 5; - BlockingQueue queue(size); - - std::vector> producers; - for (int i = 0; i < 5; ++i) { - producers.emplace_back(new ProducerWorker{queue}); - producers.back()->produce(1000); - } - - // wait for queue full - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - - std::vector> consumers; - for (int i = 0; i < 5; ++i) { - consumers.emplace_back(new ConsumerWorker{queue}); - consumers.back()->consume(1000); - } - - auto future = 
std::async(std::launch::async, [&]() { - for (auto& p : producers) p->join(); - for (auto& c : consumers) c->join(); - }); - auto ret = future.wait_for(std::chrono::seconds(5)); - if (ret == std::future_status::ready) { - std::cerr << "Exiting"; - exit(0); - } else { - std::cerr << "Threads are not exited in time"; - exit(1); - } - }; - - ASSERT_EXIT(test_logic(), ::testing::ExitedWithCode(0), "Exiting"); -} - -TEST(BlockingQueueTest, testCloseInterruptOnEmpty) { - BlockingQueue queue(10); - pulsar::Latch latch(1); - - auto thread = std::thread([&]() { - int v; - bool res = queue.pop(v); - ASSERT_FALSE(res); - latch.countdown(); - }); - - // Sleep to allow for background thread to call pop and be blocked there - std::this_thread::sleep_for(std::chrono::seconds(1)); - - queue.close(); - bool wasUnblocked = latch.wait(std::chrono::seconds(5)); - - ASSERT_TRUE(wasUnblocked); - thread.join(); -} - -TEST(BlockingQueueTest, testCloseInterruptOnFull) { - BlockingQueue queue(10); - pulsar::Latch latch(1); - - auto thread = std::thread([&]() { - int i = 0; - while (true) { - bool res = queue.push(i++); - if (!res) { - latch.countdown(); - return; - } - } - }); - - // Sleep to allow for background thread to fill the queue and be blocked there - std::this_thread::sleep_for(std::chrono::seconds(1)); - - queue.close(); - bool wasUnblocked = latch.wait(std::chrono::seconds(5)); - - ASSERT_TRUE(wasUnblocked); - thread.join(); -} diff --git a/pulsar-client-cpp/tests/CMakeLists.txt b/pulsar-client-cpp/tests/CMakeLists.txt deleted file mode 100644 index b4d840bd7b777..0000000000000 --- a/pulsar-client-cpp/tests/CMakeLists.txt +++ /dev/null @@ -1,61 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -if (NOT PROTOC_PATH) - set(PROTOC_PATH protoc) -endif() - -set(LIB_AUTOGEN_DIR ${AUTOGEN_DIR}/tests) -file(MAKE_DIRECTORY ${LIB_AUTOGEN_DIR}) -include_directories(${LIB_AUTOGEN_DIR}) - -set(PROTO_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../pulsar-client/src/test/proto) -set(PROTO_SOURCES ${LIB_AUTOGEN_DIR}/Test.pb.cc ${LIB_AUTOGEN_DIR}/ExternalTest.pb.cc) -add_custom_command( - OUTPUT ${PROTO_SOURCES} - COMMAND ${PROTOC_PATH} -I ${PROTO_DIR} ${PROTO_DIR}/Test.proto ${PROTO_DIR}/ExternalTest.proto --cpp_out=${LIB_AUTOGEN_DIR}) - -set(PROTO_SOURCE_PADDING ${LIB_AUTOGEN_DIR}/PaddingDemo.pb.cc) -add_custom_command( - OUTPUT ${PROTO_SOURCE_PADDING} - COMMAND ${PROTOC_PATH} -I . 
./PaddingDemo.proto --cpp_out=${LIB_AUTOGEN_DIR} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - -set(PROTO_SOURCES ${PROTO_SOURCES} ${PROTO_SOURCE_PADDING}) - -include_directories(${LIB_AUTOGEN_DIR}) - -find_library(GMOCK_LIBRARY_PATH gmock) -find_library(GTEST_LIBRARY_PATH gtest) -find_library(GMOCKD_LIBRARY_PATH gmockd) -find_library(GTESTD_LIBRARY_PATH gtestd) -if (NOT GMOCKD_LIBRARY_PATH) - set(GMOCKD_LIBRARY_PATH ${GMOCK_LIBRARY_PATH}) -endif() -if (NOT GTESTD_LIBRARY_PATH) - set(GTESTD_LIBRARY_PATH ${GTEST_LIBRARY_PATH}) -endif() - -file(GLOB TEST_SOURCES *.cc c/*.cc) - -add_executable(main ${TEST_SOURCES} ${PROTO_SOURCES}) - -target_include_directories(main PRIVATE ${CMAKE_SOURCE_DIR}/lib ${AUTOGEN_DIR}/lib) - -target_link_libraries(main ${CLIENT_LIBS} pulsarStatic $<$:${GMOCKD_LIBRARY_PATH}> $<$:${GTESTD_LIBRARY_PATH}> $<$>:${GMOCK_LIBRARY_PATH}> $<$>:${GTEST_LIBRARY_PATH}>) diff --git a/pulsar-client-cpp/tests/ClientDeduplicationTest.cc b/pulsar-client-cpp/tests/ClientDeduplicationTest.cc deleted file mode 100644 index c3373d5d11445..0000000000000 --- a/pulsar-client-cpp/tests/ClientDeduplicationTest.cc +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include - -#include "HttpHelper.h" - -#include -#include - -using namespace pulsar; - -static std::string serviceUrl = "pulsar://localhost:6650"; -static std::string adminUrl = "http://localhost:8080/"; - -TEST(ClientDeduplicationTest, testProducerSequenceAfterReconnect) { - Client client(serviceUrl); - - std::string topicName = - "persistent://public/dedup-1/testProducerSequenceAfterReconnect-" + std::to_string(time(NULL)); - - // call admin api to create namespace and enable deduplication - std::string url = adminUrl + "admin/v2/namespaces/public/dedup-1"; - int res = makePutRequest(url, R"({"replication_clusters": ["standalone"]})"); - ASSERT_TRUE(res == 204 || res == 409); - - url = adminUrl + "admin/v2/namespaces/public/dedup-1/permissions/anonymous"; - res = makePostRequest(url, R"(["produce","consume"])"); - ASSERT_TRUE(res == 204 || res == 409); - - url = adminUrl + "admin/v2/namespaces/public/dedup-1/deduplication"; - res = makePostRequest(url, "true"); - ASSERT_TRUE(res == 204 || res == 409); - - // Ensure dedup status was refreshed - std::this_thread::sleep_for(std::chrono::seconds(1)); - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(client.createReader(topicName, MessageId::earliest(), readerConf, reader), ResultOk); - - Producer producer; - ProducerConfiguration producerConf; - producerConf.setProducerName("my-producer-name"); - ASSERT_EQ(client.createProducer(topicName, producerConf, producer), ResultOk); - - ASSERT_EQ(producer.getLastSequenceId(), -1L); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(producer.send(msg), ResultOk); - ASSERT_EQ(producer.getLastSequenceId(), i); - } - - producer.close(); - - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConf, producer)); - ASSERT_EQ(producer.getLastSequenceId(), 9); - - for (int i = 10; i < 20; i++) { - std::string content = 
"my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(producer.send(msg), ResultOk); - ASSERT_EQ(producer.getLastSequenceId(), i); - } - - client.close(); -} - -TEST(ClientDeduplicationTest, testProducerDeduplication) { - Client client(adminUrl); - - std::string topicName = - "persistent://public/dedup-2/testProducerDeduplication-" + std::to_string(time(NULL)); - - std::string url = adminUrl + "admin/v2/namespaces/public/dedup-2"; - int res = makePutRequest(url, R"({"replication_clusters": ["standalone"]})"); - ASSERT_TRUE(res == 204 || res == 409); - - url = adminUrl + "admin/v2/namespaces/public/dedup-2/permissions/anonymous"; - res = makePostRequest(url, R"(["produce","consume"])"); - ASSERT_TRUE(res == 204 || res == 409); - - url = adminUrl + "admin/v2/namespaces/public/dedup-2/deduplication"; - res = makePostRequest(url, "true"); - ASSERT_TRUE(res == 204 || res == 409); - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(client.createReader(topicName, MessageId::earliest(), readerConf, reader), ResultOk); - - Producer producer; - ProducerConfiguration producerConf; - producerConf.setProducerName("my-producer-name"); - ASSERT_EQ(client.createProducer(topicName, producerConf, producer), ResultOk); - - ASSERT_EQ(producer.getLastSequenceId(), -1L); - - Consumer consumer; - ASSERT_EQ(client.subscribe(topicName, "my-subscription", consumer), ResultOk); - - ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-0").setSequenceId(0).build()), ResultOk); - ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-1").setSequenceId(1).build()), ResultOk); - ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-2").setSequenceId(2).build()), ResultOk); - - // Repeat the messages and verify they're not received by consumer - ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-1").setSequenceId(1).build()), ResultOk); - 
ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-2").setSequenceId(2).build()), ResultOk); - - producer.close(); - - Message msg; - for (int i = 0; i < 3; i++) { - consumer.receive(msg); - - ASSERT_EQ(msg.getDataAsString(), "my-message-" + std::to_string(i)); - consumer.acknowledge(msg); - } - - // No other messages should be received - ASSERT_EQ(consumer.receive(msg, 1000), ResultTimeout); - - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConf, producer)); - ASSERT_EQ(producer.getLastSequenceId(), 2); - - // Repeat the messages and verify they're not received by consumer - ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-1").setSequenceId(1).build()), ResultOk); - ASSERT_EQ(producer.send(MessageBuilder().setContent("my-message-2").setSequenceId(2).build()), ResultOk); - - // No other messages should be received - ASSERT_EQ(consumer.receive(msg, 1000), ResultTimeout); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/ClientTest.cc b/pulsar-client-cpp/tests/ClientTest.cc deleted file mode 100644 index 216b54827eaa6..0000000000000 --- a/pulsar-client-cpp/tests/ClientTest.cc +++ /dev/null @@ -1,297 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include "HttpHelper.h" -#include "PulsarFriend.h" -#include "WaitUtils.h" - -#include -#include -#include "../lib/checksum/ChecksumProvider.h" -#include "lib/LogUtils.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; - -TEST(ClientTest, testChecksumComputation) { - std::string data = "test"; - std::string doubleData = "testtest"; - - // (1) compute checksum of specific chunk of string - int checksum1 = computeChecksum(0, (char *)data.c_str(), data.length()); - int checksum2 = computeChecksum(0, (char *)doubleData.c_str() + 4, 4); - ASSERT_EQ(checksum1, checksum2); - - //(2) compute incremental checksum - // (a) checksum on full data - int doubleChecksum = computeChecksum(0, (char *)doubleData.c_str(), doubleData.length()); - // (b) incremental checksum on multiple partial data - checksum1 = computeChecksum(0, (char *)data.c_str(), data.length()); - int incrementalChecksum = computeChecksum(checksum1, (char *)data.c_str(), data.length()); - ASSERT_EQ(incrementalChecksum, doubleChecksum); -} - -TEST(ClientTest, testSwHwChecksum) { - std::string data = "test"; - std::string doubleData = "testtest"; - - // (1) compute checksum of specific chunk of string - // (a) HW - uint32_t hwChecksum1 = crc32cHw(0, (char *)data.c_str(), data.length()); - uint32_t hwChecksum2 = crc32cHw(0, (char *)doubleData.c_str() + 4, 4); - // (b) SW - uint32_t swChecksum1 = crc32cSw(0, (char *)data.c_str(), data.length()); - uint32_t swChecksum2 = crc32cSw(0, (char *)doubleData.c_str() + 4, 4); - - ASSERT_EQ(hwChecksum1, hwChecksum2); - ASSERT_EQ(hwChecksum1, swChecksum1); - ASSERT_EQ(hwChecksum2, swChecksum2); - - //(2) compute incremental checksum - // (a.1) hw: checksum on full data - uint32_t hwDoubleChecksum = crc32cHw(0, (char *)doubleData.c_str(), doubleData.length()); - // (a.2) hw: incremental checksum on multiple partial data - hwChecksum1 = crc32cHw(0, (char *)data.c_str(), data.length()); - uint32_t 
hwIncrementalChecksum = crc32cHw(hwChecksum1, (char *)data.c_str(), data.length()); - // (b.1) sw: checksum on full data - uint32_t swDoubleChecksum = crc32cSw(0, (char *)doubleData.c_str(), doubleData.length()); - ASSERT_EQ(swDoubleChecksum, hwDoubleChecksum); - // (b.2) sw: incremental checksum on multiple partial data - swChecksum1 = crc32cHw(0, (char *)data.c_str(), data.length()); - uint32_t swIncrementalChecksum = crc32cSw(swChecksum1, (char *)data.c_str(), data.length()); - ASSERT_EQ(hwIncrementalChecksum, hwDoubleChecksum); - ASSERT_EQ(hwIncrementalChecksum, swIncrementalChecksum); - ASSERT_EQ(hwIncrementalChecksum, swIncrementalChecksum); -} - -TEST(ClientTest, testServerConnectError) { - const std::string topic = "test-server-connect-error"; - Client client("pulsar://localhost:65535", ClientConfiguration().setOperationTimeoutSeconds(1)); - Producer producer; - ASSERT_EQ(ResultTimeout, client.createProducer(topic, producer)); - Consumer consumer; - ASSERT_EQ(ResultTimeout, client.subscribe(topic, "sub", consumer)); - Reader reader; - ReaderConfiguration readerConf; - ASSERT_EQ(ResultTimeout, client.createReader(topic, MessageId::earliest(), readerConf, reader)); - client.close(); -} - -TEST(ClientTest, testConnectTimeout) { - // 192.0.2.0/24 is assigned for documentation, should be a deadend - const std::string blackHoleBroker = "pulsar://192.0.2.1:1234"; - const std::string topic = "test-connect-timeout"; - - Client clientLow(blackHoleBroker, ClientConfiguration().setConnectionTimeout(1000)); - Client clientDefault(blackHoleBroker); - - std::promise promiseLow; - clientLow.createProducerAsync( - topic, [&promiseLow](Result result, Producer producer) { promiseLow.set_value(result); }); - - std::promise promiseDefault; - clientDefault.createProducerAsync( - topic, [&promiseDefault](Result result, Producer producer) { promiseDefault.set_value(result); }); - - auto futureLow = promiseLow.get_future(); - 
ASSERT_EQ(futureLow.wait_for(std::chrono::milliseconds(1500)), std::future_status::ready); - ASSERT_EQ(futureLow.get(), ResultConnectError); - - auto futureDefault = promiseDefault.get_future(); - ASSERT_EQ(futureDefault.wait_for(std::chrono::milliseconds(10)), std::future_status::timeout); - - clientLow.close(); - clientDefault.close(); - - ASSERT_EQ(futureDefault.wait_for(std::chrono::milliseconds(10)), std::future_status::ready); - ASSERT_EQ(futureDefault.get(), ResultConnectError); -} - -TEST(ClientTest, testGetNumberOfReferences) { - Client client("pulsar://localhost:6650"); - - // Producer test - uint64_t numberOfProducers = 0; - const std::string nonPartitionedTopic = - "testGetNumberOfReferencesNonPartitionedTopic" + std::to_string(time(nullptr)); - - const std::string partitionedTopic = - "testGetNumberOfReferencesPartitionedTopic" + std::to_string(time(nullptr)); - Producer producer; - client.createProducer(nonPartitionedTopic, producer); - numberOfProducers = 1; - ASSERT_EQ(numberOfProducers, client.getNumberOfProducers()); - - producer.close(); - numberOfProducers = 0; - ASSERT_EQ(numberOfProducers, client.getNumberOfProducers()); - - // PartitionedProducer - int res = makePutRequest( - "http://localhost:8080/admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "2"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - client.createProducer(partitionedTopic, producer); - numberOfProducers = 2; - ASSERT_EQ(numberOfProducers, client.getNumberOfProducers()); - producer.close(); - numberOfProducers = 0; - ASSERT_EQ(numberOfProducers, client.getNumberOfProducers()); - - // Consumer test - uint64_t numberOfConsumers = 0; - - Consumer consumer1; - client.subscribe(nonPartitionedTopic, "consumer-1", consumer1); - numberOfConsumers = 1; - ASSERT_EQ(numberOfConsumers, client.getNumberOfConsumers()); - - consumer1.close(); - numberOfConsumers = 0; - ASSERT_EQ(numberOfConsumers, client.getNumberOfConsumers()); - - Consumer consumer2; - 
Consumer consumer3; - client.subscribe(partitionedTopic, "consumer-2", consumer2); - numberOfConsumers = 2; - ASSERT_EQ(numberOfConsumers, client.getNumberOfConsumers()); - client.subscribe(nonPartitionedTopic, "consumer-3", consumer3); - numberOfConsumers = 3; - ASSERT_EQ(numberOfConsumers, client.getNumberOfConsumers()); - consumer2.close(); - consumer3.close(); - numberOfConsumers = 0; - ASSERT_EQ(numberOfConsumers, client.getNumberOfConsumers()); - - client.close(); -} - -TEST(ClientTest, testReferenceCount) { - Client client(lookupUrl); - const std::string topic = "client-test-reference-count-" + std::to_string(time(nullptr)); - - auto &producers = PulsarFriend::getProducers(client); - auto &consumers = PulsarFriend::getConsumers(client); - ReaderImplWeakPtr readerWeakPtr; - - { - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - ASSERT_EQ(producers.size(), 1); - ASSERT_TRUE(producers[0].use_count() > 0); - LOG_INFO("Reference count of the producer: " << producers[0].use_count()); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "my-sub", consumer)); - ASSERT_EQ(consumers.size(), 1); - ASSERT_TRUE(consumers[0].use_count() > 0); - LOG_INFO("Reference count of the consumer: " << consumers[0].use_count()); - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, - client.createReader(topic + "-reader", MessageId::earliest(), readerConf, reader)); - ASSERT_EQ(consumers.size(), 2); - ASSERT_TRUE(consumers[1].use_count() > 0); - LOG_INFO("Reference count of the reader's underlying consumer: " << consumers[1].use_count()); - - readerWeakPtr = PulsarFriend::getReaderImplWeakPtr(reader); - ASSERT_TRUE(readerWeakPtr.use_count() > 0); - LOG_INFO("Reference count of the reader: " << readerWeakPtr.use_count()); - } - - ASSERT_EQ(producers.size(), 1); - ASSERT_EQ(producers[0].use_count(), 0); - ASSERT_EQ(consumers.size(), 2); - - waitUntil(std::chrono::seconds(1), [&consumers, &readerWeakPtr] { - return 
consumers[0].use_count() == 0 && consumers[1].use_count() == 0 && readerWeakPtr.expired(); - }); - EXPECT_EQ(consumers[0].use_count(), 0); - EXPECT_EQ(consumers[1].use_count(), 0); - EXPECT_EQ(readerWeakPtr.use_count(), 0); - client.close(); -} - -TEST(ClientTest, testWrongListener) { - const std::string topic = "client-test-wrong-listener-" + std::to_string(time(nullptr)); - auto httpCode = makePutRequest( - "http://localhost:8080/admin/v2/persistent/public/default/" + topic + "/partitions", "3"); - LOG_INFO("create " << topic << ": " << httpCode); - - Client client(lookupUrl, ClientConfiguration().setListenerName("test")); - Producer producer; - ASSERT_EQ(ResultServiceUnitNotReady, client.createProducer(topic, producer)); - ASSERT_EQ(ResultProducerNotInitialized, producer.close()); - ASSERT_EQ(PulsarFriend::getProducers(client).size(), 0); - ASSERT_EQ(ResultOk, client.close()); - - // The connection will be closed when the consumer failed, we must recreate the Client. Otherwise, the - // creation of Consumer or Reader could fail with ResultConnectError. 
- client = Client(lookupUrl, ClientConfiguration().setListenerName("test")); - Consumer consumer; - ASSERT_EQ(ResultServiceUnitNotReady, client.subscribe(topic, "sub", consumer)); - ASSERT_EQ(ResultConsumerNotInitialized, consumer.close()); - - ASSERT_EQ(PulsarFriend::getConsumers(client).size(), 0); - ASSERT_EQ(ResultOk, client.close()); - - client = Client(lookupUrl, ClientConfiguration().setListenerName("test")); - - Consumer multiTopicsConsumer; - ASSERT_EQ(ResultServiceUnitNotReady, - client.subscribe({topic + "-partition-0", topic + "-partition-1", topic + "-partition-2"}, - "sub", multiTopicsConsumer)); - - ASSERT_EQ(PulsarFriend::getConsumers(client).size(), 0); - ASSERT_EQ(ResultOk, client.close()); - - // Currently Reader can only read a non-partitioned topic in C++ client - client = Client(lookupUrl, ClientConfiguration().setListenerName("test")); - - // Currently Reader can only read a non-partitioned topic in C++ client - Reader reader; - ASSERT_EQ(ResultServiceUnitNotReady, - client.createReader(topic + "-partition-0", MessageId::earliest(), {}, reader)); - ASSERT_EQ(ResultConsumerNotInitialized, reader.close()); - ASSERT_EQ(PulsarFriend::getConsumers(client).size(), 0); - ASSERT_EQ(ResultOk, client.close()); -} - -TEST(ClientTest, testMultiBrokerUrl) { - const std::string topic = "client-test-multi-broker-url-" + std::to_string(time(nullptr)); - Client client("pulsar://localhost:6000,localhost"); // the 1st address is not reachable - - Producer producer; - PulsarFriend::setServiceUrlIndex(client, 0); - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - - Consumer consumer; - PulsarFriend::setServiceUrlIndex(client, 0); - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); - - Reader reader; - PulsarFriend::setServiceUrlIndex(client, 0); - ASSERT_EQ(ResultOk, client.createReader(topic, MessageId::earliest(), {}, reader)); - client.close(); -} diff --git a/pulsar-client-cpp/tests/CompressionCodecSnappyTest.cc 
b/pulsar-client-cpp/tests/CompressionCodecSnappyTest.cc deleted file mode 100644 index 27d668f36e2a1..0000000000000 --- a/pulsar-client-cpp/tests/CompressionCodecSnappyTest.cc +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include "../lib/CompressionCodecSnappy.h" - -using namespace pulsar; - -TEST(CompressionCodecSnappyTest, testEncodeAndDecode) { - CompressionCodecSnappy compressionCodecSnappy; - char data[] = "snappy compression compresses snappy"; - size_t sz = sizeof(data); - SharedBuffer source = SharedBuffer::wrap(data, sz); - SharedBuffer compressed = compressionCodecSnappy.encode(source); - ASSERT_GT(compressed.readableBytes(), 0); - - SharedBuffer uncompressed; - bool res = compressionCodecSnappy.decode(compressed, static_cast(sz), uncompressed); - ASSERT_TRUE(res); - ASSERT_EQ(uncompressed.readableBytes(), sz); - ASSERT_STREQ(data, uncompressed.data()); -} diff --git a/pulsar-client-cpp/tests/ConsumerConfigurationTest.cc b/pulsar-client-cpp/tests/ConsumerConfigurationTest.cc deleted file mode 100644 index 24f541b57ba6a..0000000000000 --- a/pulsar-client-cpp/tests/ConsumerConfigurationTest.cc +++ /dev/null @@ -1,310 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include "NoOpsCryptoKeyReader.h" - -DECLARE_LOG_OBJECT() - -#include "../lib/Future.h" -#include "../lib/Utils.h" - -using namespace pulsar; - -class DummyEventListener : public ConsumerEventListener { - public: - virtual void becameActive(Consumer consumer, int partitionId) override {} - - virtual void becameInactive(Consumer consumer, int partitionId) override {} -}; - -TEST(ConsumerConfigurationTest, testDefaultConfig) { - ConsumerConfiguration conf; - ASSERT_EQ(conf.getSchema().getSchemaType(), SchemaType::BYTES); - ASSERT_EQ(conf.getConsumerType(), ConsumerExclusive); - ASSERT_EQ(conf.hasMessageListener(), false); - ASSERT_EQ(conf.hasConsumerEventListener(), false); - ASSERT_EQ(conf.getReceiverQueueSize(), 1000); - ASSERT_EQ(conf.getMaxTotalReceiverQueueSizeAcrossPartitions(), 50000); - ASSERT_EQ(conf.getConsumerName(), ""); - ASSERT_EQ(conf.getUnAckedMessagesTimeoutMs(), 0); - ASSERT_EQ(conf.getTickDurationInMs(), 1000); - ASSERT_EQ(conf.getNegativeAckRedeliveryDelayMs(), 60000); - ASSERT_EQ(conf.getAckGroupingTimeMs(), 100); - ASSERT_EQ(conf.getAckGroupingMaxSize(), 1000); - ASSERT_EQ(conf.getBrokerConsumerStatsCacheTimeInMs(), 30000); - ASSERT_EQ(conf.isReadCompacted(), false); - ASSERT_EQ(conf.getPatternAutoDiscoveryPeriod(), 60); - ASSERT_EQ(conf.getSubscriptionInitialPosition(), InitialPositionLatest); - ASSERT_EQ(conf.getCryptoKeyReader(), CryptoKeyReaderPtr{}); - ASSERT_EQ(conf.getCryptoFailureAction(), ConsumerCryptoFailureAction::FAIL); - ASSERT_EQ(conf.isEncryptionEnabled(), false); - ASSERT_EQ(conf.isReplicateSubscriptionStateEnabled(), false); - ASSERT_EQ(conf.getProperties().empty(), true); - ASSERT_EQ(conf.getPriorityLevel(), 0); - ASSERT_EQ(conf.getMaxPendingChunkedMessage(), 10); - ASSERT_EQ(conf.isAutoAckOldestChunkedMessageOnQueueFull(), false); -} - -TEST(ConsumerConfigurationTest, testCustomConfig) { - ConsumerConfiguration conf; - - const std::string exampleSchema = - 
"{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\"," - "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}"; - const SchemaInfo schema(AVRO, "Avro", exampleSchema, StringMap{{"schema-key", "schema-value"}}); - - conf.setSchema(schema); - ASSERT_EQ(conf.getSchema().getName(), schema.getName()); - ASSERT_EQ(conf.getSchema().getSchemaType(), schema.getSchemaType()); - ASSERT_EQ(conf.getSchema().getSchema(), schema.getSchema()); - ASSERT_EQ(conf.getSchema().getProperties(), schema.getProperties()); - - conf.setConsumerType(ConsumerKeyShared); - ASSERT_EQ(conf.getConsumerType(), ConsumerKeyShared); - - conf.setMessageListener([](Consumer consumer, const Message& msg) {}); - ASSERT_EQ(conf.hasMessageListener(), true); - - conf.setConsumerEventListener(std::make_shared()); - ASSERT_EQ(conf.hasConsumerEventListener(), true); - - conf.setReceiverQueueSize(2000); - ASSERT_EQ(conf.getReceiverQueueSize(), 2000); - - conf.setMaxTotalReceiverQueueSizeAcrossPartitions(100000); - ASSERT_EQ(conf.getMaxTotalReceiverQueueSizeAcrossPartitions(), 100000); - - conf.setConsumerName("consumer"); - ASSERT_EQ(conf.getConsumerName(), "consumer"); - - conf.setUnAckedMessagesTimeoutMs(20000); - ASSERT_EQ(conf.getUnAckedMessagesTimeoutMs(), 20000); - - conf.setTickDurationInMs(2000); - ASSERT_EQ(conf.getTickDurationInMs(), 2000); - - conf.setNegativeAckRedeliveryDelayMs(10000); - ASSERT_EQ(conf.getNegativeAckRedeliveryDelayMs(), 10000); - - conf.setAckGroupingTimeMs(200); - ASSERT_EQ(conf.getAckGroupingTimeMs(), 200); - - conf.setAckGroupingMaxSize(2000); - ASSERT_EQ(conf.getAckGroupingMaxSize(), 2000); - - conf.setBrokerConsumerStatsCacheTimeInMs(60000); - ASSERT_EQ(conf.getBrokerConsumerStatsCacheTimeInMs(), 60000); - - conf.setReadCompacted(true); - ASSERT_EQ(conf.isReadCompacted(), true); - - conf.setPatternAutoDiscoveryPeriod(120); - ASSERT_EQ(conf.getPatternAutoDiscoveryPeriod(), 120); - - 
conf.setSubscriptionInitialPosition(InitialPositionEarliest); - ASSERT_EQ(conf.getSubscriptionInitialPosition(), InitialPositionEarliest); - - const auto cryptoKeyReader = std::make_shared(); - conf.setCryptoKeyReader(cryptoKeyReader); - ASSERT_EQ(conf.getCryptoKeyReader(), cryptoKeyReader); - // NOTE: once CryptoKeyReader was set, the isEncryptionEnabled() would return true, it's different from - // ProducerConfiguration - ASSERT_EQ(conf.isEncryptionEnabled(), true); - - conf.setCryptoFailureAction(ConsumerCryptoFailureAction::CONSUME); - ASSERT_EQ(conf.getCryptoFailureAction(), ConsumerCryptoFailureAction::CONSUME); - - conf.setReplicateSubscriptionStateEnabled(true); - ASSERT_EQ(conf.isReplicateSubscriptionStateEnabled(), true); - - conf.setProperty("k1", "v1"); - ASSERT_EQ(conf.getProperties()["k1"], "v1"); - ASSERT_EQ(conf.hasProperty("k1"), true); - - std::map subscriptionProperties = {{"k1", "v1"}}; - conf.setSubscriptionProperties(subscriptionProperties); - ASSERT_EQ(conf.getSubscriptionProperties()["k1"], "v1"); - - conf.setPriorityLevel(1); - ASSERT_EQ(conf.getPriorityLevel(), 1); - - conf.setMaxPendingChunkedMessage(500); - ASSERT_EQ(conf.getMaxPendingChunkedMessage(), 500); - - conf.setAutoAckOldestChunkedMessageOnQueueFull(true); - ASSERT_TRUE(conf.isAutoAckOldestChunkedMessageOnQueueFull()); -} - -TEST(ConsumerConfigurationTest, testReadCompactPersistentExclusive) { - std::string lookupUrl = "pulsar://localhost:6650"; - std::string topicName = "persist-topic"; - std::string subName = "test-persist-exclusive"; - - Result result; - - ConsumerConfiguration config; - config.setReadCompacted(true); - config.setConsumerType(ConsumerExclusive); - - ClientConfiguration clientConfig; - Client client(lookupUrl, clientConfig); - - Consumer consumer; - result = client.subscribe(topicName, subName, config, consumer); - ASSERT_EQ(ResultOk, result); - consumer.close(); -} - -TEST(ConsumerConfigurationTest, testReadCompactPersistentFailover) { - std::string lookupUrl 
= "pulsar://localhost:6650"; - std::string topicName = "persist-topic"; - std::string subName = "test-persist-fail-over"; - - Result result; - - ConsumerConfiguration config; - config.setReadCompacted(true); - config.setConsumerType(ConsumerFailover); - - ClientConfiguration clientConfig; - Client client(lookupUrl, clientConfig); - - Consumer consumer; - result = client.subscribe(topicName, subName, config, consumer); - ASSERT_EQ(ResultOk, result); - consumer.close(); -} - -TEST(ConsumerConfigurationTest, testSubscribePersistentKeyShared) { - std::string lookupUrl = "pulsar://localhost:6650"; - std::string topicName = "persist-key-shared-topic"; - std::string subName = "test-persist-key-shared"; - - Result result; - - ConsumerConfiguration config; - // now, key-shared not support read compact - config.setReadCompacted(false); - config.setConsumerType(ConsumerKeyShared); - - ClientConfiguration clientConfig; - Client client(lookupUrl, clientConfig); - - Consumer consumer; - result = client.subscribe(topicName, subName, config, consumer); - ASSERT_EQ(ResultOk, result); - consumer.close(); -} - -TEST(ConsumerConfigurationTest, testReadCompactPersistentShared) { - std::string lookupUrl = "pulsar://localhost:6650"; - std::string topicName = "persist-topic"; - std::string subName = "test-persist-shared"; - - Result result; - - ConsumerConfiguration config; - config.setReadCompacted(true); - config.setConsumerType(ConsumerShared); - - ClientConfiguration clientConfig; - Client client(lookupUrl, clientConfig); - - Consumer consumer; - result = client.subscribe(topicName, subName, config, consumer); - ASSERT_EQ(ResultInvalidConfiguration, result); - consumer.close(); -} - -TEST(ConsumerConfigurationTest, testReadCompactNonPersistentExclusive) { - std::string lookupUrl = "pulsar://localhost:6650"; - std::string topicName = "non-persistent://public/default/testNonPersistentTopic"; - std::string subName = "test-non-persist-exclusive"; - - Result result; - - 
ConsumerConfiguration config; - config.setReadCompacted(true); - config.setConsumerType(ConsumerExclusive); - - ClientConfiguration clientConfig; - Client client(lookupUrl, clientConfig); - - Consumer consumer; - result = client.subscribe(topicName, subName, config, consumer); - ASSERT_EQ(ResultInvalidConfiguration, result); - consumer.close(); -} - -TEST(ConsumerConfigurationTest, testSubscriptionInitialPosition) { - std::string lookupUrl = "pulsar://localhost:6650"; - std::string topicName = "persist-topic-test-position"; - std::string subName = "test-subscription-initial-earliest-position"; - - ClientConfiguration clientConfig; - Client client(lookupUrl, clientConfig); - - LOG_INFO("create 1 producer..."); - Producer producer; - Result result; - ProducerConfiguration conf; - result = client.createProducer(topicName, conf, producer); - ASSERT_EQ(ResultOk, result); - - // Send synchronously - std::string content1 = "msg-1-content-1"; - Message msg = MessageBuilder().setContent(content1).build(); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - - std::string content2 = "msg-2-content-2"; - msg = MessageBuilder().setContent(content2).build(); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration config; - config.setSubscriptionInitialPosition(InitialPosition::InitialPositionEarliest); - result = client.subscribe(topicName, subName, config, consumer); - ASSERT_EQ(ResultOk, result); - - Message receivedMsg; - - result = consumer.receive(receivedMsg, 2000); - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(content1, receivedMsg.getDataAsString()); - - ASSERT_EQ(ResultOk, consumer.unsubscribe()); - ASSERT_EQ(ResultAlreadyClosed, consumer.close()); - ASSERT_EQ(ResultOk, producer.close()); - ASSERT_EQ(ResultOk, client.close()); -} - -TEST(ConsumerConfigurationTest, testResetAckTimeOut) { - const uint64_t milliSeconds = 50000; - ConsumerConfiguration config; - config.setUnAckedMessagesTimeoutMs(milliSeconds); - 
ASSERT_EQ(milliSeconds, config.getUnAckedMessagesTimeoutMs()); - - // should be able to set it back to 0. - config.setUnAckedMessagesTimeoutMs(0); - ASSERT_EQ(0, config.getUnAckedMessagesTimeoutMs()); -} diff --git a/pulsar-client-cpp/tests/ConsumerStatsTest.cc b/pulsar-client-cpp/tests/ConsumerStatsTest.cc deleted file mode 100644 index c398a532e68cc..0000000000000 --- a/pulsar-client-cpp/tests/ConsumerStatsTest.cc +++ /dev/null @@ -1,319 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include "lib/Future.h" -#include "lib/Utils.h" -#include "PulsarFriend.h" -#include "ConsumerTest.h" -#include "HttpHelper.h" -#include - -#include -#include -DECLARE_LOG_OBJECT(); - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; -static std::string adminUrl = "http://localhost:8080/"; - -void partitionedCallbackFunction(Result result, BrokerConsumerStats brokerConsumerStats, long expectedBacklog, - Latch& latch, int index, bool accurate) { - ASSERT_EQ(result, ResultOk); - MultiTopicsBrokerConsumerStatsImpl* statsPtr = - (MultiTopicsBrokerConsumerStatsImpl*)(brokerConsumerStats.getImpl().get()); - LOG_DEBUG(statsPtr); - if (accurate) { - ASSERT_EQ(expectedBacklog, statsPtr->getBrokerConsumerStats(index).getMsgBacklog()); - } else { - ASSERT_LE(expectedBacklog, statsPtr->getBrokerConsumerStats(index).getMsgBacklog()); - } - latch.countdown(); -} - -void simpleCallbackFunction(Result result, BrokerConsumerStats brokerConsumerStats, Result expectedResult, - uint64_t expectedBacklog, ConsumerType expectedConsumerType) { - LOG_DEBUG(brokerConsumerStats); - ASSERT_EQ(result, expectedResult); - ASSERT_EQ(brokerConsumerStats.getMsgBacklog(), expectedBacklog); - ASSERT_EQ(brokerConsumerStats.getType(), expectedConsumerType); -} -TEST(ConsumerStatsTest, testBacklogInfo) { - long epochTime = time(NULL); - std::string testName = "testBacklogInfo-" + std::to_string(epochTime); - Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - ConsumerConfiguration conf; - conf.setBrokerConsumerStatsCacheTimeInMs(3 * 1000); - Consumer consumer; - Promise consumerPromise; - client.subscribeAsync(topicName, subName, conf, WaitForCallbackValue(consumerPromise)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling 
subscriptions - consumer.unsubscribe(); - client.subscribe(topicName, subName, conf, consumer); - - // Producing messages - Producer producer; - int numOfMessages = 10; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - std::string prefix = testName + "-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - LOG_DEBUG("Calling consumer.getBrokerConsumerStats"); - consumer.getBrokerConsumerStatsAsync(std::bind(simpleCallbackFunction, std::placeholders::_1, - std::placeholders::_2, ResultOk, numOfMessages, - ConsumerExclusive)); - - for (int i = numOfMessages; i < (numOfMessages * 2); i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - std::this_thread::sleep_for(std::chrono::milliseconds(3500)); - BrokerConsumerStats consumerStats; - Result res = consumer.getBrokerConsumerStats(consumerStats); - ASSERT_EQ(res, ResultOk); - LOG_DEBUG(consumerStats); - ASSERT_EQ(consumerStats.getMsgBacklog(), 2 * numOfMessages); - ASSERT_EQ(consumerStats.getType(), ConsumerExclusive); - consumer.unsubscribe(); -} - -TEST(ConsumerStatsTest, testFailure) { - long epochTime = time(NULL); - std::string testName = "testFailure-" + std::to_string(epochTime); - Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - Consumer consumer; - Promise consumerPromise; - BrokerConsumerStats consumerStats; - client.subscribeAsync(topicName, subName, WaitForCallbackValue(consumerPromise)); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - Future consumerFuture = consumerPromise.getFuture(); - Result result 
= consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - client.subscribe(topicName, subName, consumer); - - // Producing messages - Producer producer; - int numOfMessages = 5; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - std::string prefix = testName + "-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - ASSERT_EQ(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - - LOG_DEBUG(consumerStats); - ASSERT_EQ(consumerStats.getMsgBacklog(), numOfMessages); - - consumer.unsubscribe(); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); -} - -TEST(ConsumerStatsTest, testCachingMechanism) { - long epochTime = time(NULL); - std::string testName = "testCachingMechanism-" + std::to_string(epochTime); - Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - ConsumerConfiguration conf; - conf.setBrokerConsumerStatsCacheTimeInMs(3.5 * 1000); - Consumer consumer; - Promise consumerPromise; - BrokerConsumerStats consumerStats; - client.subscribeAsync(topicName, subName, conf, WaitForCallbackValue(consumerPromise)); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - client.subscribe(topicName, subName, conf, consumer); - - // 
Producing messages - Producer producer; - int numOfMessages = 5; - Promise producerPromise; - client.createProducerAsync(topicName, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - std::string prefix = testName + "-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - ASSERT_EQ(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - - LOG_DEBUG(consumerStats); - ASSERT_EQ(consumerStats.getMsgBacklog(), numOfMessages); - - for (int i = numOfMessages; i < (numOfMessages * 2); i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - LOG_DEBUG("Expecting cached results"); - ASSERT_TRUE(consumerStats.isValid()); - ASSERT_EQ(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - LOG_DEBUG(consumerStats); - ASSERT_EQ(consumerStats.getMsgBacklog(), numOfMessages); - - LOG_DEBUG("Still Expecting cached results"); - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_TRUE(consumerStats.isValid()); - ASSERT_EQ(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - - LOG_DEBUG(consumerStats); - ASSERT_EQ(consumerStats.getMsgBacklog(), numOfMessages); - - LOG_DEBUG("Now expecting new results"); - std::this_thread::sleep_for(std::chrono::seconds(3)); - ASSERT_FALSE(consumerStats.isValid()); - ASSERT_EQ(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - - LOG_DEBUG(consumerStats); - ASSERT_EQ(consumerStats.getMsgBacklog(), numOfMessages * 2); - - consumer.unsubscribe(); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); -} - -TEST(ConsumerStatsTest, testAsyncCallOnPartitionedTopic) { - long epochTime = time(NULL); - std::string testName = "testAsyncCallOnPartitionedTopic-" + std::to_string(epochTime); 
- Client client(lookupUrl); - std::string topicName = "persistent://public/default/" + testName; - std::string subName = "subscription-name"; - - // call admin api to create partitioned topics - std::string url = adminUrl + "admin/v2/persistent/public/default/" + testName + "/partitions"; - int res = makePutRequest(url, "7"); - - LOG_INFO("res = " << res); - ASSERT_FALSE(res != 204 && res != 409); - - ConsumerConfiguration conf; - conf.setBrokerConsumerStatsCacheTimeInMs(3.5 * 1000); - Consumer consumer; - Promise consumerPromise; - BrokerConsumerStats consumerStats; - client.subscribeAsync(topicName, subName, conf, WaitForCallbackValue(consumerPromise)); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - Future consumerFuture = consumerPromise.getFuture(); - Result result = consumerFuture.get(consumer); - ASSERT_EQ(ResultOk, result); - - // handling dangling subscriptions - consumer.unsubscribe(); - ASSERT_NE(ResultOk, consumer.getBrokerConsumerStats(consumerStats)); - client.subscribe(topicName, subName, conf, consumer); - - // Producing messages - Producer producer; - int numOfMessages = 7 * 5; // 5 message per partition - Promise producerPromise; - ProducerConfiguration config; - config.setBatchingEnabled(false); - config.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - client.createProducerAsync(topicName, config, WaitForCallbackValue(producerPromise)); - Future producerFuture = producerPromise.getFuture(); - result = producerFuture.get(producer); - ASSERT_EQ(ResultOk, result); - - std::string prefix = testName + "-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - // Expecting return from 4 callbacks - Latch latch(4); - consumer.getBrokerConsumerStatsAsync(std::bind(partitionedCallbackFunction, std::placeholders::_1, - std::placeholders::_2, 5, latch, 0, true)); - - // Now we have 10 
messages per partition - for (int i = numOfMessages; i < (numOfMessages * 2); i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().build(); - producer.send(msg); - } - - // Expecting cached result - // Inaccurate judgment is used because it cannot guarantee that the above operations are completed within - // cache time. - consumer.getBrokerConsumerStatsAsync(std::bind(partitionedCallbackFunction, std::placeholders::_1, - std::placeholders::_2, 5, latch, 0, false)); - - std::this_thread::sleep_for(std::chrono::milliseconds(4500)); - // Expecting fresh results - consumer.getBrokerConsumerStatsAsync(std::bind(partitionedCallbackFunction, std::placeholders::_1, - std::placeholders::_2, 10, latch, 2, true)); - - Message msg; - while (consumer.receive(msg)) { - // Do nothing - } - - // Expecting the backlog to be the same since we didn't acknowledge the messages - consumer.getBrokerConsumerStatsAsync(std::bind(partitionedCallbackFunction, std::placeholders::_1, - std::placeholders::_2, 10, latch, 3, true)); - - // Wait for ten seconds only - ASSERT_TRUE(latch.wait(std::chrono::seconds(30))); -} diff --git a/pulsar-client-cpp/tests/ConsumerTest.cc b/pulsar-client-cpp/tests/ConsumerTest.cc deleted file mode 100644 index c8a07e6c84b47..0000000000000 --- a/pulsar-client-cpp/tests/ConsumerTest.cc +++ /dev/null @@ -1,934 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "pulsar/Client.h" -#include "PulsarFriend.h" -#include "lib/Future.h" -#include "lib/Utils.h" -#include "lib/LogUtils.h" -#include "lib/MultiTopicsConsumerImpl.h" -#include "HttpHelper.h" - -static const std::string lookupUrl = "pulsar://localhost:6650"; -static const std::string adminUrl = "http://localhost:8080/"; - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -class ConsumerStateEventListener : public ConsumerEventListener { - public: - ConsumerStateEventListener(std::string name) { name_ = name; } - - void becameActive(Consumer consumer, int partitionId) override { - LOG_INFO("Received consumer active event, partitionId:" << partitionId << ", name: " << name_); - activeQueue_.push(partitionId); - } - - void becameInactive(Consumer consumer, int partitionId) override { - LOG_INFO("Received consumer inactive event, partitionId:" << partitionId << ", name: " << name_); - inActiveQueue_.push(partitionId); - } - - std::queue activeQueue_; - std::queue inActiveQueue_; - std::string name_; -}; - -typedef std::shared_ptr ConsumerStateEventListenerPtr; - -void verifyConsumerNotReceiveAnyStateChanges(ConsumerStateEventListenerPtr listener) { - ASSERT_EQ(0, listener->activeQueue_.size()); - ASSERT_EQ(0, listener->inActiveQueue_.size()); -} - -void verifyConsumerActive(ConsumerStateEventListenerPtr listener, int partitionId) { - ASSERT_NE(0, listener->activeQueue_.size()); - int pid = listener->activeQueue_.front(); - listener->activeQueue_.pop(); - 
ASSERT_EQ(partitionId, pid); - ASSERT_EQ(0, listener->inActiveQueue_.size()); -} - -void verifyConsumerInactive(ConsumerStateEventListenerPtr listener, int partitionId) { - ASSERT_NE(0, listener->inActiveQueue_.size()); - int pid = listener->inActiveQueue_.front(); - listener->inActiveQueue_.pop(); - ASSERT_EQ(partitionId, pid); - ASSERT_EQ(0, listener->activeQueue_.size()); -} - -class ActiveInactiveListenerEvent : public ConsumerEventListener { - public: - void becameActive(Consumer consumer, int partitionId) override { - Lock lock(mutex_); - activePartitonIds_.emplace(partitionId); - inactivePartitionIds_.erase(partitionId); - } - - void becameInactive(Consumer consumer, int partitionId) override { - Lock lock(mutex_); - activePartitonIds_.erase(partitionId); - inactivePartitionIds_.emplace(partitionId); - } - - typedef std::unique_lock Lock; - std::set activePartitonIds_; - std::set inactivePartitionIds_; - std::mutex mutex_; -}; - -typedef std::shared_ptr ActiveInactiveListenerEventPtr; - -TEST(ConsumerTest, testConsumerEventWithoutPartition) { - Client client(lookupUrl); - - const std::string topicName = "testConsumerEventWithoutPartition-topic-" + std::to_string(time(nullptr)); - const std::string subName = "sub"; - const int waitTimeInMs = 1000; - // constexpr int unAckedMessagesTimeoutMs = 10000; - // constexpr int tickDurationInMs = 1000; - - // 1. 
two consumers on the same subscription - Consumer consumer1; - ConsumerConfiguration config1; - ConsumerStateEventListenerPtr listener1 = std::make_shared("listener-1"); - config1.setConsumerEventListener(listener1); - config1.setConsumerName("consumer-1"); - config1.setConsumerType(ConsumerType::ConsumerFailover); - - ASSERT_EQ(pulsar::ResultOk, client.subscribe(topicName, subName, config1, consumer1)); - std::this_thread::sleep_for(std::chrono::milliseconds(waitTimeInMs * 2)); - - Consumer consumer2; - ConsumerConfiguration config2; - ConsumerStateEventListenerPtr listener2 = std::make_shared("listener-2"); - config2.setConsumerEventListener(listener2); - config2.setConsumerName("consumer-2"); - config2.setConsumerType(ConsumerType::ConsumerFailover); - - ASSERT_EQ(pulsar::ResultOk, client.subscribe(topicName, subName, config2, consumer2)); - std::this_thread::sleep_for(std::chrono::milliseconds(waitTimeInMs * 2)); - - verifyConsumerActive(listener1, -1); - verifyConsumerInactive(listener2, -1); - - // clear inActiveQueue_ - std::queue().swap(listener2->inActiveQueue_); - - consumer1.close(); - std::this_thread::sleep_for(std::chrono::milliseconds(waitTimeInMs * 2)); - verifyConsumerActive(listener2, -1); - verifyConsumerNotReceiveAnyStateChanges(listener1); -} - -TEST(ConsumerTest, testConsumerEventWithPartition) { - Client client(lookupUrl); - - const int numPartitions = 4; - const std::string partitionedTopic = - "testConsumerEventWithPartition-topic-" + std::to_string(time(nullptr)); - const std::string subName = "sub"; - const int numOfMessages = 100; - constexpr int unAckedMessagesTimeoutMs = 10000; - constexpr int tickDurationInMs = 1000; - - int res = - makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", - std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - // two consumers on the same subscription - Consumer consumer1; - ConsumerConfiguration config1; - 
ActiveInactiveListenerEventPtr listener1 = std::make_shared(); - config1.setConsumerEventListener(listener1); - config1.setConsumerName("consumer-1"); - config1.setConsumerType(ConsumerType::ConsumerFailover); - config1.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - config1.setTickDurationInMs(tickDurationInMs); - client.subscribe(partitionedTopic, subName, config1, consumer1); - - Consumer consumer2; - ConsumerConfiguration config2; - ActiveInactiveListenerEventPtr listener2 = std::make_shared(); - config2.setConsumerEventListener(listener2); - config2.setConsumerName("consumer-2"); - config2.setConsumerType(ConsumerType::ConsumerFailover); - config1.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - config1.setTickDurationInMs(tickDurationInMs); - client.subscribe(partitionedTopic, subName, config2, consumer2); - - // send messages - ProducerConfiguration producerConfig; - producerConfig.setBatchingEnabled(false); - producerConfig.setBlockIfQueueFull(true); - producerConfig.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfig, producer)); - std::string prefix = "message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().setContent(messageContent).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - producer.flush(); - producer.close(); - - // receive message and check partitionIds on consumer1 - std::set receivedPartitionIds; - while (true) { - Message msg; - Result rc = consumer1.receive(msg, 1000); - if (pulsar::ResultOk != rc) { - break; - } - - MessageId msgId = msg.getMessageId(); - int32_t partitionIndex = msgId.partition(); - ASSERT_TRUE(partitionIndex < numPartitions); - consumer1.acknowledge(msgId); - receivedPartitionIds.insert(partitionIndex); - } - - std::set result; - std::set_difference(listener1->activePartitonIds_.begin(), 
listener1->activePartitonIds_.end(), - receivedPartitionIds.begin(), receivedPartitionIds.end(), - std::inserter(result, result.end())); - ASSERT_EQ(0, result.size()); - - std::set().swap(result); - std::set_difference(listener2->inactivePartitionIds_.begin(), listener2->inactivePartitionIds_.end(), - receivedPartitionIds.begin(), receivedPartitionIds.end(), - std::inserter(result, result.end())); - ASSERT_EQ(0, result.size()); - - // receive message and check partitionIds on consumer2 - std::set().swap(receivedPartitionIds); - while (true) { - Message msg; - Result rc = consumer2.receive(msg, 1000); - if (pulsar::ResultOk != rc) { - break; - } - MessageId msgId = msg.getMessageId(); - int32_t partitionIndex = msgId.partition(); - ASSERT_TRUE(partitionIndex < numPartitions); - consumer2.acknowledge(msgId); - receivedPartitionIds.insert(partitionIndex); - } - - std::set().swap(result); - std::set_difference(listener2->activePartitonIds_.begin(), listener2->activePartitonIds_.end(), - receivedPartitionIds.begin(), receivedPartitionIds.end(), - std::inserter(result, result.end())); - ASSERT_EQ(0, result.size()); - - std::set().swap(result); - std::set_difference(listener1->inactivePartitionIds_.begin(), listener1->inactivePartitionIds_.end(), - receivedPartitionIds.begin(), receivedPartitionIds.end(), - std::inserter(result, result.end())); - ASSERT_EQ(0, result.size()); -} - -TEST(ConsumerTest, consumerNotInitialized) { - Consumer consumer; - - ASSERT_TRUE(consumer.getTopic().empty()); - ASSERT_TRUE(consumer.getSubscriptionName().empty()); - - Message msg; - ASSERT_EQ(ResultConsumerNotInitialized, consumer.receive(msg)); - ASSERT_EQ(ResultConsumerNotInitialized, consumer.receive(msg, 3000)); - - ASSERT_EQ(ResultConsumerNotInitialized, consumer.acknowledge(msg)); - - MessageId msgId; - ASSERT_EQ(ResultConsumerNotInitialized, consumer.acknowledge(msgId)); - - Result result; - { - Promise promise; - consumer.acknowledgeAsync(msg, WaitForCallback(promise)); - 
promise.getFuture().get(result); - - ASSERT_EQ(ResultConsumerNotInitialized, result); - } - - { - Promise promise; - consumer.acknowledgeAsync(msgId, WaitForCallback(promise)); - promise.getFuture().get(result); - - ASSERT_EQ(ResultConsumerNotInitialized, result); - } - - ASSERT_EQ(ResultConsumerNotInitialized, consumer.acknowledgeCumulative(msg)); - ASSERT_EQ(ResultConsumerNotInitialized, consumer.acknowledgeCumulative(msgId)); - - { - Promise promise; - consumer.acknowledgeCumulativeAsync(msg, WaitForCallback(promise)); - promise.getFuture().get(result); - - ASSERT_EQ(ResultConsumerNotInitialized, result); - } - - { - Promise promise; - consumer.acknowledgeCumulativeAsync(msgId, WaitForCallback(promise)); - promise.getFuture().get(result); - - ASSERT_EQ(ResultConsumerNotInitialized, result); - } - - ASSERT_EQ(ResultConsumerNotInitialized, consumer.close()); - - { - Promise promise; - consumer.closeAsync(WaitForCallback(promise)); - promise.getFuture().get(result); - - ASSERT_EQ(ResultConsumerNotInitialized, result); - } - - ASSERT_EQ(ResultConsumerNotInitialized, consumer.unsubscribe()); - - { - Promise promise; - consumer.unsubscribeAsync(WaitForCallback(promise)); - promise.getFuture().get(result); - - ASSERT_EQ(ResultConsumerNotInitialized, result); - } -} - -TEST(ConsumerTest, testPartitionIndex) { - Client client(lookupUrl); - - const std::string nonPartitionedTopic = - "ConsumerTestPartitionIndex-topic-" + std::to_string(time(nullptr)); - const std::string partitionedTopic1 = - "ConsumerTestPartitionIndex-par-topic1-" + std::to_string(time(nullptr)); - const std::string partitionedTopic2 = - "ConsumerTestPartitionIndex-par-topic2-" + std::to_string(time(nullptr)); - constexpr int numPartitions = 3; - - int res = makePutRequest( - adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic1 + "/partitions", "1"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - res = makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + 
partitionedTopic2 + "/partitions", - std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - auto sendMessageToTopic = [&client](const std::string& topic) { - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - - Message msg = MessageBuilder().setContent("hello").build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - }; - - // consumers - // [0] subscribes a non-partitioned topic - // [1] subscribes a partition of a partitioned topic - // [2] subscribes a partitioned topic - Consumer consumers[3]; - ASSERT_EQ(ResultOk, client.subscribe(nonPartitionedTopic, "sub", consumers[0])); - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic1 + "-partition-0", "sub", consumers[1])); - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic2, "sub", consumers[2])); - - sendMessageToTopic(nonPartitionedTopic); - sendMessageToTopic(partitionedTopic1); - for (int i = 0; i < numPartitions; i++) { - sendMessageToTopic(partitionedTopic2 + "-partition-" + std::to_string(i)); - } - - Message msg; - ASSERT_EQ(ResultOk, consumers[0].receive(msg, 5000)); - ASSERT_EQ(msg.getMessageId().partition(), -1); - - ASSERT_EQ(ResultOk, consumers[1].receive(msg, 5000)); - ASSERT_EQ(msg.getMessageId().partition(), 0); - - std::set partitionIndexes; - for (int i = 0; i < 3; i++) { - ASSERT_EQ(ResultOk, consumers[2].receive(msg, 5000)); - partitionIndexes.emplace(msg.getMessageId().partition()); - } - ASSERT_EQ(partitionIndexes, (std::set{0, 1, 2})); - - client.close(); -} - -TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery) { - Client client(lookupUrl); - const std::string partitionedTopic = - "testPartitionedConsumerUnAckedMessageRedelivery" + std::to_string(time(nullptr)); - std::string subName = "sub-partition-consumer-un-acked-msg-redelivery"; - constexpr int numPartitions = 3; - constexpr int numOfMessages = 15; - constexpr int unAckedMessagesTimeoutMs = 10000; - constexpr int tickDurationInMs = 1000; - - 
int res = - makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", - std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - consumerConfig.setTickDurationInMs(tickDurationInMs); - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic, subName, consumerConfig, consumer)); - - MultiTopicsConsumerImplPtr partitionedConsumerImplPtr = - PulsarFriend::getMultiTopicsConsumerImplPtr(consumer); - ASSERT_EQ(numPartitions, partitionedConsumerImplPtr->consumers_.size()); - - // send messages - ProducerConfiguration producerConfig; - producerConfig.setBatchingEnabled(false); - producerConfig.setBlockIfQueueFull(true); - producerConfig.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfig, producer)); - std::string prefix = "message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().setContent(messageContent).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - producer.close(); - - // receive message and don't acknowledge - std::set messageIds[numPartitions]; - for (auto i = 0; i < numOfMessages; ++i) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - - MessageId msgId = msg.getMessageId(); - int32_t partitionIndex = msgId.partition(); - ASSERT_TRUE(partitionIndex < numPartitions); - messageIds[msgId.partition()].emplace(msgId); - } - - auto partitionedTracker = static_cast( - partitionedConsumerImplPtr->unAckedMessageTrackerPtr_.get()); - ASSERT_EQ(numOfMessages, partitionedTracker->size()); - ASSERT_FALSE(partitionedTracker->isEmpty()); - for (auto i = 0; i < numPartitions; i++) { - auto topicName = - "persistent://public/default/" + 
partitionedTopic + "-partition-" + std::to_string(i); - ASSERT_EQ(numOfMessages / numPartitions, messageIds[i].size()); - auto subConsumerPtr = partitionedConsumerImplPtr->consumers_.find(topicName).value(); - auto tracker = - static_cast(subConsumerPtr->unAckedMessageTrackerPtr_.get()); - ASSERT_EQ(0, tracker->size()); - ASSERT_TRUE(tracker->isEmpty()); - } - - // timeout and send redeliver message - std::this_thread::sleep_for(std::chrono::milliseconds(unAckedMessagesTimeoutMs + tickDurationInMs * 2)); - ASSERT_EQ(0, partitionedTracker->size()); - ASSERT_TRUE(partitionedTracker->isEmpty()); - - for (auto i = 0; i < numOfMessages; ++i) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - ASSERT_EQ(1, partitionedTracker->size()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msg.getMessageId())); - ASSERT_EQ(0, partitionedTracker->size()); - } - ASSERT_EQ(0, partitionedTracker->size()); - ASSERT_TRUE(partitionedTracker->isEmpty()); - partitionedTracker = NULL; - - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message ID: " << msg.getMessageId(); - consumer.close(); - client.close(); -} - -TEST(ConsumerTest, testPartitionedConsumerUnexpectedAckTimeout) { - ClientConfiguration clientConfig; - clientConfig.setMessageListenerThreads(1); - Client client(lookupUrl, clientConfig); - - const std::string partitionedTopic = - "testPartitionedConsumerUnexpectedAckTimeout" + std::to_string(time(nullptr)); - std::string subName = "sub"; - constexpr int numPartitions = 2; - constexpr int numOfMessages = 3; - constexpr int unAckedMessagesTimeoutMs = 10000; - constexpr int tickDurationInMs = 1000; - pulsar::Latch latch(numOfMessages); - std::vector messages; - std::mutex mtx; - - int res = - makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", - std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - Consumer consumer; 
- ConsumerConfiguration consumerConfig; - consumerConfig.setConsumerType(ConsumerShared); - consumerConfig.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - consumerConfig.setTickDurationInMs(tickDurationInMs); - consumerConfig.setMessageListener([&](Consumer cons, const Message& msg) { - // acknowledge received messages immediately, so no ack timeout is expected - ASSERT_EQ(ResultOk, cons.acknowledge(msg.getMessageId())); - ASSERT_EQ(0, msg.getRedeliveryCount()); - - { - std::lock_guard lock(mtx); - messages.emplace_back(msg); - } - - if (latch.getCount() > 0) { - std::this_thread::sleep_for( - std::chrono::milliseconds(unAckedMessagesTimeoutMs + tickDurationInMs * 2)); - latch.countdown(); - } - }); - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic, subName, consumerConfig, consumer)); - - // send messages - ProducerConfiguration producerConfig; - producerConfig.setBatchingEnabled(false); - producerConfig.setBlockIfQueueFull(true); - producerConfig.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfig, producer)); - std::string prefix = "message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().setContent(messageContent).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - producer.close(); - - bool wasUnblocked = latch.wait( - std::chrono::milliseconds((unAckedMessagesTimeoutMs + tickDurationInMs * 2) * numOfMessages + 5000)); - ASSERT_TRUE(wasUnblocked); - - std::this_thread::sleep_for(std::chrono::milliseconds(5000)); - // messages are expected not to be redelivered - ASSERT_EQ(numOfMessages, messages.size()); - - consumer.close(); - client.close(); -} - -TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery) { - Client client(lookupUrl); - const std::string nonPartitionedTopic = - 
"testMultiTopicsConsumerUnAckedMessageRedelivery-topic-" + std::to_string(time(nullptr)); - const std::string partitionedTopic1 = - "testMultiTopicsConsumerUnAckedMessageRedelivery-par-topic1-" + std::to_string(time(nullptr)); - const std::string partitionedTopic2 = - "testMultiTopicsConsumerUnAckedMessageRedelivery-par-topic2-" + std::to_string(time(nullptr)); - std::string subName = "sub-multi-topics-consumer-un-acked-msg-redelivery"; - constexpr int numPartitions = 3; - constexpr int numOfMessages = 15; - constexpr int unAckedMessagesTimeoutMs = 10000; - constexpr int tickDurationInMs = 1000; - - int res = makePutRequest( - adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic1 + "/partitions", "1"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - res = makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic2 + "/partitions", - std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - consumerConfig.setTickDurationInMs(tickDurationInMs); - const std::vector topics = {nonPartitionedTopic, partitionedTopic1, partitionedTopic2}; - ASSERT_EQ(ResultOk, client.subscribe(topics, subName, consumerConfig, consumer)); - MultiTopicsConsumerImplPtr multiTopicsConsumerImplPtr = - PulsarFriend::getMultiTopicsConsumerImplPtr(consumer); - ASSERT_EQ(numPartitions + 2 /* nonPartitionedTopic + partitionedTopic1 */, - multiTopicsConsumerImplPtr->consumers_.size()); - - // send messages - auto sendMessageToTopic = [&client](const std::string& topic) { - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - - Message msg = MessageBuilder().setContent("hello").build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - }; - for (int i = 0; i < numOfMessages; i++) { - sendMessageToTopic(nonPartitionedTopic); - 
sendMessageToTopic(partitionedTopic1); - sendMessageToTopic(partitionedTopic2); - } - - // receive message and don't acknowledge - for (auto i = 0; i < numOfMessages * 3; ++i) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - MessageId msgId = msg.getMessageId(); - } - - auto multiTopicsTracker = static_cast( - multiTopicsConsumerImplPtr->unAckedMessageTrackerPtr_.get()); - ASSERT_EQ(numOfMessages * 3, multiTopicsTracker->size()); - ASSERT_FALSE(multiTopicsTracker->isEmpty()); - - std::vector trackers; - multiTopicsConsumerImplPtr->consumers_.forEach( - [&trackers](const std::string& name, const ConsumerImplPtr& consumer) { - trackers.emplace_back( - static_cast(consumer->unAckedMessageTrackerPtr_.get())); - }); - for (const auto& tracker : trackers) { - ASSERT_EQ(0, tracker->size()); - ASSERT_TRUE(tracker->isEmpty()); - } - - // timeout and send redeliver message - std::this_thread::sleep_for(std::chrono::milliseconds(unAckedMessagesTimeoutMs + tickDurationInMs * 2)); - ASSERT_EQ(0, multiTopicsTracker->size()); - ASSERT_TRUE(multiTopicsTracker->isEmpty()); - - for (auto i = 0; i < numOfMessages * 3; ++i) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - ASSERT_EQ(1, multiTopicsTracker->size()); - ASSERT_EQ(ResultOk, consumer.acknowledge(msg.getMessageId())); - ASSERT_EQ(0, multiTopicsTracker->size()); - } - ASSERT_EQ(0, multiTopicsTracker->size()); - ASSERT_TRUE(multiTopicsTracker->isEmpty()); - multiTopicsTracker = NULL; - - Message msg; - auto ret = consumer.receive(msg, 1000); - ASSERT_EQ(ResultTimeout, ret) << "Received redundant message ID: " << msg.getMessageId(); - consumer.close(); - client.close(); -} - -TEST(ConsumerTest, testBatchUnAckedMessageTracker) { - Client client(lookupUrl); - const std::string topic = "testBatchUnAckedMessageTracker" + std::to_string(time(nullptr)); - std::string subName = "sub-batch-un-acked-msg-tracker"; - constexpr int numOfMessages = 50; - constexpr int batchSize = 5; - constexpr int 
batchCount = numOfMessages / batchSize; - constexpr int unAckedMessagesTimeoutMs = 10000; - constexpr int tickDurationInMs = 1000; - - Consumer consumer; - ConsumerConfiguration consumerConfig; - consumerConfig.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); - consumerConfig.setTickDurationInMs(tickDurationInMs); - ASSERT_EQ(ResultOk, client.subscribe(topic, subName, consumerConfig, consumer)); - auto consumerImplPtr = PulsarFriend::getConsumerImplPtr(consumer); - auto tracker = - static_cast(consumerImplPtr->unAckedMessageTrackerPtr_.get()); - - // send messages - ProducerConfiguration producerConfig; - producerConfig.setBatchingEnabled(true); - producerConfig.setBlockIfQueueFull(true); - producerConfig.setBatchingMaxMessages(batchSize); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConfig, producer)); - std::string prefix = "message-"; - for (int i = 0; i < numOfMessages; i++) { - std::string messageContent = prefix + std::to_string(i); - Message msg = MessageBuilder().setContent(messageContent).build(); - producer.sendAsync(msg, NULL); - } - producer.close(); - - std::map> msgIdInBatchMap; - std::vector messageIds; - for (auto i = 0; i < numOfMessages; ++i) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 1000)); - MessageId msgId = msg.getMessageId(); - MessageId id(msgId.partition(), msgId.ledgerId(), msgId.entryId(), -1); - msgIdInBatchMap[id].emplace_back(msgId); - } - - ASSERT_EQ(batchCount, msgIdInBatchMap.size()); - ASSERT_EQ(batchCount, tracker->size()); - for (const auto& iter : msgIdInBatchMap) { - ASSERT_EQ(iter.second.size(), batchSize); - } - - int ackedBatchCount = 0; - for (auto iter = msgIdInBatchMap.begin(); iter != msgIdInBatchMap.end(); ++iter) { - ASSERT_EQ(batchSize, iter->second.size()); - for (auto i = 0; i < iter->second.size(); ++i) { - ASSERT_EQ(ResultOk, consumer.acknowledge(iter->second[i])); - } - ackedBatchCount++; - ASSERT_EQ(batchCount - ackedBatchCount, tracker->size()); - } - 
ASSERT_EQ(0, tracker->size()); - ASSERT_TRUE(tracker->isEmpty()); - - consumer.close(); - client.close(); -} - -TEST(ConsumerTest, testGetTopicNameFromReceivedMessage) { - // topic1 and topic2 are non-partitioned topics, topic3 is a partitioned topic - const std::string topic1 = "testGetTopicNameFromReceivedMessage1-" + std::to_string(time(nullptr)); - const std::string topic2 = "testGetTopicNameFromReceivedMessage2-" + std::to_string(time(nullptr)); - const std::string topic3 = "testGetTopicNameFromReceivedMessage3-" + std::to_string(time(nullptr)); - int res = makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + topic3 + "/partitions", "3"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - Client client(lookupUrl); - - auto sendMessage = [&client](const std::string& topic, bool enabledBatching) { - const auto producerConf = ProducerConfiguration().setBatchingEnabled(enabledBatching); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf, producer)); - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent("hello").build())); - LOG_INFO("Send 'hello' to " << topic); - }; - auto validateTopicName = [](Consumer& consumer, const std::string& topic) { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 3000)); - - const auto fullTopic = "persistent://public/default/" + topic; - ASSERT_EQ(msg.getTopicName(), fullTopic); - ASSERT_EQ(msg.getMessageId().getTopicName(), fullTopic); - }; - - // 1. ConsumerImpl - Consumer consumer1; - ASSERT_EQ(ResultOk, client.subscribe(topic1, "sub-1", consumer1)); - - // 2. MultiTopicsConsumerImpl - Consumer consumer2; - ASSERT_EQ(ResultOk, client.subscribe(std::vector{topic1, topic2}, "sub-2", consumer2)); - - sendMessage(topic1, true); - validateTopicName(consumer1, topic1); - validateTopicName(consumer2, topic1); - sendMessage(topic1, false); - validateTopicName(consumer1, topic1); - validateTopicName(consumer2, topic1); - - // 3. 
PartitionedConsumerImpl - Consumer consumer3; - ASSERT_EQ(ResultOk, client.subscribe(topic3, "sub-3", consumer3)); - const auto partition = topic3 + "-partition-0"; - sendMessage(partition, true); - validateTopicName(consumer3, partition); - sendMessage(partition, false); - validateTopicName(consumer3, partition); - - client.close(); -} - -TEST(ConsumerTest, testIsConnected) { - Client client(lookupUrl); - const std::string nonPartitionedTopic1 = - "testConsumerIsConnectedNonPartitioned1-" + std::to_string(time(nullptr)); - const std::string nonPartitionedTopic2 = - "testConsumerIsConnectedNonPartitioned2-" + std::to_string(time(nullptr)); - const std::string partitionedTopic = - "testConsumerIsConnectedPartitioned-" + std::to_string(time(nullptr)); - const std::string subName = "sub"; - - Consumer consumer; - ASSERT_FALSE(consumer.isConnected()); - // ConsumerImpl - ASSERT_EQ(ResultOk, client.subscribe(nonPartitionedTopic1, subName, consumer)); - ASSERT_TRUE(consumer.isConnected()); - ASSERT_EQ(ResultOk, consumer.close()); - ASSERT_FALSE(consumer.isConnected()); - - // MultiTopicsConsumerImpl - ASSERT_EQ(ResultOk, client.subscribe(std::vector{nonPartitionedTopic1, nonPartitionedTopic2}, - subName, consumer)); - ASSERT_TRUE(consumer.isConnected()); - ASSERT_EQ(ResultOk, consumer.close()); - ASSERT_FALSE(consumer.isConnected()); - - int res = makePutRequest( - adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "2"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - // PartitionedConsumerImpl - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic, subName, consumer)); - ASSERT_TRUE(consumer.isConnected()); - ASSERT_EQ(ResultOk, consumer.close()); - ASSERT_FALSE(consumer.isConnected()); -} - -TEST(ConsumerTest, testPartitionsWithCloseUnblock) { - Client client(lookupUrl); - const std::string partitionedTopic = "testPartitionsWithCloseUnblock" + std::to_string(time(nullptr)); - constexpr int numPartitions = 2; - - int res 
= - makePutRequest(adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", - std::to_string(numPartitions)); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - Consumer consumer; - ConsumerConfiguration consumerConfig; - ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic, "SubscriptionName", consumerConfig, consumer)); - - // send messages - ProducerConfiguration producerConfig; - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfig, producer)); - Message msg = MessageBuilder().setContent("message").build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - - producer.close(); - - // receive message on another thread - pulsar::Latch latch(1); - auto thread = std::thread([&]() { - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 10 * 1000)); - consumer.acknowledge(msg.getMessageId()); - ASSERT_EQ(ResultAlreadyClosed, consumer.receive(msg, 10 * 1000)); - latch.countdown(); - }); - - std::this_thread::sleep_for(std::chrono::seconds(1)); - - consumer.close(); - - bool wasUnblocked = latch.wait(std::chrono::milliseconds(100)); - - ASSERT_TRUE(wasUnblocked); - thread.join(); -} - -TEST(ConsumerTest, testGetLastMessageIdBlockWhenConnectionDisconnected) { - int operationTimeout = 5; - ClientConfiguration clientConfiguration; - clientConfiguration.setOperationTimeoutSeconds(operationTimeout); - - Client client(lookupUrl, clientConfiguration); - const std::string topic = - "testGetLastMessageIdBlockWhenConnectionDisconnected-" + std::to_string(time(nullptr)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "test-sub", consumer)); - - ConsumerImpl& consumerImpl = PulsarFriend::getConsumerImpl(consumer); - ClientConnectionWeakPtr conn = PulsarFriend::getClientConnection(consumerImpl); - - PulsarFriend::setClientConnection(consumerImpl, std::weak_ptr()); - - pulsar::Latch latch(1); - auto start = TimeUtils::now(); - - consumerImpl.getLastMessageIdAsync([&latch](Result 
r, const GetLastMessageIdResponse&) -> void { - ASSERT_EQ(r, ResultNotConnected); - latch.countdown(); - }); - - ASSERT_TRUE(latch.wait(std::chrono::seconds(20))); - auto elapsed = TimeUtils::now() - start; - - // getLastMessageIdAsync should be blocked until operationTimeout when the connection is disconnected. - ASSERT_GE(elapsed.seconds(), operationTimeout); -} - -class ConsumerSeekTest : public ::testing::TestWithParam { - public: - void SetUp() override { producerConf_ = ProducerConfiguration().setBatchingEnabled(GetParam()); } - - void TearDown() override { client_.close(); } - - protected: - Client client_{lookupUrl}; - ProducerConfiguration producerConf_; -}; - -TEST_P(ConsumerSeekTest, testSeekForMessageId) { - Client client(lookupUrl); - - const std::string topic = "test-seek-for-message-id-" + std::string((GetParam() ? "batch-" : "")) + - std::to_string(time(nullptr)); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf_, producer)); - - const auto numMessages = 100; - MessageId seekMessageId; - - int r = (rand() % (numMessages - 1)); - for (int i = 0; i < numMessages; i++) { - MessageId id; - ASSERT_EQ(ResultOk, - producer.send(MessageBuilder().setContent("msg-" + std::to_string(i)).build(), id)); - - if (i == r) { - seekMessageId = id; - } - } - - LOG_INFO("The seekMessageId is: " << seekMessageId << ", r : " << r); - - Consumer consumerExclusive; - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub-0", consumerExclusive)); - consumerExclusive.seek(seekMessageId); - Message msg0; - ASSERT_EQ(ResultOk, consumerExclusive.receive(msg0, 3000)); - - Consumer consumerInclusive; - ASSERT_EQ(ResultOk, - client.subscribe(topic, "sub-1", ConsumerConfiguration().setStartMessageIdInclusive(true), - consumerInclusive)); - consumerInclusive.seek(seekMessageId); - Message msg1; - ASSERT_EQ(ResultOk, consumerInclusive.receive(msg1, 3000)); - - LOG_INFO("consumerExclusive received " << msg0.getDataAsString() << " from " << 
msg0.getMessageId()); - LOG_INFO("consumerInclusive received " << msg1.getDataAsString() << " from " << msg1.getMessageId()); - - ASSERT_EQ(msg0.getDataAsString(), "msg-" + std::to_string(r + 1)); - ASSERT_EQ(msg1.getDataAsString(), "msg-" + std::to_string(r)); - - consumerInclusive.close(); - consumerExclusive.close(); - producer.close(); -} - -INSTANTIATE_TEST_CASE_P(Pulsar, ConsumerSeekTest, ::testing::Values(true, false)); - -} // namespace pulsar diff --git a/pulsar-client-cpp/tests/ConsumerTest.h b/pulsar-client-cpp/tests/ConsumerTest.h deleted file mode 100644 index 8c7d3f7d95eae..0000000000000 --- a/pulsar-client-cpp/tests/ConsumerTest.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "lib/ConsumerImpl.h" -#include - -using std::string; - -namespace pulsar { -class ConsumerTest { - public: - static int getNumOfMessagesInQueue(const Consumer& consumer) { - return consumer.impl_->getNumOfPrefetchedMessages(); - } -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/tests/CustomLoggerTest.cc b/pulsar-client-cpp/tests/CustomLoggerTest.cc deleted file mode 100644 index bd80c312e3ba8..0000000000000 --- a/pulsar-client-cpp/tests/CustomLoggerTest.cc +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include -#include - -using namespace pulsar; - -static std::vector logLines; - -class MyTestLogger : public Logger { - public: - MyTestLogger(const std::string &fileName) : fileName_(fileName) {} - - bool isEnabled(Level level) override { return true; } - - void log(Level level, int line, const std::string &message) override { - std::stringstream ss; - ss << std::this_thread::get_id() << " " << level << " " << fileName_ << ":" << line << " " << message - << std::endl; - logLines.emplace_back(ss.str()); - } - - private: - const std::string fileName_; -}; - -class MyTestLoggerFactory : public LoggerFactory { - public: - Logger *getLogger(const std::string &fileName) override { return new MyTestLogger(fileName); } -}; - -TEST(CustomLoggerTest, testCustomLogger) { - // simulate new client created on a different thread (because logging factory is called once per thread) - std::atomic_int numLogLines{0}; - auto testThread = std::thread([&numLogLines] { - ClientConfiguration clientConfig; - auto customLogFactory = new MyTestLoggerFactory(); - clientConfig.setLogger(customLogFactory); - // reset to previous log factory - Client client("pulsar://localhost:6650", clientConfig); - client.close(); - ASSERT_TRUE(logLines.size() > 0); - for (auto &&line : logLines) { - std::cout << line; - std::cout.flush(); - } - numLogLines = logLines.size(); - LogUtils::resetLoggerFactory(); - }); - testThread.join(); - - ClientConfiguration clientConfig; - Client client("pulsar://localhost:6650", clientConfig); - client.close(); - // custom logger didn't get any new lines - ASSERT_EQ(logLines.size(), numLogLines); -} - -TEST(CustomLoggerTest, testConsoleLoggerFactory) { - std::unique_ptr factory(new ConsoleLoggerFactory); - std::unique_ptr logger(factory->getLogger(__FILE__)); - ASSERT_FALSE(logger->isEnabled(Logger::LEVEL_DEBUG)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_INFO)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_WARN)); - 
ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_ERROR)); - - factory.reset(new ConsoleLoggerFactory(Logger::LEVEL_DEBUG)); - logger.reset(factory->getLogger(__FILE__)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_DEBUG)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_INFO)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_WARN)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_ERROR)); - - factory.reset(new ConsoleLoggerFactory(Logger::LEVEL_WARN)); - logger.reset(factory->getLogger(__FILE__)); - ASSERT_FALSE(logger->isEnabled(Logger::LEVEL_DEBUG)); - ASSERT_FALSE(logger->isEnabled(Logger::LEVEL_INFO)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_WARN)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_ERROR)); - - factory.reset(new ConsoleLoggerFactory(Logger::LEVEL_ERROR)); - logger.reset(factory->getLogger(__FILE__)); - ASSERT_FALSE(logger->isEnabled(Logger::LEVEL_DEBUG)); - ASSERT_FALSE(logger->isEnabled(Logger::LEVEL_INFO)); - ASSERT_FALSE(logger->isEnabled(Logger::LEVEL_WARN)); - ASSERT_TRUE(logger->isEnabled(Logger::LEVEL_ERROR)); -} diff --git a/pulsar-client-cpp/tests/CustomRoutingPolicy.h b/pulsar-client-cpp/tests/CustomRoutingPolicy.h deleted file mode 100644 index ed10c5ba26369..0000000000000 --- a/pulsar-client-cpp/tests/CustomRoutingPolicy.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef CUSTOM_ROUTER_POLICY_HEADER_ -#define CUSTOM_ROUTER_POLICY_HEADER_ - -#include // rand() -#include -#include - -namespace pulsar { -class CustomRoutingPolicy : public MessageRoutingPolicy { - /** @deprecated */ - int getPartition(const Message& msg) { - throw DeprecatedException("Use getPartition(const Message&, const TopicMetadata&) instead."); - } - - int getPartition(const Message& msg, const TopicMetadata& topicMetadata) { return 0; } -}; - -class SimpleRoundRobinRoutingPolicy : public MessageRoutingPolicy { - public: - SimpleRoundRobinRoutingPolicy() : counter_(0) {} - - int getPartition(const Message& msg, const TopicMetadata& topicMetadata) { - return counter_++ % topicMetadata.getNumPartitions(); - } - - private: - uint32_t counter_; -}; - -} // namespace pulsar - -#endif // CUSTOM_ROUTER_POLICY_HEADER_ diff --git a/pulsar-client-cpp/tests/HashTest.cc b/pulsar-client-cpp/tests/HashTest.cc deleted file mode 100644 index bd6de09ed68f8..0000000000000 --- a/pulsar-client-cpp/tests/HashTest.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include - -#include "../lib/BoostHash.h" -#include "../lib/JavaStringHash.h" -#include "../lib/Murmur3_32Hash.h" - -using ::testing::AtLeast; -using ::testing::Return; -using ::testing::ReturnRef; - -using namespace pulsar; - -TEST(HashTest, testBoostHash) { - BoostHash hash; - boost::hash boostHash; - - std::string key1 = "key1"; - std::string key2 = "key2"; - - ASSERT_EQ(boostHash(key1) & std::numeric_limits::max(), hash.makeHash(key1)); - ASSERT_EQ(boostHash(key2) & std::numeric_limits::max(), hash.makeHash(key2)); -} - -TEST(HashTest, testJavaStringHash) { - JavaStringHash hash; - - // Calculating `hashCode()` makes overflow as unsigned int32. - std::string key1 = "keykeykeykeykey1"; - - // `hashCode()` is negative as signed int32. 
- std::string key2 = "keykeykey2"; - - // Same as Java client - ASSERT_EQ(434058482, hash.makeHash(key1)); - ASSERT_EQ(42978643, hash.makeHash(key2)); -} - -TEST(HashTest, testMurmur3_32Hash) { - Murmur3_32Hash hash; - std::string k1 = "k1"; - std::string k2 = "k2"; - std::string key1 = "key1"; - std::string key2 = "key2"; - std::string key01 = "key01"; - std::string key02 = "key02"; - - // Same value as Java client - ASSERT_EQ(2110152746, hash.makeHash(k1)); - ASSERT_EQ(1479966664, hash.makeHash(k2)); - ASSERT_EQ(462881061, hash.makeHash(key1)); - ASSERT_EQ(1936800180, hash.makeHash(key2)); - ASSERT_EQ(39696932, hash.makeHash(key01)); - ASSERT_EQ(751761803, hash.makeHash(key02)); -} diff --git a/pulsar-client-cpp/tests/HttpHelper.cc b/pulsar-client-cpp/tests/HttpHelper.cc deleted file mode 100644 index c4118e6ed59d6..0000000000000 --- a/pulsar-client-cpp/tests/HttpHelper.cc +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include "HttpHelper.h" - -#include - -static int makeRequest(const std::string& method, const std::string& url, const std::string& body) { - CURL* curl = curl_easy_init(); - - struct curl_slist* list = NULL; - - list = curl_slist_append(list, "Content-Type: application/json"); - - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list); - curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); - curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, method.c_str()); - if (!body.empty()) { - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body.c_str()); - } - int res = curl_easy_perform(curl); - curl_slist_free_all(list); /* free the list again */ - - if (res != CURLE_OK) { - return -1; - } - - long httpResult = 0; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &httpResult); - curl_easy_cleanup(curl); - return (int)httpResult; -} - -int makePutRequest(const std::string& url, const std::string& body) { return makeRequest("PUT", url, body); } - -int makePostRequest(const std::string& url, const std::string& body) { - return makeRequest("POST", url, body); -} - -int makeDeleteRequest(const std::string& url) { return makeRequest("DELETE", url, ""); } diff --git a/pulsar-client-cpp/tests/HttpHelper.h b/pulsar-client-cpp/tests/HttpHelper.h deleted file mode 100644 index 68119a7926ab8..0000000000000 --- a/pulsar-client-cpp/tests/HttpHelper.h +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef HTTP_HELPER -#define HTTP_HELPER - -#include - -int makePutRequest(const std::string& url, const std::string& body); -int makePostRequest(const std::string& url, const std::string& body); -int makeDeleteRequest(const std::string& url); - -#endif /* end of include guard: HTTP_HELPER */ diff --git a/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc b/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc deleted file mode 100644 index fcb558a7dadb9..0000000000000 --- a/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include - -#include -#include -#include - -using namespace pulsar; - -static ProducerConfiguration createDefaultProducerConfig() { - // The default producer configuration only use number of messages to limit batching - return ProducerConfiguration() - .setBatchingType(ProducerConfiguration::KeyBasedBatching) - .setBatchingMaxAllowedSizeInBytes(static_cast(-1)) - .setBatchingMaxPublishDelayMs(3600 * 1000); -} - -class KeyBasedBatchingTest : public ::testing::Test { - protected: - KeyBasedBatchingTest() : client_("pulsar://localhost:6650") {} - - void TearDown() override { client_.close(); } - - void initTopicName(const std::string& testName) { - topicName_ = "KeyBasedBatchingTest-" + testName + "-" + std::to_string(time(nullptr)); - } - - void initProducer(const ProducerConfiguration& producerConfig) { - ASSERT_EQ(ResultOk, client_.createProducer(topicName_, producerConfig, producer_)); - } - - void initConsumer() { ASSERT_EQ(ResultOk, client_.subscribe(topicName_, "SubscriptionName", consumer_)); } - - void receiveAndAck(Message& msg) { - ASSERT_EQ(ResultOk, consumer_.receive(msg, 3000)); - ASSERT_EQ(ResultOk, consumer_.acknowledge(msg)); - } - - Client client_; - Producer producer_; - Consumer consumer_; - std::string topicName_; -}; - -TEST_F(KeyBasedBatchingTest, testFlush) { - initTopicName("Flush"); - // no limits for batching - initProducer(createDefaultProducerConfig().setBatchingMaxMessages( - static_cast(-1)) // no limits for batching - ); - - constexpr int numMessages = 100; - const std::string keys[] = {"A", "B"}; - std::atomic_int numMessageSent{0}; - for (int i = 0; i < numMessages; i++) { - producer_.sendAsync(MessageBuilder().setOrderingKey(keys[i % 2]).setContent("x").build(), - [&numMessageSent](Result result, const MessageId&) { - numMessageSent++; - ASSERT_EQ(result, ResultOk); - }); - } - - ASSERT_EQ(ResultOk, producer_.flush()); - ASSERT_EQ(numMessageSent.load(), numMessages); -} - 
-TEST_F(KeyBasedBatchingTest, testOrderingKeyPriority) { - initTopicName("OrderingKeyPriority"); - initProducer(createDefaultProducerConfig().setBatchingMaxMessages(3)); - initConsumer(); - - Latch latch(3); - auto sendCallback = [&latch](Result result, const MessageId& id) { - ASSERT_EQ(result, ResultOk); - latch.countdown(); - }; - // "0" is send to batch of "A" because ordering key has higher priority - producer_.sendAsync(MessageBuilder().setContent("0").setOrderingKey("A").setPartitionKey("B").build(), - sendCallback); - producer_.sendAsync(MessageBuilder().setContent("1").setOrderingKey("A").build(), sendCallback); - producer_.sendAsync(MessageBuilder().setContent("2").setOrderingKey("B").build(), sendCallback); - latch.countdown(); - - Message msg; - receiveAndAck(msg); - ASSERT_EQ("0", msg.getDataAsString()); - ASSERT_EQ("A", msg.getOrderingKey()); - ASSERT_EQ("B", msg.getPartitionKey()); - receiveAndAck(msg); - ASSERT_EQ("1", msg.getDataAsString()); - ASSERT_EQ("A", msg.getOrderingKey()); - receiveAndAck(msg); - ASSERT_EQ("2", msg.getDataAsString()); - ASSERT_EQ("B", msg.getOrderingKey()); -} - -TEST_F(KeyBasedBatchingTest, testSequenceId) { - initTopicName("SequenceId"); - initProducer(createDefaultProducerConfig().setBatchingMaxMessages(6)); - initConsumer(); - - Latch latch(6); - auto sendAsync = [this, &latch](const std::string& key, const std::string& value) { - producer_.sendAsync(MessageBuilder().setOrderingKey(key).setContent(value).build(), - [&latch](Result result, const MessageId& id) { - ASSERT_EQ(result, ResultOk); - latch.countdown(); - }); - }; - sendAsync("A", "0"); - sendAsync("B", "1"); - sendAsync("C", "2"); - sendAsync("B", "3"); - sendAsync("C", "4"); - sendAsync("A", "5"); - // sequence id: B < C < A, so there are 3 batches in order as following: - // B: 1, 3 - // C: 2, 4 - // A: 0, 5 - latch.wait(); - - std::vector receivedKeys; - std::vector receivedValues; - for (int i = 0; i < 6; i++) { - Message msg; - receiveAndAck(msg); - 
receivedKeys.emplace_back(msg.getOrderingKey()); - receivedValues.emplace_back(msg.getDataAsString()); - } - - decltype(receivedKeys) expectedKeys{"B", "B", "C", "C", "A", "A"}; - decltype(receivedValues) expectedValues{"1", "3", "2", "4", "0", "5"}; - EXPECT_EQ(receivedKeys, expectedKeys); - EXPECT_EQ(receivedValues, expectedValues); -} - -TEST_F(KeyBasedBatchingTest, testSingleBatch) { - initTopicName("SingleBatch"); - initProducer(createDefaultProducerConfig().setBatchingMaxMessages(5)); - initConsumer(); - - constexpr int numMessages = 5 * 100; - std::atomic_int numMessageSent{0}; - // messages with no key are packed to the same batch and this batch has no key - // the broker uses `NON_KEY` as the key when dispatching messages from this batch - for (int i = 0; i < numMessages; i++) { - producer_.sendAsync(MessageBuilder().setContent("x").build(), - [&numMessageSent](Result result, const MessageId&) { - ASSERT_EQ(result, ResultOk); - ++numMessageSent; - }); - } - - Message msg; - for (int i = 0; i < numMessages; i++) { - receiveAndAck(msg); - } - ASSERT_EQ(ResultTimeout, consumer_.receive(msg, 3000)); - ASSERT_EQ(numMessageSent.load(), numMessages); -} - -TEST_F(KeyBasedBatchingTest, testCloseBeforeSend) { - initTopicName("CloseBeforeSend"); - // Any asynchronous send won't be completed unless `close()` or `flush()` is triggered - initProducer(createDefaultProducerConfig().setBatchingMaxMessages(static_cast(-1))); - - std::mutex mtx; - std::vector results; - auto saveResult = [&mtx, &results](Result result) { - std::lock_guard lock(mtx); - results.emplace_back(result); - }; - auto sendAsync = [saveResult, this](const std::string& key, const std::string& value) { - producer_.sendAsync(MessageBuilder().setOrderingKey(key).setContent(value).build(), - [saveResult](Result result, const MessageId& id) { saveResult(result); }); - }; - - constexpr int numKeys = 10; - for (int i = 0; i < numKeys; i++) { - sendAsync("key-" + std::to_string(i), "value"); - } - - 
ASSERT_EQ(ResultOk, producer_.close()); - - // After close() completed, all callbacks should have failed with ResultAlreadyClosed - std::lock_guard lock(mtx); - ASSERT_EQ(results.size(), numKeys); - for (int i = 0; i < numKeys; i++) { - ASSERT_EQ(results[i], ResultAlreadyClosed) << " results[" << i << "] is " << results[i]; - } -} diff --git a/pulsar-client-cpp/tests/KeySharedConsumerTest.cc b/pulsar-client-cpp/tests/KeySharedConsumerTest.cc deleted file mode 100644 index 466033409997e..0000000000000 --- a/pulsar-client-cpp/tests/KeySharedConsumerTest.cc +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include - -#include -#include -#include "lib/LogUtils.h" - -#include "HttpHelper.h" -#include "LogHelper.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; - -class KeySharedConsumerTest : public ::testing::Test { - protected: - KeySharedConsumerTest() : client(lookupUrl, ClientConfiguration().setPartititionsUpdateInterval(1)) {} - - void TearDown() override { client.close(); } - - void addProducer(const std::string& topicName) { - producers.emplace_back(); - auto conf = ProducerConfiguration().setBatchingEnabled(false).setPartitionsRoutingMode( - ProducerConfiguration::RoundRobinDistribution); - ASSERT_EQ(ResultOk, client.createProducer(topicName, conf, producers.back())); - } - - void addBatchedProducer(const std::string& topicName, int batchingMaxMessages) { - producers.emplace_back(); - auto conf = - ProducerConfiguration() // - .setBatchingType(ProducerConfiguration::KeyBasedBatching) // - .setBatchingMaxPublishDelayMs(3000) // - .setBatchingMaxAllowedSizeInBytes(static_cast(-1)) // no limits on bytes - .setBatchingMaxMessages(batchingMaxMessages); - ASSERT_EQ(ResultOk, client.createProducer(topicName, conf, producers.back())); - } - - ConsumerConfiguration getConsumerConfiguration() { - ConsumerConfiguration conf; - conf.setConsumerType(ConsumerKeyShared); - conf.setPatternAutoDiscoveryPeriod(1); // find new topics quickly - return conf; - } - - void addConsumer(const std::string& topicName) { - consumers.emplace_back(); - ASSERT_EQ(ResultOk, - client.subscribe(topicName, subName, getConsumerConfiguration(), consumers.back())); - } - - void addRegexConsumer(const std::string& pattern) { - consumers.emplace_back(); - ASSERT_EQ(ResultOk, - client.subscribeWithRegex(pattern, subName, getConsumerConfiguration(), consumers.back())); - } - - static constexpr int NUMBER_OF_KEYS = 300; - - static Message newIntMessage(int i, const std::string& key, const char* 
orderingKey = nullptr) { - MessageBuilder builder; - if (orderingKey) { - builder.setOrderingKey(orderingKey); - } - return builder.setPartitionKey(key).setContent(std::to_string(i)).build(); - } - - static void sendCallback(Result result, const MessageId&) { ASSERT_EQ(result, ResultOk); } - - void receiveAndCheckDistribution(int expectedNumTotalMessages) { - keyToConsumer.clear(); - messagesPerConsumer.clear(); - int totalMessages = 0; - - for (size_t i = 0; i < consumers.size(); i++) { - auto& consumer = consumers[i]; - while (true) { - Message msg; - Result result = consumer.receive(msg, 3000); - if (result == ResultTimeout) { - break; - } - - ASSERT_EQ(result, ResultOk); - totalMessages++; - messagesPerConsumer[i]++; - ASSERT_EQ(ResultOk, consumer.acknowledge(msg)); - - if (msg.hasPartitionKey() || msg.hasOrderingKey()) { - std::string key = msg.hasOrderingKey() ? msg.getOrderingKey() : msg.getPartitionKey(); - auto iter = keyToConsumer.find(key); - if (iter == keyToConsumer.end()) { - keyToConsumer[key] = i; - } else { - // check messages with the same key will be consumed by the same consumer - ASSERT_EQ(iter->second, i); - } - } - } - } - - LOG_INFO("messagesPerConsumer: " << messagesPerConsumer); - int numTotalMessages = 0; - for (const auto& kv : messagesPerConsumer) { - numTotalMessages += kv.second; - } - ASSERT_EQ(numTotalMessages, expectedNumTotalMessages); - - const double expectedMessagesPerConsumer = static_cast(totalMessages) / consumers.size(); - constexpr double PERCENT_ERROR = 0.50; - for (const auto& kv : messagesPerConsumer) { - int count = kv.second; - ASSERT_LT(fabs(count - expectedMessagesPerConsumer), expectedMessagesPerConsumer * PERCENT_ERROR); - } - } - - Client client; - std::vector producers; - std::vector consumers; - const std::string subName = "SubscriptionName"; - - // key is message's ordering key or partitioned key, value is consumer index - std::map keyToConsumer; - // key is consumer index, value is the number of message 
received by - std::map messagesPerConsumer; -}; - -TEST_F(KeySharedConsumerTest, testNonPartitionedTopic) { - const std::string topicName = "KeySharedConsumerTest-non-par-topic" + std::to_string(time(nullptr)); - - addProducer(topicName); - for (int i = 0; i < 3; i++) { - addConsumer(topicName); - } - - srand(time(nullptr)); - constexpr int numMessagesPerProducer = 1000; - for (int i = 0; i < numMessagesPerProducer; i++) { - std::string key = std::to_string(rand() % NUMBER_OF_KEYS); - producers[0].sendAsync(newIntMessage(i, key), sendCallback); - } - ASSERT_EQ(ResultOk, producers[0].flush()); - - receiveAndCheckDistribution(numMessagesPerProducer); -} - -TEST_F(KeySharedConsumerTest, testMultiTopics) { - const std::string topicNamePrefix = "KeySharedConsumerTest-multi-topics" + std::to_string(time(nullptr)); - - for (int i = 0; i < 3; i++) { - addProducer(topicNamePrefix + std::to_string(i)); - } - for (int i = 0; i < 3; i++) { - addRegexConsumer(".*" + topicNamePrefix + ".*"); - } - - srand(time(nullptr)); - constexpr int numMessagesPerProducer = 1000; - for (auto& producer : producers) { - for (int i = 0; i < numMessagesPerProducer; i++) { - std::string key = std::to_string(rand() % NUMBER_OF_KEYS); - producer.sendAsync(newIntMessage(i, key), sendCallback); - } - ASSERT_EQ(ResultOk, producer.flush()); - } - - receiveAndCheckDistribution(numMessagesPerProducer * 3); -} - -TEST_F(KeySharedConsumerTest, testOrderingKeyPriority) { - const std::string topicName = - "KeySharedConsumerTest-ordering-key-priority" + std::to_string(time(nullptr)); - - addProducer(topicName); - for (int i = 0; i < 3; i++) { - addConsumer(topicName); - } - - srand(time(nullptr)); - constexpr int numMessagesPerProducer = 1000; - for (int i = 0; i < numMessagesPerProducer; i++) { - int randomInt = rand(); - std::string key = std::to_string(randomInt % NUMBER_OF_KEYS); - std::string orderingKey = std::to_string((randomInt + 1) % NUMBER_OF_KEYS); - producers[0].sendAsync(newIntMessage(i, key, 
orderingKey.c_str()), sendCallback); - } - ASSERT_EQ(ResultOk, producers[0].flush()); - - receiveAndCheckDistribution(numMessagesPerProducer); -} - -TEST_F(KeySharedConsumerTest, testKeyBasedBatching) { - const std::string topicName = "KeySharedConsumerTest-key-based-batching" + std::to_string(time(nullptr)); - constexpr int NUM_KEYS = 2; - constexpr int NUM_MESSAGES_PER_KEY = 100; - constexpr int BATCHING_MAX_MESSAGES = NUM_KEYS * NUM_MESSAGES_PER_KEY; - - addBatchedProducer(topicName, BATCHING_MAX_MESSAGES); - for (int i = 0; i < NUM_KEYS; i++) { - // Each consumer is associated with only one key - addConsumer(topicName); - } - - std::string keys[NUM_KEYS] = {"A", "B"}; - for (int i = 0; i < BATCHING_MAX_MESSAGES; i++) { - const auto& key = keys[i % NUM_KEYS]; - producers[0].sendAsync(newIntMessage(i, "", key.c_str()), sendCallback); - } - - receiveAndCheckDistribution(BATCHING_MAX_MESSAGES); - // Each consumer should receive 1 batched message for each key - for (int i = 0; i < NUM_KEYS; i++) { - ASSERT_EQ(messagesPerConsumer[i], NUM_MESSAGES_PER_KEY); - } -} diff --git a/pulsar-client-cpp/tests/KeySharedPolicyTest.cc b/pulsar-client-cpp/tests/KeySharedPolicyTest.cc deleted file mode 100644 index 49fef3fd8fd45..0000000000000 --- a/pulsar-client-cpp/tests/KeySharedPolicyTest.cc +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include - -#include -#include -#include -#include "lib/LogUtils.h" - -#include "HttpHelper.h" -#include "LogHelper.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; - -class KeySharedPolicyTest : public ::testing::Test { - protected: - KeySharedPolicyTest() : client(lookupUrl, ClientConfiguration().setPartititionsUpdateInterval(1)) {} - - void TearDown() override { client.close(); } - - void addProducer(const std::string& topicName) { - producers.emplace_back(); - auto conf = ProducerConfiguration().setBatchingEnabled(false).setPartitionsRoutingMode( - ProducerConfiguration::RoundRobinDistribution); - ASSERT_EQ(ResultOk, client.createProducer(topicName, conf, producers.back())); - } - - ConsumerConfiguration getConsumerConfiguration() { - ConsumerConfiguration conf; - conf.setConsumerType(ConsumerKeyShared); - return conf; - } - - static constexpr int NUMBER_OF_KEYS = 300; - - static Message newIntMessage(int i, const std::string& key, const char* orderingKey = nullptr) { - MessageBuilder builder; - if (orderingKey) { - builder.setOrderingKey(orderingKey); - } - return builder.setPartitionKey(key).setContent(std::to_string(i)).build(); - } - - static void sendCallback(Result result, const MessageId&) { ASSERT_EQ(result, ResultOk); } - - void receiveAndCheckDistribution(int expectedNumTotalMessages) { - keyToConsumer.clear(); - messagesPerConsumer.clear(); - int totalMessages = 0; - - for (size_t i = 0; i < consumers.size(); i++) { - auto& consumer 
= consumers[i]; - while (true) { - Message msg; - Result result = consumer.receive(msg, 3000); - if (result == ResultTimeout) { - break; - } - - ASSERT_EQ(result, ResultOk); - totalMessages++; - messagesPerConsumer[i]++; - ASSERT_EQ(ResultOk, consumer.acknowledge(msg)); - - if (msg.hasPartitionKey() || msg.hasOrderingKey()) { - std::string key = msg.hasOrderingKey() ? msg.getOrderingKey() : msg.getPartitionKey(); - auto iter = keyToConsumer.find(key); - if (iter == keyToConsumer.end()) { - keyToConsumer[key] = i; - } else { - // check messages with the same key will be consumed by the same consumer - ASSERT_EQ(iter->second, i); - } - } - } - } - - LOG_INFO("messagesPerConsumer: " << messagesPerConsumer); - int numTotalMessages = 0; - for (const auto& kv : messagesPerConsumer) { - numTotalMessages += kv.second; - } - ASSERT_EQ(numTotalMessages, expectedNumTotalMessages); - - const double expectedMessagesPerConsumer = static_cast(totalMessages) / consumers.size(); - constexpr double PERCENT_ERROR = 0.50; - for (const auto& kv : messagesPerConsumer) { - int count = kv.second; - ASSERT_LT(fabs(count - expectedMessagesPerConsumer), expectedMessagesPerConsumer * PERCENT_ERROR); - } - } - - Client client; - std::vector producers; - std::vector consumers; - const std::string subName = "SubscriptionName"; - - // key is message's ordering key or partitioned key, value is consumer index - std::map keyToConsumer; - // key is consumer index, value is the number of message received by - std::map messagesPerConsumer; -}; - -TEST_F(KeySharedPolicyTest, testStickyConsumer) { - const std::string topicName = "KeySharedPolicyTest-sticky-consumer" + std::to_string(time(nullptr)); - - consumers.emplace_back(); - KeySharedPolicy ksp1; - ksp1.setKeySharedMode(STICKY); - ksp1.setStickyRanges({StickyRange(0, 20000)}); - ConsumerConfiguration consumerConfig1 = getConsumerConfiguration().setKeySharedPolicy(ksp1); - Result result = client.subscribe(topicName, subName, consumerConfig1, 
consumers.back()); - ASSERT_EQ(ResultOk, result); - - consumers.emplace_back(); - KeySharedPolicy ksp2; - ksp2.setKeySharedMode(STICKY); - ksp2.setStickyRanges({StickyRange(20001, 40000)}); - ConsumerConfiguration consumerConfig2 = getConsumerConfiguration().setKeySharedPolicy(ksp2); - result = client.subscribe(topicName, subName, consumerConfig2, consumers.back()); - ASSERT_EQ(ResultOk, result); - - consumers.emplace_back(); - KeySharedPolicy ksp3; - ksp3.setKeySharedMode(STICKY); - ksp3.setStickyRanges({StickyRange(40001, 65535)}); - ConsumerConfiguration consumerConfig3 = getConsumerConfiguration().setKeySharedPolicy(ksp3); - result = client.subscribe(topicName, subName, consumerConfig3, consumers.back()); - ASSERT_EQ(ResultOk, result); - - addProducer(topicName); - - srand(time(nullptr)); - constexpr int numMessagesPerProducer = 1000; - for (int i = 0; i < numMessagesPerProducer; i++) { - std::string key = std::to_string(rand() % NUMBER_OF_KEYS); - producers[0].sendAsync(newIntMessage(i, key), sendCallback); - } - ASSERT_EQ(ResultOk, producers[0].flush()); - - receiveAndCheckDistribution(numMessagesPerProducer); -} - -TEST_F(KeySharedPolicyTest, ResultConsumerAssignError) { - const std::string topicName = - "KeySharedPolicyTest-result-consumer-assign-error" + std::to_string(time(nullptr)); - - // empty range - KeySharedPolicy ksp; - ksp.setKeySharedMode(STICKY); - ConsumerConfiguration consumerConfig = getConsumerConfiguration().setKeySharedPolicy(ksp); - Consumer consumer; - ASSERT_EQ(ResultConsumerAssignError, client.subscribe(topicName, subName, consumerConfig, consumer)); - - // intersect range - KeySharedPolicy ksp1; - ksp1.setKeySharedMode(STICKY); - ksp1.setStickyRanges({StickyRange(0, 65535)}); - ConsumerConfiguration consumerConfig1 = getConsumerConfiguration().setKeySharedPolicy(ksp1); - Consumer consumer1; - Result result = client.subscribe(topicName, subName, consumerConfig1, consumer1); - ASSERT_EQ(ResultOk, result); - - KeySharedPolicy ksp2; - 
ksp2.setKeySharedMode(STICKY); - ksp2.setStickyRanges({StickyRange(0, 65535)}); - ConsumerConfiguration consumerConfig2 = getConsumerConfiguration().setKeySharedPolicy(ksp2); - Consumer consumer2; - ASSERT_EQ(ResultConsumerAssignError, client.subscribe(topicName, subName, consumerConfig2, consumer2)); - - ASSERT_EQ(ResultOk, consumer1.close()); -} - -TEST_F(KeySharedPolicyTest, InvalidStickyRanges) { - KeySharedPolicy ksp; - ASSERT_THROW(ksp.setStickyRanges({}), std::invalid_argument); - ASSERT_THROW(ksp.setStickyRanges({StickyRange(-1, 10)}), std::invalid_argument); - ASSERT_THROW(ksp.setStickyRanges({StickyRange(0, 65536)}), std::invalid_argument); - ASSERT_THROW(ksp.setStickyRanges({StickyRange(0, 10), StickyRange(9, 20)}), std::invalid_argument); -} diff --git a/pulsar-client-cpp/tests/LatchTest.cc b/pulsar-client-cpp/tests/LatchTest.cc deleted file mode 100644 index c69141ef39872..0000000000000 --- a/pulsar-client-cpp/tests/LatchTest.cc +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include "LogUtils.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -class Service { - private: - std::string serviceName_; - std::chrono::milliseconds sleepDuration_; - Latch latch_; - std::thread thread_; - - public: - Service(const std::string& serviceName, std::chrono::milliseconds sleepDuration, const Latch& latch) - : serviceName_(serviceName), sleepDuration_(sleepDuration), latch_(latch) { - thread_ = std::thread(&Service::run, this); - } - - void run() { - std::this_thread::sleep_for(sleepDuration_); - LOG_INFO("Service " << serviceName_ << " is up"); - latch_.countdown(); - } - - ~Service() { thread_.join(); } -}; - -TEST(LatchTest, testCountDown) { - Latch latch(3); - Service service1("service1", std::chrono::milliseconds(50), latch); - Service service2("service2", std::chrono::milliseconds(30), latch); - Service service3("service3", std::chrono::milliseconds(20), latch); - latch.wait(); -} - -TEST(LatchTest, testLatchCount) { - Latch latch(3); - Service service1("service1", std::chrono::milliseconds(50), latch); - Service service2("service2", std::chrono::milliseconds(30), latch); - Service service3("service3", std::chrono::milliseconds(20), latch); - ASSERT_EQ(3, latch.getCount()); - latch.wait(); - ASSERT_EQ(0, latch.getCount()); -} - -TEST(LatchTest, testTimedWait) { - // Wait for at most 3 seconds which is more than the maximum sleep time (50 millis) - Latch latch1(3); - Service service1("service1", std::chrono::milliseconds(50), latch1); - Service service2("service2", std::chrono::milliseconds(30), latch1); - Service service3("service3", std::chrono::milliseconds(50), latch1); - ASSERT_TRUE(latch1.wait(std::chrono::seconds(3))); - - // Wait for up to 300 millis which is less than the maximum sleep time (500 millis) - Latch latch2(3); - Service service4("service4", std::chrono::milliseconds(500), latch2); - Service service5("service5", std::chrono::milliseconds(300), latch2); - Service service6("service6", 
std::chrono::milliseconds(500), latch2); - ASSERT_FALSE(latch2.wait(std::chrono::milliseconds(300))); - - // After the assert is passed and Service is destroyed because of join, the - // main thread would not exit until service4 thread is returned. -} diff --git a/pulsar-client-cpp/tests/LogHelper.h b/pulsar-client-cpp/tests/LogHelper.h deleted file mode 100644 index 113aa6710f638..0000000000000 --- a/pulsar-client-cpp/tests/LogHelper.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#pragma once - -#include -#include -#include - -template -inline std::ostream& operator<<(std::ostream& os, const std::map& m) { - os << "{"; - bool first = true; - for (const auto& kv : m) { - if (!first) { - os << ", "; - } else { - first = false; - } - os << kv.first << " => " << kv.second; - } - os << "}"; - return os; -} diff --git a/pulsar-client-cpp/tests/LoggerTest.cc b/pulsar-client-cpp/tests/LoggerTest.cc deleted file mode 100644 index d26ccc6b0e22b..0000000000000 --- a/pulsar-client-cpp/tests/LoggerTest.cc +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "LogUtils.h" -#include - -DECLARE_LOG_OBJECT() - -TEST(LoggerTest, testLogger) { - LOG_DEBUG("Testing logger..."); - int a = 5; - LOG_INFO("Testing logger with arguments " << a); -} diff --git a/pulsar-client-cpp/tests/LookupServiceTest.cc b/pulsar-client-cpp/tests/LookupServiceTest.cc deleted file mode 100644 index 77c1e1aaef234..0000000000000 --- a/pulsar-client-cpp/tests/LookupServiceTest.cc +++ /dev/null @@ -1,274 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -#include -#include -#include -#include "ConnectionPool.h" -#include "HttpHelper.h" -#include -#include -#include "LogUtils.h" -#include "RetryableLookupService.h" -#include "PulsarFriend.h" - -#include - -using namespace pulsar; - -DECLARE_LOG_OBJECT() - -TEST(LookupServiceTest, basicLookup) { - ExecutorServiceProviderPtr service = std::make_shared(1); - AuthenticationPtr authData = AuthFactory::Disabled(); - std::string url = "pulsar://localhost:6650"; - ClientConfiguration conf; - ExecutorServiceProviderPtr ioExecutorProvider_(std::make_shared(1)); - ConnectionPool pool_(conf, ioExecutorProvider_, authData, true); - ServiceNameResolver serviceNameResolver(url); - BinaryProtoLookupService lookupService(serviceNameResolver, pool_, ""); - - TopicNamePtr topicName = TopicName::get("topic"); - - Future partitionFuture = lookupService.getPartitionMetadataAsync(topicName); - LookupDataResultPtr lookupData; - Result result = partitionFuture.get(lookupData); - ASSERT_TRUE(lookupData != NULL); - ASSERT_EQ(0, lookupData->getPartitions()); - - const auto topicNamePtr = TopicName::get("topic"); - auto future = lookupService.getBroker(*topicNamePtr); - LookupService::LookupResult lookupResult; - result = future.get(lookupResult); - - ASSERT_EQ(ResultOk, result); - ASSERT_EQ(url, lookupResult.logicalAddress); - ASSERT_EQ(url, lookupResult.physicalAddress); -} - -TEST(LookupServiceTest, basicGetNamespaceTopics) { - std::string url = "pulsar://localhost:6650"; - std::string adminUrl = "http://localhost:8080/"; - Result result; - // 1. create some topics under same namespace - Client client(url); - - std::string topicName1 = "persistent://public/default/basicGetNamespaceTopics1"; - std::string topicName2 = "persistent://public/default/basicGetNamespaceTopics2"; - std::string topicName3 = "persistent://public/default/basicGetNamespaceTopics3"; - // This is not in same namespace. 
- std::string topicName4 = "persistent://public/default-2/basicGetNamespaceTopics4"; - - // call admin api to make topics partitioned - std::string url1 = adminUrl + "admin/v2/persistent/public/default/basicGetNamespaceTopics1/partitions"; - std::string url2 = adminUrl + "admin/v2/persistent/public/default/basicGetNamespaceTopics2/partitions"; - std::string url3 = adminUrl + "admin/v2/persistent/public/default/basicGetNamespaceTopics3/partitions"; - - int res = makePutRequest(url1, "2"); - ASSERT_FALSE(res != 204 && res != 409); - res = makePutRequest(url2, "3"); - ASSERT_FALSE(res != 204 && res != 409); - res = makePutRequest(url3, "4"); - ASSERT_FALSE(res != 204 && res != 409); - - Producer producer1; - result = client.createProducer(topicName1, producer1); - ASSERT_EQ(ResultOk, result); - Producer producer2; - result = client.createProducer(topicName2, producer2); - ASSERT_EQ(ResultOk, result); - Producer producer3; - result = client.createProducer(topicName3, producer3); - ASSERT_EQ(ResultOk, result); - Producer producer4; - result = client.createProducer(topicName4, producer4); - ASSERT_EQ(ResultOk, result); - - // 2. call getTopicsOfNamespaceAsync - ExecutorServiceProviderPtr service = std::make_shared(1); - AuthenticationPtr authData = AuthFactory::Disabled(); - ClientConfiguration conf; - ExecutorServiceProviderPtr ioExecutorProvider_(std::make_shared(1)); - ConnectionPool pool_(conf, ioExecutorProvider_, authData, true); - ServiceNameResolver serviceNameResolver(url); - BinaryProtoLookupService lookupService(serviceNameResolver, pool_, ""); - - TopicNamePtr topicName = TopicName::get(topicName1); - NamespaceNamePtr nsName = topicName->getNamespaceName(); - - Future getTopicsFuture = lookupService.getTopicsOfNamespaceAsync(nsName); - NamespaceTopicsPtr topicsData; - result = getTopicsFuture.get(topicsData); - ASSERT_EQ(ResultOk, result); - ASSERT_TRUE(topicsData != NULL); - - // 3. 
verify result contains first 3 topic - ASSERT_TRUE(std::find(topicsData->begin(), topicsData->end(), topicName1) != topicsData->end()); - ASSERT_TRUE(std::find(topicsData->begin(), topicsData->end(), topicName2) != topicsData->end()); - ASSERT_TRUE(std::find(topicsData->begin(), topicsData->end(), topicName3) != topicsData->end()); - ASSERT_FALSE(std::find(topicsData->begin(), topicsData->end(), topicName4) != topicsData->end()); - - client.shutdown(); -} - -static void testMultiAddresses(LookupService& lookupService) { - std::vector results; - constexpr int numRequests = 6; - - auto verifySuccessCount = [&results] { - // Only half of them succeeded - ASSERT_EQ(std::count(results.cbegin(), results.cend(), ResultOk), numRequests / 2); - ASSERT_EQ(std::count(results.cbegin(), results.cend(), ResultRetryable), numRequests / 2); - }; - - for (int i = 0; i < numRequests; i++) { - const auto topicNamePtr = TopicName::get("topic"); - LookupService::LookupResult lookupResult; - const auto result = lookupService.getBroker(*topicNamePtr).get(lookupResult); - LOG_INFO("getBroker [" << i << "] " << result << ", " << lookupResult); - results.emplace_back(result); - } - verifySuccessCount(); - - results.clear(); - for (int i = 0; i < numRequests; i++) { - LookupDataResultPtr data; - const auto result = lookupService.getPartitionMetadataAsync(TopicName::get("topic")).get(data); - LOG_INFO("getPartitionMetadataAsync [" << i << "] " << result); - results.emplace_back(result); - } - verifySuccessCount(); - - results.clear(); - for (int i = 0; i < numRequests; i++) { - NamespaceTopicsPtr data; - const auto result = - lookupService.getTopicsOfNamespaceAsync(TopicName::get("topic")->getNamespaceName()).get(data); - LOG_INFO("getTopicsOfNamespaceAsync [" << i << "] " << result); - results.emplace_back(result); - } - verifySuccessCount(); -} - -TEST(LookupServiceTest, testMultiAddresses) { - ConnectionPool pool({}, std::make_shared(1), AuthFactory::Disabled(), true); - 
ServiceNameResolver serviceNameResolver("pulsar://localhost,localhost:9999"); - BinaryProtoLookupService binaryLookupService(serviceNameResolver, pool, ""); - testMultiAddresses(binaryLookupService); - - // HTTPLookupService calls shared_from_this() internally, we must create a shared pointer to test - ServiceNameResolver serviceNameResolverForHttp("http://localhost,localhost:9999"); - auto httpLookupServicePtr = std::make_shared( - std::ref(serviceNameResolverForHttp), ClientConfiguration{}, AuthFactory::Disabled()); - testMultiAddresses(*httpLookupServicePtr); -} -TEST(LookupServiceTest, testRetry) { - auto executorProvider = std::make_shared(1); - ConnectionPool pool({}, executorProvider, AuthFactory::Disabled(), true); - ServiceNameResolver serviceNameResolver("pulsar://localhost:9999,localhost"); - - auto lookupService = RetryableLookupService::create( - std::make_shared(serviceNameResolver, pool, ""), 30 /* seconds */, - executorProvider); - - PulsarFriend::setServiceUrlIndex(serviceNameResolver, 0); - auto topicNamePtr = TopicName::get("lookup-service-test-retry"); - auto future1 = lookupService->getBroker(*topicNamePtr); - LookupService::LookupResult lookupResult; - ASSERT_EQ(ResultOk, future1.get(lookupResult)); - LOG_INFO("getBroker returns logicalAddress: " << lookupResult.logicalAddress - << ", physicalAddress: " << lookupResult.physicalAddress); - - PulsarFriend::setServiceUrlIndex(serviceNameResolver, 0); - auto future2 = lookupService->getPartitionMetadataAsync(topicNamePtr); - LookupDataResultPtr lookupDataResultPtr; - ASSERT_EQ(ResultOk, future2.get(lookupDataResultPtr)); - LOG_INFO("getPartitionMetadataAsync returns " << lookupDataResultPtr->getPartitions() << " partitions"); - - PulsarFriend::setServiceUrlIndex(serviceNameResolver, 0); - auto future3 = lookupService->getTopicsOfNamespaceAsync(topicNamePtr->getNamespaceName()); - NamespaceTopicsPtr namespaceTopicsPtr; - ASSERT_EQ(ResultOk, future3.get(namespaceTopicsPtr)); - 
LOG_INFO("getTopicPartitionName Async returns " << namespaceTopicsPtr->size() << " topics"); - - std::atomic_int retryCount{0}; - constexpr int totalRetryCount = 3; - auto future4 = lookupService->executeAsync("key", [&retryCount]() -> Future { - Promise promise; - if (++retryCount < totalRetryCount) { - LOG_INFO("Retry count: " << retryCount); - promise.setFailed(ResultRetryable); - } else { - LOG_INFO("Retry done with " << retryCount << " times"); - promise.setValue(100); - } - return promise.getFuture(); - }); - int customResult = 0; - ASSERT_EQ(ResultOk, future4.get(customResult)); - ASSERT_EQ(customResult, 100); - ASSERT_EQ(retryCount.load(), totalRetryCount); - - ASSERT_EQ(PulsarFriend::getNumberOfPendingTasks(*lookupService), 0); -} - -TEST(LookupServiceTest, testTimeout) { - auto executorProvider = std::make_shared(1); - ConnectionPool pool({}, executorProvider, AuthFactory::Disabled(), true); - ServiceNameResolver serviceNameResolver("pulsar://localhost:9990,localhost:9902,localhost:9904"); - - constexpr int timeoutInSeconds = 2; - auto lookupService = RetryableLookupService::create( - std::make_shared(serviceNameResolver, pool, ""), timeoutInSeconds, - executorProvider); - auto topicNamePtr = TopicName::get("lookup-service-test-retry"); - - decltype(std::chrono::high_resolution_clock::now()) startTime; - auto beforeMethod = [&startTime] { startTime = std::chrono::high_resolution_clock::now(); }; - auto afterMethod = [&startTime](const std::string& name) { - auto timeInterval = std::chrono::duration_cast( - std::chrono::high_resolution_clock::now() - startTime) - .count(); - LOG_INFO(name << " took " << timeInterval << " seconds"); - ASSERT_TRUE(timeInterval >= timeoutInSeconds * 1000L); - }; - - beforeMethod(); - auto future1 = lookupService->getBroker(*topicNamePtr); - LookupService::LookupResult lookupResult; - ASSERT_EQ(ResultTimeout, future1.get(lookupResult)); - afterMethod("getBroker"); - - beforeMethod(); - auto future2 = 
lookupService->getPartitionMetadataAsync(topicNamePtr); - LookupDataResultPtr lookupDataResultPtr; - ASSERT_EQ(ResultTimeout, future2.get(lookupDataResultPtr)); - afterMethod("getPartitionMetadataAsync"); - - beforeMethod(); - auto future3 = lookupService->getTopicsOfNamespaceAsync(topicNamePtr->getNamespaceName()); - NamespaceTopicsPtr namespaceTopicsPtr; - ASSERT_EQ(ResultTimeout, future3.get(namespaceTopicsPtr)); - afterMethod("getTopicsOfNamespaceAsync"); - - ASSERT_EQ(PulsarFriend::getNumberOfPendingTasks(*lookupService), 0); -} diff --git a/pulsar-client-cpp/tests/MapCacheTest.cc b/pulsar-client-cpp/tests/MapCacheTest.cc deleted file mode 100644 index 12a89ee17be34..0000000000000 --- a/pulsar-client-cpp/tests/MapCacheTest.cc +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -using namespace pulsar; - -struct MoveOnlyInt { - int x = 0; - - MoveOnlyInt() = default; - MoveOnlyInt(int xx) : x(xx) {} - MoveOnlyInt(const MoveOnlyInt&) = delete; - MoveOnlyInt(MoveOnlyInt&& rhs) noexcept : x(rhs.x) {} - - bool operator=(const MoveOnlyInt& rhs) const { return x == rhs.x; } -}; - -TEST(MapCacheTest, testPutIfAbsent) { - MapCache cache; - - ASSERT_NE(cache.putIfAbsent(1, {100}), cache.end()); - ASSERT_EQ(cache.putIfAbsent(1, {200}), cache.end()); - auto it = cache.find(1); - ASSERT_NE(it, cache.end()); - ASSERT_EQ(it->second.x, 100); - - cache.remove(1); - ASSERT_EQ(cache.find(1), cache.end()); -} - -TEST(MapCacheTest, testRemoveOldestValues) { - MapCache cache; - cache.putIfAbsent(1, {200}); - cache.putIfAbsent(2, {210}); - cache.putIfAbsent(3, {220}); - ASSERT_EQ(cache.getKeys(), (std::vector{1, 2, 3})); - - std::vector removedValues; - cache.removeOldestValues(2, [&removedValues](const int& key, const MoveOnlyInt& value) { - removedValues.emplace_back(value.x); - }); - ASSERT_EQ(removedValues, (std::vector{200, 210})); - - ASSERT_EQ(cache.getKeys(), (std::vector{3})); - ASSERT_EQ(cache.size(), 1); - auto it = cache.find(3); - ASSERT_NE(it, cache.end()); - ASSERT_EQ(it->second.x, 220); -} - -TEST(MapCacheTest, testRemoveAllValues) { - MapCache cache; - cache.putIfAbsent(1, {300}); - cache.putIfAbsent(2, {310}); - cache.putIfAbsent(3, {320}); - - // removeOldestValues works well even if the argument is greater than the size of keys - cache.removeOldestValues(10000, nullptr); - ASSERT_TRUE(cache.getKeys().empty()); - ASSERT_EQ(cache.size(), 0); -} diff --git a/pulsar-client-cpp/tests/MemoryLimitControllerTest.cc b/pulsar-client-cpp/tests/MemoryLimitControllerTest.cc deleted file mode 100644 index eb63760eedfba..0000000000000 --- a/pulsar-client-cpp/tests/MemoryLimitControllerTest.cc +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include - -#include "../lib/MemoryLimitController.h" -#include "../lib/Latch.h" - -using namespace pulsar; - -TEST(MemoryLimitControllerTest, testLimit) { - MemoryLimitController mlc(100); - - for (int i = 0; i < 101; i++) { - mlc.reserveMemory(1); - } - - ASSERT_EQ(mlc.currentUsage(), 101); - ASSERT_FALSE(mlc.tryReserveMemory(1)); - mlc.releaseMemory(1); - ASSERT_EQ(mlc.currentUsage(), 100); - - ASSERT_TRUE(mlc.tryReserveMemory(1)); - ASSERT_EQ(mlc.currentUsage(), 101); -} - -TEST(MemoryLimitControllerTest, testBlocking) { - MemoryLimitController mlc(100); - - for (int i = 0; i < 101; i++) { - mlc.reserveMemory(1); - } - - Latch l1(1); - std::thread t1([&]() { - mlc.reserveMemory(1); - l1.countdown(); - }); - - Latch l2(1); - std::thread t2([&]() { - mlc.reserveMemory(1); - l2.countdown(); - }); - - Latch l3(1); - std::thread t3([&]() { - mlc.reserveMemory(1); - l3.countdown(); - }); - - // The threads are blocked since the quota is full - ASSERT_FALSE(l1.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l2.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l3.wait(std::chrono::milliseconds(100))); - - ASSERT_EQ(mlc.currentUsage(), 101); - mlc.releaseMemory(3); - - ASSERT_TRUE(l1.wait(std::chrono::seconds(1))); - 
ASSERT_TRUE(l2.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l3.wait(std::chrono::seconds(1))); - ASSERT_EQ(mlc.currentUsage(), 101); - - t1.join(); - t2.join(); - t3.join(); -} - -TEST(MemoryLimitControllerTest, testStepRelease) { - MemoryLimitController mlc(100); - - for (int i = 0; i < 101; i++) { - mlc.reserveMemory(1); - } - - Latch l1(1); - std::thread t1([&]() { - mlc.reserveMemory(1); - l1.countdown(); - }); - - Latch l2(1); - std::thread t2([&]() { - mlc.reserveMemory(1); - l2.countdown(); - }); - - Latch l3(1); - std::thread t3([&]() { - mlc.reserveMemory(1); - l3.countdown(); - }); - - // The threads are blocked since the quota is full - ASSERT_FALSE(l1.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l2.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l3.wait(std::chrono::milliseconds(100))); - - ASSERT_EQ(mlc.currentUsage(), 101); - mlc.releaseMemory(1); - mlc.releaseMemory(1); - mlc.releaseMemory(1); - - ASSERT_TRUE(l1.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l2.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l3.wait(std::chrono::seconds(1))); - ASSERT_EQ(mlc.currentUsage(), 101); - - t1.join(); - t2.join(); - t3.join(); -} \ No newline at end of file diff --git a/pulsar-client-cpp/tests/MemoryLimitTest.cc b/pulsar-client-cpp/tests/MemoryLimitTest.cc deleted file mode 100644 index cb0b47aeb8be2..0000000000000 --- a/pulsar-client-cpp/tests/MemoryLimitTest.cc +++ /dev/null @@ -1,163 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include -#include - -#include "../lib/MemoryLimitController.h" -#include "../lib/Latch.h" -#include "../lib/Future.h" -#include "../lib/Utils.h" - -#include - -using namespace pulsar; - -extern std::string lookupUrl; -extern std::string unique_str(); - -TEST(MemoryLimitTest, testRejectMessages) { - std::string topic = "topic-" + unique_str(); - - ClientConfiguration config; - config.setMemoryLimit(100 * 1024); - Client client(lookupUrl, config); - - ProducerConfiguration producerConf; - producerConf.setBlockIfQueueFull(false); - Producer producer; - Result res = client.createProducer(topic, producerConf, producer); - ASSERT_EQ(res, ResultOk); - - const int n = 101; - Latch latch(n); - - std::array buffer; - - for (int i = 0; i < n; i++) { - producer.sendAsync(MessageBuilder().setContent(buffer.data(), buffer.size()).build(), - [&](Result res, const MessageId& msgId) { - ASSERT_EQ(res, ResultOk); - latch.countdown(); - }); - } - - res = producer.send(MessageBuilder().setContent(buffer.data(), buffer.size()).build()); - ASSERT_EQ(res, ResultMemoryBufferIsFull); - - latch.wait(); - - // We should now be able to send again - res = producer.send(MessageBuilder().setContent(buffer.data(), buffer.size()).build()); - ASSERT_EQ(res, ResultOk); -} - -TEST(MemoryLimitTest, testRejectMessagesOnMultipleTopics) { - std::string t1 = "topic-1-" + unique_str(); - std::string t2 = "topic-2-" + unique_str(); - - ClientConfiguration config; - config.setMemoryLimit(100 * 1024); - Client client(lookupUrl, config); - - ProducerConfiguration 
producerConf; - producerConf.setBlockIfQueueFull(false); - producerConf.setBatchingMaxPublishDelayMs(10000); - - Producer p1; - Result res = client.createProducer(t1, producerConf, p1); - ASSERT_EQ(res, ResultOk); - - Producer p2; - res = client.createProducer(t2, producerConf, p2); - ASSERT_EQ(res, ResultOk); - - const int n = 101; - Latch latch(n); - - std::array buffer; - - for (int i = 0; i < n / 2; i++) { - p1.sendAsync(MessageBuilder().setContent(buffer.data(), buffer.size()).build(), - [&](Result res, const MessageId& msgId) { - ASSERT_EQ(res, ResultOk); - latch.countdown(); - }); - - p2.sendAsync(MessageBuilder().setContent(buffer.data(), buffer.size()).build(), - [&](Result res, const MessageId& msgId) { - ASSERT_EQ(res, ResultOk); - latch.countdown(); - }); - } - - // Last message in order to reach the limit - p1.sendAsync(MessageBuilder().setContent(buffer.data(), buffer.size()).build(), - [&](Result res, const MessageId& msgId) { - ASSERT_EQ(res, ResultOk); - latch.countdown(); - }); - - res = p1.send(MessageBuilder().setContent(buffer.data(), buffer.size()).build()); - ASSERT_EQ(res, ResultMemoryBufferIsFull); - - res = p2.send(MessageBuilder().setContent(buffer.data(), buffer.size()).build()); - ASSERT_EQ(res, ResultMemoryBufferIsFull); - - latch.wait(); - - // We should now be able to send again - res = p1.send(MessageBuilder().setContent(buffer.data(), buffer.size()).build()); - ASSERT_EQ(res, ResultOk); - - res = p2.send(MessageBuilder().setContent(buffer.data(), buffer.size()).build()); - ASSERT_EQ(res, ResultOk); -} - -TEST(MemoryLimitTest, testNoProducerQueueSize) { - std::string topic = "topic-" + unique_str(); - - ClientConfiguration config; - config.setMemoryLimit(10 * 1024); - Client client(lookupUrl, config); - - ProducerConfiguration producerConf; - producerConf.setBlockIfQueueFull(true); - producerConf.setMaxPendingMessages(0); - producerConf.setMaxPendingMessagesAcrossPartitions(0); - Producer producer; - Result res = 
client.createProducer(topic, producerConf, producer); - ASSERT_EQ(res, ResultOk); - - std::array, 100> promises; - - for (int i = 0; i < 100; i++) { - producer.sendAsync(MessageBuilder().setContent("hello").build(), - WaitForCallbackValue(promises[i])); - } - - producer.flush(); - - for (auto& p : promises) { - MessageId id; - Result res = p.getFuture().get(id); - ASSERT_EQ(res, ResultOk); - } -} \ No newline at end of file diff --git a/pulsar-client-cpp/tests/MessageChunkingTest.cc b/pulsar-client-cpp/tests/MessageChunkingTest.cc deleted file mode 100644 index ae0114cefb162..0000000000000 --- a/pulsar-client-cpp/tests/MessageChunkingTest.cc +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -#include -#include -#include "lib/LogUtils.h" -#include "PulsarFriend.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static const std::string lookupUrl = "pulsar://localhost:6650"; - -// See the `maxMessageSize` config in test-conf/standalone-ssl.conf -static constexpr size_t maxMessageSize = 1024000; - -static std::string toString(CompressionType compressionType) { - switch (compressionType) { - case CompressionType::CompressionNone: - return "None"; - case CompressionType::CompressionLZ4: - return "LZ4"; - case CompressionType::CompressionZLib: - return "ZLib"; - case CompressionType::CompressionZSTD: - return "ZSTD"; - case CompressionType::CompressionSNAPPY: - return "SNAPPY"; - default: - return "Unknown (" + std::to_string(compressionType) + ")"; - } -} - -inline std::string createLargeMessage() { - std::string largeMessage(maxMessageSize * 3, 'a'); - std::default_random_engine e(time(nullptr)); - std::uniform_int_distribution u(0, 25); - for (size_t i = 0; i < largeMessage.size(); i++) { - largeMessage[i] = 'a' + u(e); - } - return largeMessage; -} - -class MessageChunkingTest : public ::testing::TestWithParam { - public: - static std::string largeMessage; - - void TearDown() override { client_.close(); } - - void createProducer(const std::string& topic, Producer& producer) { - ProducerConfiguration conf; - conf.setBatchingEnabled(false); - conf.setChunkingEnabled(true); - conf.setCompressionType(GetParam()); - LOG_INFO("Create producer to topic: " << topic - << ", compression: " << toString(conf.getCompressionType())); - ASSERT_EQ(ResultOk, client_.createProducer(topic, conf, producer)); - } - - void createConsumer(const std::string& topic, Consumer& consumer) { - ASSERT_EQ(ResultOk, client_.subscribe(topic, "my-sub", consumer)); - } - - private: - Client client_{lookupUrl}; -}; - -std::string MessageChunkingTest::largeMessage = createLargeMessage(); - -TEST_F(MessageChunkingTest, testInvalidConfig) { - Client 
client(lookupUrl); - ProducerConfiguration conf; - conf.setBatchingEnabled(true); - conf.setChunkingEnabled(true); - Producer producer; - ASSERT_THROW(client.createProducer("xxx", conf, producer), std::invalid_argument); - client.close(); -} - -TEST_P(MessageChunkingTest, testEndToEnd) { - const std::string topic = - "MessageChunkingTest-EndToEnd-" + toString(GetParam()) + std::to_string(time(nullptr)); - Consumer consumer; - createConsumer(topic, consumer); - Producer producer; - createProducer(topic, producer); - - constexpr int numMessages = 10; - - std::vector sendMessageIds; - for (int i = 0; i < numMessages; i++) { - MessageId messageId; - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent(largeMessage).build(), messageId)); - LOG_INFO("Send " << i << " to " << messageId); - sendMessageIds.emplace_back(messageId); - } - - Message msg; - std::vector receivedMessageIds; - for (int i = 0; i < numMessages; i++) { - ASSERT_EQ(ResultOk, consumer.receive(msg, 3000)); - LOG_INFO("Receive " << msg.getLength() << " bytes from " << msg.getMessageId()); - ASSERT_EQ(msg.getDataAsString(), largeMessage); - receivedMessageIds.emplace_back(msg.getMessageId()); - } - ASSERT_EQ(receivedMessageIds, sendMessageIds); - ASSERT_EQ(receivedMessageIds.front().ledgerId(), receivedMessageIds.front().ledgerId()); - ASSERT_GT(receivedMessageIds.back().entryId(), numMessages); - - // Verify the cache has been cleared - auto& chunkedMessageCache = PulsarFriend::getChunkedMessageCache(consumer); - ASSERT_EQ(chunkedMessageCache.size(), 0); -} - -// The CI env is Ubuntu 16.04, the gtest-dev version is 1.8.0 that doesn't have INSTANTIATE_TEST_SUITE_P -INSTANTIATE_TEST_CASE_P(Pulsar, MessageChunkingTest, - ::testing::Values(CompressionNone, CompressionLZ4, CompressionZLib, CompressionZSTD, - CompressionSNAPPY)); diff --git a/pulsar-client-cpp/tests/MessageIdTest.cc b/pulsar-client-cpp/tests/MessageIdTest.cc deleted file mode 100644 index 55fa181da05f4..0000000000000 --- 
a/pulsar-client-cpp/tests/MessageIdTest.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include "lib/MessageIdUtil.h" -#include "PulsarFriend.h" - -#include - -#include - -using namespace pulsar; - -TEST(MessageIdTest, testSerialization) { - MessageId msgId = PulsarFriend::getMessageId(-1, 1, 2, 3); - - std::string serialized; - msgId.serialize(serialized); - - MessageId deserialized = MessageId::deserialize(serialized); - - ASSERT_EQ(msgId, deserialized); -} - -TEST(MessageIdTest, testCompareLedgerAndEntryId) { - MessageId id1(-1, 2L, 1L, 0); - MessageId id2(-1, 2L, 1L, 1); - MessageId id3(-1, 2L, 2L, 0); - MessageId id4(-1, 3L, 0L, 0); - ASSERT_EQ(compareLedgerAndEntryId(id1, id2), 0); - ASSERT_EQ(compareLedgerAndEntryId(id1, id2), 0); - - ASSERT_EQ(compareLedgerAndEntryId(id1, id3), -1); - ASSERT_EQ(compareLedgerAndEntryId(id3, id1), 1); - - ASSERT_EQ(compareLedgerAndEntryId(id1, id4), -1); - ASSERT_EQ(compareLedgerAndEntryId(id4, id1), 1); - - ASSERT_EQ(compareLedgerAndEntryId(id2, id4), -1); - ASSERT_EQ(compareLedgerAndEntryId(id4, id2), 1); - - ASSERT_EQ(compareLedgerAndEntryId(id3, id4), -1); - ASSERT_EQ(compareLedgerAndEntryId(id4, id3), 1); -} diff 
--git a/pulsar-client-cpp/tests/MessageTest.cc b/pulsar-client-cpp/tests/MessageTest.cc deleted file mode 100644 index fcc22e97b65ce..0000000000000 --- a/pulsar-client-cpp/tests/MessageTest.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include - -using namespace pulsar; -TEST(MessageTest, testMessageContents) { - MessageBuilder msgBuilder1; - std::string content = "my-content"; - msgBuilder1.setContent(content); - Message msg = msgBuilder1.build(); - ASSERT_EQ(content, msg.getDataAsString()); - ASSERT_EQ(content.length(), msg.getLength()); - ASSERT_EQ(content, std::string((char*)msg.getData(), msg.getLength())); - - MessageBuilder msgBuilder2; - std::string myContents = "mycontents"; - msgBuilder2.setContent(myContents.c_str(), myContents.length()); - msg = msgBuilder2.build(); - ASSERT_EQ(myContents, std::string((char*)msg.getData(), msg.getLength())); - ASSERT_NE(myContents.c_str(), (char*)msg.getData()); - ASSERT_EQ(myContents, msg.getDataAsString()); - ASSERT_EQ(std::string("mycontents").length(), msg.getLength()); -} - -TEST(MessageTest, testAllocatedContents) { - MessageBuilder msgBuilder; - std::string str = "content"; - char* content = new char[str.length() + 1]; - strncpy(content, str.c_str(), str.length()); - msgBuilder.setAllocatedContent(content, str.length()); - Message msg = msgBuilder.build(); - ASSERT_FALSE(strncmp("content", (char*)msg.getData(), msg.getLength())); - ASSERT_EQ(content, (char*)msg.getData()); - delete[] content; -} - -template -bool compareMaps(const Map& lhs, const Map& rhs) { - return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); -} - -TEST(MessageTest, testProperties) { - MessageBuilder msgBuilder1; - msgBuilder1.setProperty("property1", "value1"); - Message msg = msgBuilder1.build(); - ASSERT_EQ(msg.getProperty("property1"), "value1"); - - MessageBuilder msgBuilder2; - Message::StringMap stringMap; - stringMap.insert(std::pair("p1", "v1")); - stringMap.insert(std::pair("p2", "v2")); - stringMap.insert(std::pair("p3", "v3")); - msgBuilder2.setProperties(stringMap); - msg = msgBuilder2.build(); - ASSERT_EQ(msg.getProperty("p1"), "v1"); - ASSERT_EQ(msg.getProperty("p2"), "v2"); - 
ASSERT_EQ(msg.getProperty("p3"), "v3"); - ASSERT_TRUE(compareMaps(msg.getProperties(), stringMap)); -} - -TEST(MessageTest, testMessageBuilder) { - std::string value; - value.resize(1024, 'x'); - const void* originalAddress = &value[0]; - { - auto msg = MessageBuilder().setContent(value).build(); - ASSERT_NE(msg.getData(), originalAddress); - } - { - auto msg = MessageBuilder().setContent(value.data(), value.length()).build(); - ASSERT_NE(msg.getData(), originalAddress); - } - { - auto msg = MessageBuilder().setAllocatedContent(&value[0], value.length()).build(); - ASSERT_EQ(msg.getData(), originalAddress); - } - { - auto msg = MessageBuilder().setContent(std::move(value)).build(); - ASSERT_EQ(msg.getData(), originalAddress); - } -} diff --git a/pulsar-client-cpp/tests/NamespaceNameTest.cc b/pulsar-client-cpp/tests/NamespaceNameTest.cc deleted file mode 100644 index 85a57494282f9..0000000000000 --- a/pulsar-client-cpp/tests/NamespaceNameTest.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -#include -using namespace pulsar; - -TEST(NamespaceNameTest, testNamespaceName) { - std::shared_ptr nn1 = NamespaceName::get("property", "cluster", "namespace"); - ASSERT_EQ("property", nn1->getProperty()); - ASSERT_EQ("cluster", nn1->getCluster()); - ASSERT_EQ("namespace", nn1->getLocalName()); - ASSERT_FALSE(nn1->isV2()); - - std::shared_ptr nn2 = NamespaceName::get("property", "cluster", "namespace"); - ASSERT_TRUE(*nn1 == *nn2); -} - -TEST(NamespaceNameTest, testNamespaceNameV2) { - std::shared_ptr nn1 = NamespaceName::get("property", "namespace"); - ASSERT_EQ("property", nn1->getProperty()); - ASSERT_TRUE(nn1->getCluster().empty()); - ASSERT_EQ("namespace", nn1->getLocalName()); - ASSERT_TRUE(nn1->isV2()); - - std::shared_ptr nn2 = NamespaceName::get("property", "namespace"); - ASSERT_TRUE(*nn1 == *nn2); -} diff --git a/pulsar-client-cpp/tests/NoOpsCryptoKeyReader.h b/pulsar-client-cpp/tests/NoOpsCryptoKeyReader.h deleted file mode 100644 index e152690e58bfa..0000000000000 --- a/pulsar-client-cpp/tests/NoOpsCryptoKeyReader.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include - -namespace pulsar { - -class NoOpsCryptoKeyReader : public CryptoKeyReader { - public: - Result getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const override { - return ResultOk; - } - - Result getPrivateKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const override { - return ResultOk; - } -}; - -} // namespace pulsar diff --git a/pulsar-client-cpp/tests/PaddingDemo.proto b/pulsar-client-cpp/tests/PaddingDemo.proto deleted file mode 100644 index 199a60026da49..0000000000000 --- a/pulsar-client-cpp/tests/PaddingDemo.proto +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -syntax = "proto3"; - -package padding.demo; - -message Person { - string name = 1; - int32 id = 2; -} diff --git a/pulsar-client-cpp/tests/PartitionsUpdateTest.cc b/pulsar-client-cpp/tests/PartitionsUpdateTest.cc deleted file mode 100644 index 845e44771bd71..0000000000000 --- a/pulsar-client-cpp/tests/PartitionsUpdateTest.cc +++ /dev/null @@ -1,185 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include - -#include -#include -#include -#include - -#include "HttpHelper.h" -#include "CustomRoutingPolicy.h" - -using namespace pulsar; - -static const std::string serviceUrl = "pulsar://localhost:6650"; -static const std::string adminUrl = "http://localhost:8080/"; - -static ClientConfiguration newClientConfig(bool enablePartitionsUpdate) { - ClientConfiguration clientConfig; - if (enablePartitionsUpdate) { - clientConfig.setPartititionsUpdateInterval(1); // 1s - } else { - clientConfig.setPartititionsUpdateInterval(0); // disable - } - return clientConfig; -} - -// In round robin routing mode, if N messages were sent to a topic with N partitions, each partition must have -// received 1 message. So we check whether producer/consumer have increased along with partitions by checking -// partitions' count of N messages. -// Use std::set because it doesn't allow repeated elements. 
-class PartitionsSet { - public: - size_t size() const { return names_.size(); } - - Result initProducer(std::string topicName, bool enablePartitionsUpdate, - bool lazyStartPartitionedProducers) { - clientForProducer_.reset(new Client(serviceUrl, newClientConfig(enablePartitionsUpdate))); - const auto producerConfig = ProducerConfiguration() - .setMessageRouter(std::make_shared()) - .setLazyStartPartitionedProducers(lazyStartPartitionedProducers); - return clientForProducer_->createProducer(topicName, producerConfig, producer_); - } - - Result initConsumer(std::string topicName, bool enablePartitionsUpdate) { - clientForConsumer_.reset(new Client(serviceUrl, newClientConfig(enablePartitionsUpdate))); - return clientForConsumer_->subscribe(topicName, "SubscriptionName", consumer_); - } - - void close() { - producer_.close(); - clientForProducer_->close(); - consumer_.close(); - clientForConsumer_->close(); - } - - void doSendAndReceive(int numMessagesSend, int numMessagesReceive) { - names_.clear(); - for (int i = 0; i < numMessagesSend; i++) { - producer_.send(MessageBuilder().setContent("a").build()); - } - while (numMessagesReceive > 0) { - Message msg; - if (consumer_.receive(msg, 100) == ResultOk) { - names_.emplace(msg.getTopicName()); - consumer_.acknowledge(msg); - numMessagesReceive--; - } - } - } - - private: - std::set names_; - - std::unique_ptr clientForProducer_; - Producer producer_; - - std::unique_ptr clientForConsumer_; - Consumer consumer_; -}; - -static void waitForPartitionsUpdated() { - // Assume producer and consumer have updated partitions in 3 seconds if enabled - std::this_thread::sleep_for(std::chrono::seconds(3)); -} - -TEST(PartitionsUpdateTest, testConfigPartitionsUpdateInterval) { - ClientConfiguration clientConfig; - ASSERT_EQ(60, clientConfig.getPartitionsUpdateInterval()); - - clientConfig.setPartititionsUpdateInterval(0); - ASSERT_EQ(0, clientConfig.getPartitionsUpdateInterval()); - - clientConfig.setPartititionsUpdateInterval(1); 
- ASSERT_EQ(1, clientConfig.getPartitionsUpdateInterval()); - - clientConfig.setPartititionsUpdateInterval(-1); - ASSERT_EQ(static_cast(-1), clientConfig.getPartitionsUpdateInterval()); -} - -void testPartitionsUpdate(bool lazyStartPartitionedProducers, std::string topicNameSuffix) { - std::string topicName = "persistent://" + topicNameSuffix; - std::string topicOperateUrl = adminUrl + "admin/v2/persistent/" + topicNameSuffix + "/partitions"; - - // Ensure `topicName` doesn't exist before created - makeDeleteRequest(topicOperateUrl); - // Create a 2 partitions topic - int res = makePutRequest(topicOperateUrl, "2"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - PartitionsSet partitionsSet; - - // 1. Both producer and consumer enable partitions update - ASSERT_EQ(ResultOk, partitionsSet.initProducer(topicName, true, lazyStartPartitionedProducers)); - ASSERT_EQ(ResultOk, partitionsSet.initConsumer(topicName, true)); - - res = makePostRequest(topicOperateUrl, "3"); // update partitions to 3 - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - waitForPartitionsUpdated(); - - partitionsSet.doSendAndReceive(3, 3); - ASSERT_EQ(3, partitionsSet.size()); - partitionsSet.close(); - - // 2. Only producer enables partitions update - ASSERT_EQ(ResultOk, partitionsSet.initProducer(topicName, true, false)); - ASSERT_EQ(ResultOk, partitionsSet.initConsumer(topicName, false)); - - res = makePostRequest(topicOperateUrl, "5"); // update partitions to 5 - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - waitForPartitionsUpdated(); - - partitionsSet.doSendAndReceive(5, 3); // can't consume partition-3,4 - ASSERT_EQ(3, partitionsSet.size()); - partitionsSet.close(); - - // 3. 
Only consumer enables partitions update - ASSERT_EQ(ResultOk, partitionsSet.initProducer(topicName, false, false)); - ASSERT_EQ(ResultOk, partitionsSet.initConsumer(topicName, true)); - - res = makePostRequest(topicOperateUrl, "7"); // update partitions to 7 - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - waitForPartitionsUpdated(); - - partitionsSet.doSendAndReceive(7, 7); - ASSERT_EQ(5, partitionsSet.size()); - partitionsSet.close(); - - // 4. Both producer and consumer disables partitions update - ASSERT_EQ(ResultOk, partitionsSet.initProducer(topicName, false, false)); - ASSERT_EQ(ResultOk, partitionsSet.initConsumer(topicName, false)); - - res = makePostRequest(topicOperateUrl, "10"); // update partitions to 10 - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - waitForPartitionsUpdated(); - - partitionsSet.doSendAndReceive(10, 10); - ASSERT_EQ(7, partitionsSet.size()); - partitionsSet.close(); -} - -TEST(PartitionsUpdateTest, testPartitionsUpdate) { - testPartitionsUpdate(false, "public/default/partitions-update-test-topic"); -} - -TEST(PartitionsUpdateTest, testPartitionsUpdateWithLazyProducers) { - testPartitionsUpdate(true, "public/default/partitions-update-test-topic-lazy"); -} diff --git a/pulsar-client-cpp/tests/PeriodicTaskTest.cc b/pulsar-client-cpp/tests/PeriodicTaskTest.cc deleted file mode 100644 index 2c1da70e80e3c..0000000000000 --- a/pulsar-client-cpp/tests/PeriodicTaskTest.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include -#include "lib/ExecutorService.h" -#include "lib/LogUtils.h" -#include "lib/PeriodicTask.h" - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -TEST(PeriodicTaskTest, testCountdownTask) { - auto executor = ExecutorService::create(); - - std::atomic_int count{5}; - - auto task = std::make_shared(executor->getIOService(), 200); - task->setCallback([task, &count](const PeriodicTask::ErrorCode& ec) { - if (--count <= 0) { - task->stop(); - } - LOG_INFO("Now count is " << count << ", error code: " << ec.message()); - }); - - // Wait for 2 seconds to verify callback won't be triggered after 1 second (200 ms * 5) - task->start(); - std::this_thread::sleep_for(std::chrono::seconds(2)); - LOG_INFO("Now count is " << count); - ASSERT_EQ(count.load(), 0); - task->stop(); // it's redundant, just to verify multiple stop() is idempotent - - // Test start again - count = 1; - task->start(); - std::this_thread::sleep_for(std::chrono::milliseconds(800)); - LOG_INFO("Now count is " << count); - ASSERT_EQ(count.load(), 0); - task->stop(); - - executor->close(); -} - -TEST(PeriodicTaskTest, testNegativePeriod) { - auto executor = ExecutorService::create(); - - auto task = std::make_shared(executor->getIOService(), -1); - std::atomic_bool callbackTriggered{false}; - task->setCallback([&callbackTriggered](const PeriodicTask::ErrorCode& ec) { callbackTriggered = true; }); - - task->start(); - std::this_thread::sleep_for(std::chrono::seconds(1)); - ASSERT_EQ(callbackTriggered.load(), false); - task->stop(); - - 
executor->close(); -} diff --git a/pulsar-client-cpp/tests/ProducerConfigurationTest.cc b/pulsar-client-cpp/tests/ProducerConfigurationTest.cc deleted file mode 100644 index 5c541295ff966..0000000000000 --- a/pulsar-client-cpp/tests/ProducerConfigurationTest.cc +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include "NoOpsCryptoKeyReader.h" - -using namespace pulsar; - -TEST(ProducerConfigurationTest, testDefaultConfig) { - ProducerConfiguration conf; - ASSERT_EQ(conf.getProducerName(), ""); - ASSERT_EQ(conf.getSchema().getName(), "BYTES"); - ASSERT_EQ(conf.getSchema().getSchemaType(), SchemaType::BYTES); - ASSERT_EQ(conf.getSendTimeout(), 30000); - ASSERT_EQ(conf.getInitialSequenceId(), -1ll); - ASSERT_EQ(conf.getCompressionType(), CompressionType::CompressionNone); - ASSERT_EQ(conf.getMaxPendingMessages(), 1000); - ASSERT_EQ(conf.getMaxPendingMessagesAcrossPartitions(), 50000); - ASSERT_EQ(conf.getPartitionsRoutingMode(), ProducerConfiguration::UseSinglePartition); - ASSERT_EQ(conf.getMessageRouterPtr(), MessageRoutingPolicyPtr{}); - ASSERT_EQ(conf.getHashingScheme(), ProducerConfiguration::BoostHash); - ASSERT_EQ(conf.getBlockIfQueueFull(), false); - ASSERT_EQ(conf.getBatchingEnabled(), true); - ASSERT_EQ(conf.getBatchingMaxMessages(), 1000); - ASSERT_EQ(conf.getBatchingMaxAllowedSizeInBytes(), 128 * 1024); - ASSERT_EQ(conf.getBatchingMaxPublishDelayMs(), 10); - ASSERT_EQ(conf.getBatchingType(), ProducerConfiguration::DefaultBatching); - ASSERT_EQ(conf.getCryptoKeyReader(), CryptoKeyReaderPtr{}); - ASSERT_EQ(conf.getCryptoFailureAction(), ProducerCryptoFailureAction::FAIL); - ASSERT_EQ(conf.isEncryptionEnabled(), false); - ASSERT_EQ(conf.getEncryptionKeys(), std::set{}); - ASSERT_EQ(conf.getProperties().empty(), true); - ASSERT_EQ(conf.isChunkingEnabled(), false); -} - -class MockMessageRoutingPolicy : public MessageRoutingPolicy { - public: - int getPartition(const Message& msg) override { return 0; } - int getPartition(const Message& msg, const TopicMetadata& topicMetadata) override { return 0; } -}; - -TEST(ProducerConfigurationTest, testCustomConfig) { - ProducerConfiguration conf; - - conf.setProducerName("producer"); - ASSERT_EQ(conf.getProducerName(), "producer"); - - const std::string exampleSchema = - 
"{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\"," - "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}"; - const SchemaInfo schema(AVRO, "Avro", exampleSchema, StringMap{{"schema-key", "schema-value"}}); - - conf.setSchema(schema); - ASSERT_EQ(conf.getSchema().getName(), schema.getName()); - ASSERT_EQ(conf.getSchema().getSchemaType(), schema.getSchemaType()); - ASSERT_EQ(conf.getSchema().getSchema(), schema.getSchema()); - ASSERT_EQ(conf.getSchema().getProperties(), schema.getProperties()); - - conf.setSendTimeout(0); - ASSERT_EQ(conf.getSendTimeout(), 0); - - conf.setInitialSequenceId(100ll); - ASSERT_EQ(conf.getInitialSequenceId(), 100ll); - - conf.setCompressionType(CompressionType::CompressionLZ4); - ASSERT_EQ(conf.getCompressionType(), CompressionType::CompressionLZ4); - - conf.setMaxPendingMessages(2000); - ASSERT_EQ(conf.getMaxPendingMessages(), 2000); - - conf.setMaxPendingMessagesAcrossPartitions(100000); - ASSERT_EQ(conf.getMaxPendingMessagesAcrossPartitions(), 100000); - - conf.setPartitionsRoutingMode(ProducerConfiguration::RoundRobinDistribution); - ASSERT_EQ(conf.getPartitionsRoutingMode(), ProducerConfiguration::RoundRobinDistribution); - - const auto router = std::make_shared(); - conf.setMessageRouter(router); - ASSERT_EQ(conf.getPartitionsRoutingMode(), ProducerConfiguration::CustomPartition); - ASSERT_EQ(conf.getMessageRouterPtr(), router); - - conf.setHashingScheme(ProducerConfiguration::JavaStringHash); - ASSERT_EQ(conf.getHashingScheme(), ProducerConfiguration::JavaStringHash); - - conf.setBlockIfQueueFull(true); - ASSERT_EQ(conf.getBlockIfQueueFull(), true); - - conf.setBatchingEnabled(false); - ASSERT_EQ(conf.getBatchingEnabled(), false); - - conf.setBatchingMaxMessages(2000); - ASSERT_EQ(conf.getBatchingMaxMessages(), 2000); - - conf.setBatchingMaxAllowedSizeInBytes(1024); - ASSERT_EQ(conf.getBatchingMaxAllowedSizeInBytes(), 1024); - - conf.setBatchingMaxPublishDelayMs(1); - 
ASSERT_EQ(conf.getBatchingMaxPublishDelayMs(), 1); - - conf.setBatchingType(ProducerConfiguration::KeyBasedBatching); - ASSERT_EQ(conf.getBatchingType(), ProducerConfiguration::KeyBasedBatching); - - const auto cryptoKeyReader = std::make_shared(); - conf.setCryptoKeyReader(cryptoKeyReader); - ASSERT_EQ(conf.getCryptoKeyReader(), cryptoKeyReader); - - conf.setCryptoFailureAction(pulsar::ProducerCryptoFailureAction::SEND); - ASSERT_EQ(conf.getCryptoFailureAction(), ProducerCryptoFailureAction::SEND); - - conf.addEncryptionKey("key"); - ASSERT_EQ(conf.getEncryptionKeys(), std::set{"key"}); - ASSERT_EQ(conf.isEncryptionEnabled(), true); - - conf.setProperty("k1", "v1"); - ASSERT_EQ(conf.getProperties()["k1"], "v1"); - ASSERT_EQ(conf.hasProperty("k1"), true); - - conf.setChunkingEnabled(true); - ASSERT_EQ(conf.isChunkingEnabled(), true); -} diff --git a/pulsar-client-cpp/tests/ProducerTest.cc b/pulsar-client-cpp/tests/ProducerTest.cc deleted file mode 100644 index d351ee9cdbc57..0000000000000 --- a/pulsar-client-cpp/tests/ProducerTest.cc +++ /dev/null @@ -1,298 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -#include "HttpHelper.h" - -#include "lib/Future.h" -#include "lib/Utils.h" -#include "lib/Latch.h" -#include "lib/LogUtils.h" -#include "lib/ProducerImpl.h" -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static const std::string serviceUrl = "pulsar://localhost:6650"; -static const std::string adminUrl = "http://localhost:8080/"; - -// See the `maxMessageSize` config in test-conf/standalone-ssl.conf -static constexpr size_t maxMessageSize = 1024000; - -TEST(ProducerTest, producerNotInitialized) { - Producer producer; - - Message msg = MessageBuilder().setContent("test").build(); - - ASSERT_EQ(ResultProducerNotInitialized, producer.send(msg)); - - Promise promise; - producer.sendAsync(msg, WaitForCallbackValue(promise)); - - MessageId mi; - ASSERT_EQ(ResultProducerNotInitialized, promise.getFuture().get(mi)); - - ASSERT_EQ(ResultProducerNotInitialized, producer.close()); - - Promise promiseClose; - producer.closeAsync(WaitForCallback(promiseClose)); - - Result result; - promiseClose.getFuture().get(result); - ASSERT_EQ(ResultProducerNotInitialized, result); - - ASSERT_TRUE(producer.getTopic().empty()); -} - -TEST(ProducerTest, exactlyOnceWithProducerNameSpecified) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/exactlyOnceWithProducerNameSpecified"; - - Producer producer1; - ProducerConfiguration producerConfiguration1; - producerConfiguration1.setProducerName("p-name-1"); - - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConfiguration1, producer1)); - - Producer producer2; - ProducerConfiguration producerConfiguration2; - producerConfiguration2.setProducerName("p-name-2"); - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConfiguration2, producer2)); - - Producer producer3; - Result result = client.createProducer(topicName, producerConfiguration2, producer3); - ASSERT_EQ(ResultProducerBusy, result); -} - -TEST(ProducerTest, testSynchronouslySend) { - 
Client client(serviceUrl); - const std::string topic = "ProducerTestSynchronouslySend"; - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub-name", consumer)); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - MessageId messageId; - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent("hello").build(), messageId)); - LOG_INFO("Send message to " << messageId); - - Message receivedMessage; - ASSERT_EQ(ResultOk, consumer.receive(receivedMessage, 3000)); - LOG_INFO("Received message from " << receivedMessage.getMessageId()); - ASSERT_EQ(receivedMessage.getMessageId(), messageId); - ASSERT_EQ(ResultOk, consumer.acknowledge(receivedMessage)); - - client.close(); -} - -TEST(ProducerTest, testIsConnected) { - Client client(serviceUrl); - const std::string nonPartitionedTopic = - "testProducerIsConnectedNonPartitioned-" + std::to_string(time(nullptr)); - const std::string partitionedTopic = - "testProducerIsConnectedPartitioned-" + std::to_string(time(nullptr)); - - Producer producer; - ASSERT_FALSE(producer.isConnected()); - // ProducerImpl - ASSERT_EQ(ResultOk, client.createProducer(nonPartitionedTopic, producer)); - ASSERT_TRUE(producer.isConnected()); - ASSERT_EQ(ResultOk, producer.close()); - ASSERT_FALSE(producer.isConnected()); - - int res = makePutRequest( - adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "2"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - // PartitionedProducerImpl - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producer)); - ASSERT_TRUE(producer.isConnected()); - ASSERT_EQ(ResultOk, producer.close()); - ASSERT_FALSE(producer.isConnected()); - - client.close(); -} - -TEST(ProducerTest, testSendAsyncAfterCloseAsyncWithLazyProducers) { - Client client(serviceUrl); - const std::string partitionedTopic = - "testProducerIsConnectedPartitioned-" + std::to_string(time(nullptr)); - - int res = makePutRequest( - adminUrl + 
"admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "10"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - ProducerConfiguration producerConfiguration; - producerConfiguration.setLazyStartPartitionedProducers(true); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfiguration, producer)); - - Message msg = MessageBuilder().setContent("test").build(); - - Promise promiseClose; - producer.closeAsync(WaitForCallback(promiseClose)); - - Promise promise; - producer.sendAsync(msg, WaitForCallbackValue(promise)); - - MessageId mi; - ASSERT_EQ(ResultAlreadyClosed, promise.getFuture().get(mi)); - - Result result; - promiseClose.getFuture().get(result); - ASSERT_EQ(ResultOk, result); -} - -TEST(ProducerTest, testGetNumOfChunks) { - ASSERT_EQ(ProducerImpl::getNumOfChunks(11, 5), 3); - ASSERT_EQ(ProducerImpl::getNumOfChunks(10, 5), 2); - ASSERT_EQ(ProducerImpl::getNumOfChunks(8, 5), 2); - ASSERT_EQ(ProducerImpl::getNumOfChunks(4, 5), 1); - ASSERT_EQ(ProducerImpl::getNumOfChunks(1, 0), 1); -} - -TEST(ProducerTest, testBacklogQuotasExceeded) { - std::string ns = "public/test-backlog-quotas"; - std::string topic = ns + "/testBacklogQuotasExceeded" + std::to_string(time(nullptr)); - - int res = makePutRequest(adminUrl + "admin/v2/persistent/" + topic + "/partitions", "5"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - LOG_INFO("Created topic " << topic << " with 5 partitions"); - - auto setBacklogPolicy = [&ns](const std::string& policy, int limitSize) { - const auto body = R"({"policy":")" + policy + R"(","limitSize":)" + std::to_string(limitSize) + "}"; - int res = makePostRequest(adminUrl + "admin/v2/namespaces/" + ns + "/backlogQuota", body); - LOG_INFO(res << " | Change the backlog policy to: " << body); - ASSERT_TRUE(res == 204 || res == 409); - std::this_thread::sleep_for(std::chrono::seconds(1)); - }; - - Client client(serviceUrl); - - // Create a topic with backlog size that is 
greater than 1024 - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); // create a cursor - Producer producer; - - const auto partition = topic + "-partition-0"; - ASSERT_EQ(ResultOk, client.createProducer(partition, producer)); - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent(std::string(1024L, 'a')).build())); - ASSERT_EQ(ResultOk, producer.close()); - - setBacklogPolicy("producer_request_hold", 1024); - ASSERT_EQ(ResultProducerBlockedQuotaExceededError, client.createProducer(topic, producer)); - ASSERT_EQ(ResultProducerBlockedQuotaExceededError, client.createProducer(partition, producer)); - - setBacklogPolicy("producer_exception", 1024); - ASSERT_EQ(ResultProducerBlockedQuotaExceededException, client.createProducer(topic, producer)); - ASSERT_EQ(ResultProducerBlockedQuotaExceededException, client.createProducer(partition, producer)); - - setBacklogPolicy("consumer_backlog_eviction", 1024); - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - ASSERT_EQ(ResultOk, client.createProducer(partition, producer)); - - client.close(); -} - -class ProducerTest : public ::testing::TestWithParam {}; - -TEST_P(ProducerTest, testMaxMessageSize) { - Client client(serviceUrl); - - const std::string topic = "ProducerTest-NoBatchMaxMessageSize-" + std::to_string(time(nullptr)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); - - Producer producer; - ProducerConfiguration conf; - conf.setBatchingEnabled(GetParam()); - ASSERT_EQ(ResultOk, client.createProducer(topic, conf, producer)); - - std::string msg = std::string(maxMessageSize / 2, 'a'); - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent(msg).build())); - Message message; - ASSERT_EQ(ResultOk, consumer.receive(message)); - ASSERT_EQ(msg, message.getDataAsString()); - - std::string orderKey = std::string(maxMessageSize, 'a'); - ASSERT_EQ(ResultMessageTooBig, 
producer.send(MessageBuilder().setOrderingKey(orderKey).build())); - - ASSERT_EQ(ResultMessageTooBig, - producer.send(MessageBuilder().setContent(std::string(maxMessageSize, 'b')).build())); - - client.close(); -} - -TEST_P(ProducerTest, testChunkingMaxMessageSize) { - Client client(serviceUrl); - - const std::string topic = "ProducerTest-ChunkingMaxMessageSize-" + std::to_string(time(nullptr)); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); - - Producer producer; - ProducerConfiguration conf; - conf.setBatchingEnabled(false); - conf.setChunkingEnabled(true); - ASSERT_EQ(ResultOk, client.createProducer(topic, conf, producer)); - - std::string orderKey = std::string(maxMessageSize, 'a'); - ASSERT_EQ(ResultMessageTooBig, producer.send(MessageBuilder().setOrderingKey(orderKey).build())); - - std::string msg = std::string(2 * maxMessageSize + 10, 'b'); - Message message; - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent(msg).build())); - ASSERT_EQ(ResultOk, consumer.receive(message)); - ASSERT_EQ(msg, message.getDataAsString()); - ASSERT_LE(1L, message.getMessageId().entryId()); - - client.close(); -} - -TEST(ProducerTest, testExclusiveProducer) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testExclusiveProducer"; - - Producer producer1; - ProducerConfiguration producerConfiguration1; - producerConfiguration1.setProducerName("p-name-1"); - producerConfiguration1.setAccessMode(ProducerConfiguration::Exclusive); - - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConfiguration1, producer1)); - - Producer producer2; - ProducerConfiguration producerConfiguration2; - producerConfiguration2.setProducerName("p-name-2"); - producerConfiguration2.setAccessMode(ProducerConfiguration::Exclusive); - ASSERT_EQ(ResultProducerFenced, client.createProducer(topicName, producerConfiguration2, producer2)); - - Producer producer3; - ProducerConfiguration producerConfiguration3; - 
producerConfiguration3.setProducerName("p-name-3"); - ASSERT_EQ(ResultProducerBusy, client.createProducer(topicName, producerConfiguration3, producer3)); -} - -INSTANTIATE_TEST_CASE_P(Pulsar, ProducerTest, ::testing::Values(true, false)); diff --git a/pulsar-client-cpp/tests/PromiseTest.cc b/pulsar-client-cpp/tests/PromiseTest.cc deleted file mode 100644 index 73c6f8c230846..0000000000000 --- a/pulsar-client-cpp/tests/PromiseTest.cc +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include -#include - -using namespace pulsar; - -TEST(PromiseTest, testSetValue) { - Promise promise; - std::thread t{[promise] { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - promise.setValue("hello"); - }}; - t.detach(); - - std::string value; - ASSERT_EQ(promise.getFuture().get(value), 0); - ASSERT_EQ(value, "hello"); -} - -TEST(PromiseTest, testSetFailed) { - Promise promise; - std::thread t{[promise] { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - promise.setFailed(-1); - }}; - t.detach(); - - std::string value; - ASSERT_EQ(promise.getFuture().get(value), -1); - ASSERT_EQ(value, ""); -} - -TEST(PromiseTest, testListeners) { - Promise promise; - auto future = promise.getFuture(); - - bool resultSetFailed = true; - bool resultSetValue = true; - std::vector results; - std::vector values; - - future - .addListener([promise, &resultSetFailed, &results, &values](int result, const std::string& value) { - resultSetFailed = promise.setFailed(-1L); - results.emplace_back(result); - values.emplace_back(value); - }) - .addListener([promise, &resultSetValue, &results, &values](int result, const std::string& value) { - resultSetValue = promise.setValue("WRONG"); - results.emplace_back(result); - values.emplace_back(value); - }); - - promise.setValue("hello"); - std::string value; - ASSERT_EQ(future.get(value), 0); - ASSERT_EQ(value, "hello"); - - ASSERT_FALSE(resultSetFailed); - ASSERT_FALSE(resultSetValue); - ASSERT_EQ(results, (std::vector(2, 0))); - ASSERT_EQ(values, (std::vector(2, "hello"))); -} diff --git a/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc b/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc deleted file mode 100644 index df1f9c6055782..0000000000000 --- a/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include -#include "PaddingDemo.pb.h" -#include "Test.pb.h" // generated from "pulsar-client/src/test/proto/Test.proto" - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; - -TEST(ProtobufNativeSchemaTest, testSchemaJson) { - const std::string expectedSchemaJson = - "{\"fileDescriptorSet\":" - "\"CtMDCgpUZXN0LnByb3RvEgVwcm90bxoSRXh0ZXJuYWxUZXN0LnByb3RvImUKClN1Yk1lc3NhZ2USCwoDZm9vGAEgASgJEgsKA2" - "JhchgCIAEoARo9Cg1OZXN0ZWRNZXNzYWdlEgsKA3VybBgBIAEoCRINCgV0aXRsZRgCIAEoCRIQCghzbmlwcGV0cxgDIAMoCSLlAQ" - "oLVGVzdE1lc3NhZ2USEwoLc3RyaW5nRmllbGQYASABKAkSEwoLZG91YmxlRmllbGQYAiABKAESEAoIaW50RmllbGQYBiABKAUSIQ" - "oIdGVzdEVudW0YBCABKA4yDy5wcm90by5UZXN0RW51bRImCgtuZXN0ZWRGaWVsZBgFIAEoCzIRLnByb3RvLlN1Yk1lc3NhZ2USFQ" - "oNcmVwZWF0ZWRGaWVsZBgKIAMoCRI4Cg9leHRlcm5hbE1lc3NhZ2UYCyABKAsyHy5wcm90by5leHRlcm5hbC5FeHRlcm5hbE1lc3" - "NhZ2UqJAoIVGVzdEVudW0SCgoGU0hBUkVEEAASDAoIRkFJTE9WRVIQAUItCiVvcmcuYXBhY2hlLnB1bHNhci5jbGllbnQuc2NoZW" - "1hLnByb3RvQgRUZXN0YgZwcm90bzMKoAEKEkV4dGVybmFsVGVzdC5wcm90bxIOcHJvdG8uZXh0ZXJuYWwiOwoPRXh0ZXJuYWxNZX" - "NzYWdlEhMKC3N0cmluZ0ZpZWxkGAEgASgJEhMKC2RvdWJsZUZpZWxkGAIgASgBQjUKJW9yZy5hcGFjaGUucHVsc2FyLmNsaWVudC" - 
"5zY2hlbWEucHJvdG9CDEV4dGVybmFsVGVzdGIGcHJvdG8z\",\"rootMessageTypeName\":\"proto.TestMessage\"," - "\"rootFileDescriptorName\":\"Test.proto\"}"; - const auto schemaInfo = createProtobufNativeSchema(::proto::TestMessage::GetDescriptor()); - - ASSERT_EQ(schemaInfo.getSchemaType(), pulsar::PROTOBUF_NATIVE); - ASSERT_TRUE(schemaInfo.getName().empty()); - ASSERT_EQ(schemaInfo.getSchema(), expectedSchemaJson); - ASSERT_TRUE(schemaInfo.getProperties().empty()); -} - -TEST(ProtobufNativeSchemaTest, testAutoCreateSchema) { - const std::string topicPrefix = "ProtobufNativeSchemaTest-testAutoCreateSchema-"; - Client client(lookupUrl); - - const auto schemaInfo = createProtobufNativeSchema(::proto::TestMessage::GetDescriptor()); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicPrefix + "producer", - ProducerConfiguration().setSchema(schemaInfo), producer)); - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topicPrefix + "consumer", "my-sub", - ConsumerConfiguration().setSchema(schemaInfo), consumer)); - client.close(); -} - -TEST(ProtobufNativeSchemaTest, testSchemaIncompatibility) { - const std::string topic = "ProtobufNativeSchemaTest-testSchemaIncompatibility"; - Client client(lookupUrl); - - Producer producer; - auto createProducerResult = [&](const google::protobuf::Descriptor* descriptor) { - return client.createProducer( - topic, ProducerConfiguration().setSchema(createProtobufNativeSchema(descriptor)), producer); - }; - - // Create the protobuf native schema automatically - ASSERT_EQ(ResultOk, createProducerResult(::proto::TestMessage::GetDescriptor())); - producer.close(); - - // Try to create producer with another protobuf generated class - ASSERT_EQ(ResultIncompatibleSchema, - createProducerResult(::proto::external::ExternalMessage::GetDescriptor())); - - // Try to create producer with the original schema again - ASSERT_EQ(ResultOk, createProducerResult(::proto::TestMessage::GetDescriptor())); - - // createProtobufNativeSchema() 
cannot accept a null descriptor - try { - createProducerResult(nullptr); - } catch (const std::invalid_argument& e) { - ASSERT_STREQ(e.what(), "descriptor is null"); - } - - client.close(); -} - -TEST(ProtobufNativeSchemaTest, testEndToEnd) { - const std::string topic = "ProtobufSchemaTest-testEndToEnd"; - Client client(lookupUrl); - - const auto schemaInfo = createProtobufNativeSchema(::proto::TestMessage::GetDescriptor()); - Consumer consumer; - ASSERT_EQ(ResultOk, - client.subscribe(topic, "my-sub", ConsumerConfiguration().setSchema(schemaInfo), consumer)); - Producer producer; - ASSERT_EQ(ResultOk, - client.createProducer(topic, ProducerConfiguration().setSchema(schemaInfo), producer)); - - // Send a message that is serialized from a ProtoBuf class - ::proto::TestMessage testMessage; - testMessage.set_testenum(::proto::TestEnum::FAILOVER); - std::string content(testMessage.ByteSizeLong(), '\0'); - testMessage.SerializeToArray(const_cast(content.data()), content.size()); - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent(content).build())); - - // Receive a message and parse it to the ProtoBuf class - ::proto::TestMessage receivedTestMessage; - ASSERT_EQ(receivedTestMessage.testenum(), ::proto::TestEnum::SHARED); - - Message msg; - ASSERT_EQ(ResultOk, consumer.receive(msg, 3000)); - receivedTestMessage.ParseFromArray(msg.getData(), msg.getLength()); - ASSERT_EQ(receivedTestMessage.testenum(), ::proto::TestEnum::FAILOVER); - - ASSERT_TRUE(msg.hasSchemaVersion()); - ASSERT_EQ(msg.getSchemaVersion(), std::string(8L, '\0')); - - client.close(); -} - -TEST(ProtobufNativeSchemaTest, testBase64WithPadding) { - const auto schemaInfo = createProtobufNativeSchema(::padding::demo::Person::GetDescriptor()); - const auto schemaJson = schemaInfo.getSchema(); - size_t pos = schemaJson.find(R"(","rootMessageTypeName":)"); - ASSERT_NE(pos, std::string::npos); - ASSERT_TRUE(pos > 0); - ASSERT_EQ(schemaJson[pos - 1], '='); // the tail of fileDescriptorSet is a padding 
character - - Client client(lookupUrl); - - const std::string topic = "ProtobufSchemaTest-testBase64WithPadding"; - Producer producer; - ASSERT_EQ(ResultOk, - client.createProducer(topic, ProducerConfiguration().setSchema(schemaInfo), producer)); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/PulsarFriend.h b/pulsar-client-cpp/tests/PulsarFriend.h deleted file mode 100644 index d9f9923c7ce2d..0000000000000 --- a/pulsar-client-cpp/tests/PulsarFriend.h +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include - -#include "lib/ClientImpl.h" -#include "lib/ProducerImpl.h" -#include "lib/PartitionedProducerImpl.h" -#include "lib/ConsumerImpl.h" -#include "lib/MultiTopicsConsumerImpl.h" -#include "lib/ReaderImpl.h" -#include "lib/RetryableLookupService.h" - -using std::string; - -namespace pulsar { -class PulsarFriend { - public: - static MessageId getMessageId(int32_t partition, int64_t ledgerId, int64_t entryId, int32_t batchIndex) { - return MessageId(partition, ledgerId, entryId, batchIndex); - } - - static int getBatchIndex(const MessageId& mId) { return mId.batchIndex(); } - - static ProducerStatsImplPtr getProducerStatsPtr(Producer producer) { - ProducerImpl* producerImpl = static_cast(producer.impl_.get()); - return std::static_pointer_cast(producerImpl->producerStatsBasePtr_); - } - - template - static unsigned long sum(std::map m) { - unsigned long sum = 0; - for (typename std::map::iterator iter = m.begin(); iter != m.end(); iter++) { - sum += iter->second; - } - return sum; - } - - static ConsumerStatsImplPtr getConsumerStatsPtr(Consumer consumer) { - ConsumerImpl* consumerImpl = static_cast(consumer.impl_.get()); - return std::static_pointer_cast(consumerImpl->consumerStatsBasePtr_); - } - - static ProducerImpl& getProducerImpl(Producer producer) { - ProducerImpl* producerImpl = static_cast(producer.impl_.get()); - return *producerImpl; - } - - static ProducerImpl& getInternalProducerImpl(Producer producer, int index) { - PartitionedProducerImpl* producerImpl = static_cast(producer.impl_.get()); - return *(producerImpl->producers_[index]); - } - - static void producerFailMessages(Producer producer, Result result) { - producer.producerFailMessages(result); - } - - static ConsumerImpl& getConsumerImpl(Consumer consumer) { - ConsumerImpl* consumerImpl = static_cast(consumer.impl_.get()); - return *consumerImpl; - } - - static std::shared_ptr getConsumerImplPtr(Consumer consumer) { - return std::static_pointer_cast(consumer.impl_); - } - - static 
ConsumerImplPtr getConsumer(Reader reader) { - return std::static_pointer_cast(reader.impl_->getConsumer().lock()); - } - - static ReaderImplWeakPtr getReaderImplWeakPtr(Reader reader) { return reader.impl_; } - - static decltype(ConsumerImpl::chunkedMessageCache_)& getChunkedMessageCache(Consumer consumer) { - auto consumerImpl = getConsumerImplPtr(consumer); - ConsumerImpl::Lock lock(consumerImpl->chunkProcessMutex_); - return consumerImpl->chunkedMessageCache_; - } - - static std::shared_ptr getMultiTopicsConsumerImplPtr(Consumer consumer) { - return std::static_pointer_cast(consumer.impl_); - } - - static std::shared_ptr getClientImplPtr(Client client) { return client.impl_; } - - static ClientImpl::ProducersList& getProducers(const Client& client) { - return getClientImplPtr(client)->producers_; - } - - static ClientImpl::ConsumersList& getConsumers(const Client& client) { - return getClientImplPtr(client)->consumers_; - } - - static void setNegativeAckEnabled(Consumer consumer, bool enabled) { - consumer.impl_->setNegativeAcknowledgeEnabledForTesting(enabled); - } - - static ClientConnectionWeakPtr getClientConnection(HandlerBase& handler) { return handler.connection_; } - - static void setClientConnection(HandlerBase& handler, ClientConnectionWeakPtr conn) { - handler.connection_ = conn; - } - - static boost::posix_time::ptime& getFirstBackoffTime(Backoff& backoff) { - return backoff.firstBackoffTime_; - } - - static void setServiceUrlIndex(ServiceNameResolver& resolver, size_t index) { resolver.index_ = index; } - - static void setServiceUrlIndex(const Client& client, size_t index) { - setServiceUrlIndex(client.impl_->serviceNameResolver_, index); - } - - static size_t getNumberOfPendingTasks(const RetryableLookupService& lookupService) { - return lookupService.backoffTimers_.size(); - } -}; -} // namespace pulsar diff --git a/pulsar-client-cpp/tests/ReaderConfigurationTest.cc b/pulsar-client-cpp/tests/ReaderConfigurationTest.cc deleted file mode 100644 
index 8dc60f44f5822..0000000000000 --- a/pulsar-client-cpp/tests/ReaderConfigurationTest.cc +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/** - * This test only tests the ConsumerConfiguration used for the Reader's internal consumer. - * Because the ReaderConfiguration for Reader itself is meaningless. 
- */ -#include -#include -#include -#include "NoOpsCryptoKeyReader.h" - -using namespace pulsar; - -static const std::string lookupUrl = "pulsar://localhost:6650"; - -TEST(ReaderConfigurationTest, testDefaultConfig) { - const std::string topic = "ReaderConfigurationTest-default-config"; - Client client(lookupUrl); - ReaderConfiguration readerConf; - Reader reader; - - std::unique_lock lock(test::readerConfigTestMutex); - test::readerConfigTestEnabled = true; - ASSERT_EQ(ResultOk, client.createReader(topic, MessageId::earliest(), readerConf, reader)); - const auto consumerConf = test::consumerConfigOfReader.clone(); - test::readerConfigTestEnabled = false; - lock.unlock(); - - ASSERT_EQ(consumerConf.getConsumerType(), ConsumerExclusive); - ASSERT_EQ(consumerConf.getReceiverQueueSize(), 1000); - ASSERT_EQ(consumerConf.isReadCompacted(), false); - ASSERT_EQ(consumerConf.getSchema().getName(), "BYTES"); - ASSERT_EQ(consumerConf.getUnAckedMessagesTimeoutMs(), 0); - ASSERT_EQ(consumerConf.getTickDurationInMs(), 1000); - ASSERT_EQ(consumerConf.getAckGroupingTimeMs(), 100); - ASSERT_EQ(consumerConf.getAckGroupingMaxSize(), 1000); - ASSERT_EQ(consumerConf.getCryptoKeyReader().get(), nullptr); - ASSERT_EQ(consumerConf.getCryptoFailureAction(), ConsumerCryptoFailureAction::FAIL); - ASSERT_TRUE(consumerConf.getProperties().empty()); - ASSERT_TRUE(consumerConf.getConsumerName().empty()); - ASSERT_FALSE(consumerConf.hasMessageListener()); - - client.close(); -} - -TEST(ReaderConfigurationTest, testCustomConfig) { - const std::string exampleSchema = - "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\"," - "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}"; - - const std::string topic = "ReaderConfigurationTest-custom-config"; - Client client(lookupUrl); - - const SchemaInfo schema(AVRO, "Avro", exampleSchema, StringMap{{"schema-key", "schema-value"}}); - - ProducerConfiguration producerConf; - producerConf.setSchema(schema); - 
Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf, producer)); - ASSERT_FALSE(producer.getSchemaVersion().empty()); - - ReaderConfiguration readerConf; - readerConf.setSchema(schema); - readerConf.setReaderListener([](Reader, const Message&) {}); - readerConf.setReceiverQueueSize(2000); - readerConf.setReaderName("my-reader"); - readerConf.setReadCompacted(true); - readerConf.setUnAckedMessagesTimeoutMs(11000); - readerConf.setTickDurationInMs(2000); - readerConf.setAckGroupingTimeMs(0); - readerConf.setAckGroupingMaxSize(4096); - const auto cryptoReader = std::make_shared(); - readerConf.setCryptoKeyReader(cryptoReader); - readerConf.setCryptoFailureAction(ConsumerCryptoFailureAction::DISCARD); - const std::map properties{{"key-1", "value-1"}, {"key-2", "value-2"}}; - readerConf.setProperties(properties); - - Reader reader; - std::unique_lock lock(test::readerConfigTestMutex); - test::readerConfigTestEnabled = true; - ASSERT_EQ(ResultOk, client.createReader(topic, MessageId::earliest(), readerConf, reader)); - const auto consumerConf = test::consumerConfigOfReader.clone(); - test::readerConfigTestEnabled = false; - lock.unlock(); - - ASSERT_EQ(consumerConf.getSchema().getName(), schema.getName()); - ASSERT_EQ(consumerConf.getSchema().getSchemaType(), schema.getSchemaType()); - ASSERT_EQ(consumerConf.getSchema().getSchema(), schema.getSchema()); - ASSERT_EQ(consumerConf.getSchema().getProperties(), schema.getProperties()); - - ASSERT_EQ(consumerConf.getConsumerType(), ConsumerExclusive); - ASSERT_TRUE(consumerConf.hasMessageListener()); - ASSERT_EQ(consumerConf.getReceiverQueueSize(), 2000); - ASSERT_EQ(consumerConf.getConsumerName(), "my-reader"); - ASSERT_EQ(consumerConf.isReadCompacted(), true); - ASSERT_EQ(consumerConf.getUnAckedMessagesTimeoutMs(), 11000); - ASSERT_EQ(consumerConf.getTickDurationInMs(), 2000); - ASSERT_EQ(consumerConf.getAckGroupingTimeMs(), 0); - ASSERT_EQ(consumerConf.getAckGroupingMaxSize(), 4096); - 
ASSERT_EQ(consumerConf.getCryptoKeyReader(), cryptoReader); - ASSERT_EQ(consumerConf.getCryptoFailureAction(), ConsumerCryptoFailureAction::DISCARD); - ASSERT_EQ(consumerConf.getProperties(), properties); - ASSERT_TRUE(consumerConf.hasProperty("key-1")); - ASSERT_EQ(consumerConf.getProperty("key-1"), "value-1"); - ASSERT_TRUE(consumerConf.hasProperty("key-2")); - ASSERT_EQ(consumerConf.getProperty("key-2"), "value-2"); - ASSERT_FALSE(consumerConf.hasProperty("key-3")); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/ReaderTest.cc b/pulsar-client-cpp/tests/ReaderTest.cc deleted file mode 100644 index 799702f9b4c11..0000000000000 --- a/pulsar-client-cpp/tests/ReaderTest.cc +++ /dev/null @@ -1,608 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include "HttpHelper.h" -#include "PulsarFriend.h" - -#include - -#include -#include - -#include -#include -#include -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static std::string serviceUrl = "pulsar://localhost:6650"; -static const std::string adminUrl = "http://localhost:8080/"; - -TEST(ReaderTest, testSimpleReader) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/test-simple-reader"; - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - for (int i = 0; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader.close(); - client.close(); -} - -TEST(ReaderTest, testReaderAfterMessagesWerePublished) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testReaderAfterMessagesWerePublished"; - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - for (int i = 0; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string 
expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader.close(); - client.close(); -} - -TEST(ReaderTest, testMultipleReaders) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testMultipleReaders"; - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ReaderConfiguration readerConf; - Reader reader1; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader1)); - - Reader reader2; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader2)); - - for (int i = 0; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader1.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - for (int i = 0; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader2.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader1.close(); - reader2.close(); - client.close(); -} - -TEST(ReaderTest, testReaderOnLastMessage) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testReaderOnLastMessage"; - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, 
MessageId::latest(), readerConf, reader)); - - for (int i = 10; i < 20; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - for (int i = 10; i < 20; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader.close(); - client.close(); -} - -TEST(ReaderTest, testReaderOnSpecificMessage) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testReaderOnSpecificMessage"; - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - MessageId lastMessageId; - - for (int i = 0; i < 5; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - - lastMessageId = msg.getMessageId(); - } - - // Create another reader starting on msgid4 - ASSERT_EQ(ResultOk, client.createReader(topicName, lastMessageId, readerConf, reader)); - - for (int i = 5; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader.close(); - client.close(); -} - -/** - * Test that we can position on a particular message even within a batch - */ 
-TEST(ReaderTest, testReaderOnSpecificMessageWithBatches) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testReaderOnSpecificMessageWithBatches"; - - Producer producer; - // Enable batching - ProducerConfiguration producerConf; - producerConf.setBatchingEnabled(true); - producerConf.setBatchingMaxPublishDelayMs(1000); - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConf, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - producer.sendAsync(msg, NULL); - } - - // Send one sync message, to wait for everything before to be persisted as well - std::string content = "my-message-10"; - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - std::string lastMessageId; - - for (int i = 0; i < 5; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - - msg.getMessageId().serialize(lastMessageId); - } - - // Create another reader starting on msgid4 - auto msgId4 = MessageId::deserialize(lastMessageId); - Reader reader2; - ASSERT_EQ(ResultOk, client.createReader(topicName, msgId4, readerConf, reader2)); - - for (int i = 5; i < 11; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader2.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader.close(); - reader2.close(); - client.close(); -} - -TEST(ReaderTest, testReaderReachEndOfTopic) { - Client client(serviceUrl); - - std::string topicName = 
"persistent://public/default/testReaderReachEndOfTopic"; - - // 1. create producer - Producer producer; - // Enable batching - ProducerConfiguration producerConf; - producerConf.setBatchingEnabled(true); - producerConf.setBatchingMaxPublishDelayMs(1000); - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConf, producer)); - - // 2. create reader, and expect hasMessageAvailable return false since no message produced. - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::latest(), readerConf, reader)); - - bool hasMessageAvailable; - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - ASSERT_FALSE(hasMessageAvailable); - - // 3. produce 10 messages. - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - // 4. expect hasMessageAvailable return true, and after read 10 messages out, it return false. - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - ASSERT_TRUE(hasMessageAvailable); - - int readMessageCount = 0; - for (; hasMessageAvailable; readMessageCount++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(readMessageCount); - ASSERT_EQ(expected, content); - reader.hasMessageAvailable(hasMessageAvailable); - } - - ASSERT_EQ(readMessageCount, 10); - ASSERT_FALSE(hasMessageAvailable); - - // 5. produce another 10 messages, expect hasMessageAvailable return true, - // and after read these 10 messages out, it return false. 
- for (int i = 10; i < 20; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - ASSERT_TRUE(hasMessageAvailable); - - for (; hasMessageAvailable; readMessageCount++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(readMessageCount); - ASSERT_EQ(expected, content); - reader.hasMessageAvailable(hasMessageAvailable); - } - ASSERT_EQ(readMessageCount, 20); - ASSERT_FALSE(hasMessageAvailable); - - producer.close(); - reader.close(); - client.close(); -} - -TEST(ReaderTest, testReaderReachEndOfTopicMessageWithoutBatches) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testReaderReachEndOfTopicMessageWithBatches"; - - // 1. create producer - Producer producer; - ProducerConfiguration producerConf; - producerConf.setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topicName, producerConf, producer)); - - // 2. create reader, and expect hasMessageAvailable return false since no message produced. - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::latest(), readerConf, reader)); - - bool hasMessageAvailable; - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - ASSERT_FALSE(hasMessageAvailable); - - // 3. produce 10 messages in batches way. 
- for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - producer.sendAsync(msg, NULL); - } - // Send one sync message, to wait for everything before to be persisted as well - std::string content = "my-message-10"; - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - - // 4. expect hasMessageAvailable return true, and after read 11 messages out, it return false. - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - ASSERT_TRUE(hasMessageAvailable); - - std::string lastMessageId; - int readMessageCount = 0; - for (; hasMessageAvailable; readMessageCount++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(readMessageCount); - ASSERT_EQ(expected, content); - reader.hasMessageAvailable(hasMessageAvailable); - msg.getMessageId().serialize(lastMessageId); - } - ASSERT_FALSE(hasMessageAvailable); - ASSERT_EQ(readMessageCount, 11); - - producer.close(); - reader.close(); - client.close(); -} - -TEST(ReaderTest, testPartitionIndex) { - Client client(serviceUrl); - - const std::string nonPartitionedTopic = "ReaderTestPartitionIndex-topic-" + std::to_string(time(nullptr)); - const std::string partitionedTopic = - "ReaderTestPartitionIndex-par-topic-" + std::to_string(time(nullptr)); - - int res = makePutRequest( - adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "2"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - const std::string partition0 = partitionedTopic + "-partition-0"; - const std::string partition1 = partitionedTopic + "-partition-1"; - - ReaderConfiguration readerConf; - Reader readers[3]; - ASSERT_EQ(ResultOk, - client.createReader(nonPartitionedTopic, MessageId::earliest(), readerConf, readers[0])); - ASSERT_EQ(ResultOk, 
client.createReader(partition0, MessageId::earliest(), readerConf, readers[1])); - ASSERT_EQ(ResultOk, client.createReader(partition1, MessageId::earliest(), readerConf, readers[2])); - - Producer producers[3]; - ASSERT_EQ(ResultOk, client.createProducer(nonPartitionedTopic, producers[0])); - ASSERT_EQ(ResultOk, client.createProducer(partition0, producers[1])); - ASSERT_EQ(ResultOk, client.createProducer(partition1, producers[2])); - - for (auto& producer : producers) { - ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent("hello").build())); - } - - Message msg; - readers[0].readNext(msg); - ASSERT_EQ(msg.getMessageId().partition(), -1); - readers[1].readNext(msg); - ASSERT_EQ(msg.getMessageId().partition(), 0); - readers[2].readNext(msg); - ASSERT_EQ(msg.getMessageId().partition(), 1); - - client.close(); -} - -TEST(ReaderTest, testSubscriptionNameSetting) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/test-subscription-name-setting"; - std::string subName = "test-sub"; - - ReaderConfiguration readerConf; - readerConf.setInternalSubscriptionName(subName); - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - ASSERT_EQ(subName, PulsarFriend::getConsumer(reader)->getSubscriptionName()); - - reader.close(); - client.close(); -} - -TEST(ReaderTest, testSetSubscriptionNameAndPrefix) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testSetSubscriptionNameAndPrefix"; - std::string subName = "test-sub"; - - ReaderConfiguration readerConf; - readerConf.setInternalSubscriptionName(subName); - readerConf.setSubscriptionRolePrefix("my-prefix"); - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - ASSERT_EQ(subName, PulsarFriend::getConsumer(reader)->getSubscriptionName()); - - reader.close(); - client.close(); -} - -TEST(ReaderTest, 
testMultiSameSubscriptionNameReaderShouldFail) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testMultiSameSubscriptionNameReaderShouldFail"; - std::string subscriptionName = "test-sub"; - - ReaderConfiguration readerConf1; - readerConf1.setInternalSubscriptionName(subscriptionName); - Reader reader1; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf1, reader1)); - - ReaderConfiguration readerConf2; - readerConf2.setInternalSubscriptionName(subscriptionName); - Reader reader2; - ASSERT_EQ(ResultConsumerBusy, - client.createReader(topicName, MessageId::earliest(), readerConf2, reader2)); - - reader1.close(); - reader2.close(); - client.close(); -} - -TEST(ReaderTest, testIsConnected) { - const std::string topic = "testReaderIsConnected-" + std::to_string(time(nullptr)); - Client client(serviceUrl); - - Reader reader; - ASSERT_FALSE(reader.isConnected()); - - ASSERT_EQ(ResultOk, client.createReader(topic, MessageId::earliest(), {}, reader)); - ASSERT_TRUE(reader.isConnected()); - - ASSERT_EQ(ResultOk, reader.close()); - ASSERT_FALSE(reader.isConnected()); -} - -TEST(ReaderTest, testHasMessageAvailableWhenCreated) { - const std::string topic = "testHasMessageAvailableWhenCreated-" + std::to_string(time(nullptr)); - Client client(serviceUrl); - - ProducerConfiguration producerConf; - producerConf.setBatchingMaxMessages(3); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf, producer)); - - std::vector messageIds; - constexpr int numMessages = 7; - Latch latch(numMessages); - for (int i = 0; i < numMessages; i++) { - producer.sendAsync(MessageBuilder().setContent("msg-" + std::to_string(i)).build(), - [i, &messageIds, &latch](Result result, const MessageId& messageId) { - if (result == ResultOk) { - LOG_INFO("Send " << i << " to " << messageId); - messageIds.emplace_back(messageId); - } else { - LOG_ERROR("Failed to send " << i << ": " << messageId); - } - 
latch.countdown(); - }); - } - latch.wait(std::chrono::seconds(3)); - ASSERT_EQ(messageIds.size(), numMessages); - - Reader reader; - bool hasMessageAvailable; - - for (size_t i = 0; i < messageIds.size() - 1; i++) { - ASSERT_EQ(ResultOk, client.createReader(topic, messageIds[i], {}, reader)); - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - EXPECT_TRUE(hasMessageAvailable); - } - - // The start message ID is exclusive by default, so when we start at the last message, there should be no - // message available. - ASSERT_EQ(ResultOk, client.createReader(topic, messageIds.back(), {}, reader)); - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - EXPECT_FALSE(hasMessageAvailable); - client.close(); -} - -TEST(ReaderTest, testReceiveAfterSeek) { - Client client(serviceUrl); - const std::string topic = "reader-test-receive-after-seek-" + std::to_string(time(nullptr)); - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); - - MessageId seekMessageId; - for (int i = 0; i < 5; i++) { - MessageId messageId; - producer.send(MessageBuilder().setContent("msg-" + std::to_string(i)).build(), messageId); - if (i == 3) { - seekMessageId = messageId; - } - } - - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topic, MessageId::latest(), {}, reader)); - - reader.seek(seekMessageId); - - bool hasMessageAvailable; - ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/RoundRobinMessageRouterTest.cc b/pulsar-client-cpp/tests/RoundRobinMessageRouterTest.cc deleted file mode 100644 index ce5ad170b8dee..0000000000000 --- a/pulsar-client-cpp/tests/RoundRobinMessageRouterTest.cc +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include - -#include "../lib/RoundRobinMessageRouter.h" -#include "../lib/TopicMetadataImpl.h" - -using namespace pulsar; - -TEST(RoundRobinMessageRouterTest, onePartition) { - const int numPartitions = 1; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, false, 1, 1, - boost::posix_time::milliseconds(0)); - - Message msg1 = MessageBuilder().setPartitionKey("my-key-1").setContent("one").build(); - Message msg2 = MessageBuilder().setPartitionKey("my-key-2").setContent("two").build(); - Message msg3 = MessageBuilder().setContent("three").build(); - - int p1 = router.getPartition(msg1, TopicMetadataImpl(numPartitions)); - int p2 = router.getPartition(msg2, TopicMetadataImpl(numPartitions)); - int p3 = router.getPartition(msg3, TopicMetadataImpl(numPartitions)); - ASSERT_EQ(p1, 0); - ASSERT_EQ(p2, 0); - ASSERT_EQ(p3, 0); -} - -TEST(RoundRobinMessageRouterTest, sameKey) { - const int numPartitions = 13; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, false, 1, 1, - boost::posix_time::milliseconds(0)); - - Message msg1 = MessageBuilder().setPartitionKey("my-key").setContent("one").build(); - Message msg2 = MessageBuilder().setPartitionKey("my-key").setContent("two").build(); - - int p1 = router.getPartition(msg1, TopicMetadataImpl(numPartitions)); - int p2 = router.getPartition(msg2, 
TopicMetadataImpl(numPartitions)); - ASSERT_EQ(p2, p1); -} - -TEST(RoundRobinMessageRouterTest, batchingDisabled) { - const int numPartitions = 13; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, false, 1, 1, - boost::posix_time::milliseconds(0)); - - Message msg1 = MessageBuilder().setContent("one").build(); - Message msg2 = MessageBuilder().setContent("two").build(); - - int p1 = router.getPartition(msg1, TopicMetadataImpl(numPartitions)); - int p2 = router.getPartition(msg2, TopicMetadataImpl(numPartitions)); - ASSERT_EQ(p2, (p1 + 1) % numPartitions); -} - -TEST(RoundRobinMessageRouterTest, batchingEnabled) { - const int numPartitions = 13; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, true, 1000, 100000, - boost::posix_time::seconds(1)); - - int p = -1; - for (int i = 0; i < 100; i++) { - Message msg = MessageBuilder().setContent("0123456789").build(); - - int p1 = router.getPartition(msg, TopicMetadataImpl(numPartitions)); - if (p != -1) { - ASSERT_EQ(p1, p); - } - - p = p1; - } -} - -TEST(RoundRobinMessageRouterTest, maxDelay) { - const int numPartitions = 13; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, true, 1000, 100000, - boost::posix_time::seconds(1)); - - int p1 = -1; - for (int i = 0; i < 100; i++) { - Message msg = MessageBuilder().setContent("0123456789").build(); - - int p = router.getPartition(msg, TopicMetadataImpl(numPartitions)); - if (p1 != -1) { - ASSERT_EQ(p1, p); - } - - p1 = p; - } - - std::this_thread::sleep_for(std::chrono::seconds(1)); - - // Second set of messages will go in separate partition - - int p2 = -1; - for (int i = 0; i < 100; i++) { - Message msg = MessageBuilder().setContent("0123456789").build(); - - int p = router.getPartition(msg, TopicMetadataImpl(numPartitions)); - if (p2 != -1) { - ASSERT_EQ(p2, p); - } - - p2 = p; - } - - ASSERT_EQ(p2, (p1 + 1) % numPartitions); -} - -TEST(RoundRobinMessageRouterTest, maxNumberOfMessages) { - const int numPartitions = 
13; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, true, 2, 1000, - boost::posix_time::seconds(1)); - - Message msg1 = MessageBuilder().setContent("one").build(); - Message msg2 = MessageBuilder().setContent("two").build(); - Message msg3 = MessageBuilder().setContent("tree").build(); - - TopicMetadataImpl tm = TopicMetadataImpl(numPartitions); - int p1 = router.getPartition(msg1, tm); - int p2 = router.getPartition(msg2, tm); - int p3 = router.getPartition(msg3, tm); - ASSERT_EQ(p1, p2); - ASSERT_EQ(p3, (p2 + 1) % numPartitions); -} - -TEST(RoundRobinMessageRouterTest, maxBatchSize) { - const int numPartitions = 13; - - RoundRobinMessageRouter router(ProducerConfiguration::BoostHash, true, 10, 8, - boost::posix_time::seconds(1)); - - Message msg1 = MessageBuilder().setContent("one").build(); - Message msg2 = MessageBuilder().setContent("two").build(); - Message msg3 = MessageBuilder().setContent("tree").build(); - - TopicMetadataImpl tm = TopicMetadataImpl(numPartitions); - int p1 = router.getPartition(msg1, tm); - int p2 = router.getPartition(msg2, tm); - int p3 = router.getPartition(msg3, tm); - ASSERT_EQ(p1, p2); - ASSERT_EQ(p3, (p2 + 1) % numPartitions); -} diff --git a/pulsar-client-cpp/tests/SchemaTest.cc b/pulsar-client-cpp/tests/SchemaTest.cc deleted file mode 100644 index f15365219f748..0000000000000 --- a/pulsar-client-cpp/tests/SchemaTest.cc +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include - -using namespace pulsar; - -static std::string lookupUrl = "pulsar://localhost:6650"; - -static const std::string exampleSchema = - "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\"," - "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}"; - -TEST(SchemaTest, testSchema) { - ClientConfiguration config; - Client client(lookupUrl); - Result res; - - Producer producer; - ProducerConfiguration producerConf; - producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema)); - res = client.createProducer("topic-avro", producerConf, producer); - ASSERT_EQ(res, ResultOk); - - // Check schema version - ASSERT_FALSE(producer.getSchemaVersion().empty()); - producer.close(); - - ASSERT_EQ(ResultOk, res); - - // Creating producer with no schema on same topic should fail - producerConf.setSchema(SchemaInfo(JSON, "Json", "{}")); - res = client.createProducer("topic-avro", producerConf, producer); - ASSERT_EQ(ResultIncompatibleSchema, res); - - // Creating producer with no schema on same topic should succeed - // because standalone broker is configured by default to not - // require the schema to be set - res = client.createProducer("topic-avro", producer); - ASSERT_EQ(ResultOk, res); - - ConsumerConfiguration consumerConf; - Consumer consumer; - // Subscribing with no schema will still succeed - res = client.subscribe("topic-avro", "sub-1", consumerConf, consumer); - ASSERT_EQ(ResultOk, res); - - // Subscribing with same Avro schema will succeed - consumerConf.setSchema(SchemaInfo(AVRO, 
"Avro", exampleSchema)); - res = client.subscribe("topic-avro", "sub-2", consumerConf, consumer); - ASSERT_EQ(ResultOk, res); - - // Subscribing with different schema type will fail - consumerConf.setSchema(SchemaInfo(JSON, "Json", "{}")); - res = client.subscribe("topic-avro", "sub-2", consumerConf, consumer); - ASSERT_EQ(ResultIncompatibleSchema, res); - - client.close(); -} - -TEST(SchemaTest, testHasSchemaVersion) { - Client client(lookupUrl); - std::string topic = "SchemaTest-HasSchemaVersion"; - SchemaInfo stringSchema(SchemaType::STRING, "String", ""); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic + "1", "sub", ConsumerConfiguration().setSchema(stringSchema), - consumer)); - Producer batchedProducer; - ASSERT_EQ(ResultOk, client.createProducer(topic + "1", ProducerConfiguration().setSchema(stringSchema), - batchedProducer)); - Producer nonBatchedProducer; - ASSERT_EQ(ResultOk, client.createProducer(topic + "1", ProducerConfiguration().setSchema(stringSchema), - nonBatchedProducer)); - - ASSERT_EQ(ResultOk, batchedProducer.send(MessageBuilder().setContent("msg-0").build())); - ASSERT_EQ(ResultOk, nonBatchedProducer.send(MessageBuilder().setContent("msg-1").build())); - - Message msgs[2]; - ASSERT_EQ(ResultOk, consumer.receive(msgs[0], 3000)); - ASSERT_EQ(ResultOk, consumer.receive(msgs[1], 3000)); - - std::string schemaVersion(8, '\0'); - ASSERT_EQ(msgs[0].getDataAsString(), "msg-0"); - ASSERT_TRUE(msgs[0].hasSchemaVersion()); - ASSERT_EQ(msgs[0].getSchemaVersion(), schemaVersion); - - ASSERT_EQ(msgs[1].getDataAsString(), "msg-1"); - ASSERT_TRUE(msgs[1].hasSchemaVersion()); - ASSERT_EQ(msgs[1].getSchemaVersion(), schemaVersion); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/SemaphoreTest.cc b/pulsar-client-cpp/tests/SemaphoreTest.cc deleted file mode 100644 index 0cdec79feda68..0000000000000 --- a/pulsar-client-cpp/tests/SemaphoreTest.cc +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Licensed to the Apache Software Foundation 
(ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#include -#include - -#include "../lib/Semaphore.h" -#include "../lib/Latch.h" - -using namespace pulsar; - -TEST(SemaphoreTest, testLimit) { - Semaphore s(100); - - for (int i = 0; i < 100; i++) { - s.acquire(); - } - - ASSERT_EQ(s.currentUsage(), 100); - ASSERT_FALSE(s.tryAcquire()); - s.release(); - ASSERT_EQ(s.currentUsage(), 99); - - ASSERT_TRUE(s.tryAcquire()); - ASSERT_EQ(s.currentUsage(), 100); -} - -TEST(SemaphoreTest, testStepRelease) { - Semaphore s(100); - - for (int i = 0; i < 100; i++) { - s.acquire(); - } - - Latch l1(1); - std::thread t1([&]() { - s.acquire(); - l1.countdown(); - }); - - Latch l2(1); - std::thread t2([&]() { - s.acquire(); - l2.countdown(); - }); - - Latch l3(1); - std::thread t3([&]() { - s.acquire(); - l3.countdown(); - }); - - // The threads are blocked since the quota is full - ASSERT_FALSE(l1.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l2.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l3.wait(std::chrono::milliseconds(100))); - - ASSERT_EQ(s.currentUsage(), 100); - s.release(); - s.release(); - s.release(); - - ASSERT_TRUE(l1.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l2.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l3.wait(std::chrono::seconds(1))); 
- ASSERT_EQ(s.currentUsage(), 100); - - t1.join(); - t2.join(); - t3.join(); -} - -TEST(SemaphoreTest, testSingleRelease) { - Semaphore s(100); - - s.acquire(100); - ASSERT_EQ(s.currentUsage(), 100); - - Latch l1(1); - std::thread t1([&]() { - s.acquire(); - l1.countdown(); - }); - - Latch l2(1); - std::thread t2([&]() { - s.acquire(); - l2.countdown(); - }); - - Latch l3(1); - std::thread t3([&]() { - s.acquire(); - l3.countdown(); - }); - - // The threads are blocked since the quota is full - ASSERT_FALSE(l1.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l2.wait(std::chrono::milliseconds(100))); - ASSERT_FALSE(l3.wait(std::chrono::milliseconds(100))); - - ASSERT_EQ(s.currentUsage(), 100); - s.release(3); - - ASSERT_TRUE(l1.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l2.wait(std::chrono::seconds(1))); - ASSERT_TRUE(l3.wait(std::chrono::seconds(1))); - ASSERT_EQ(s.currentUsage(), 100); - - t1.join(); - t2.join(); - t3.join(); -} - -TEST(SemaphoreTest, testCloseInterruptOnFull) { - Semaphore s(100); - s.acquire(100); - Latch latch(1); - - auto thread = std::thread([&]() { - bool res = s.acquire(1); - ASSERT_FALSE(res); - latch.countdown(); - }); - - // Sleep to allow for background thread to fill the queue and be blocked there - std::this_thread::sleep_for(std::chrono::seconds(1)); - - s.close(); - bool wasUnblocked = latch.wait(std::chrono::seconds(5)); - - ASSERT_TRUE(wasUnblocked); - thread.join(); -} diff --git a/pulsar-client-cpp/tests/ServiceURITest.cc b/pulsar-client-cpp/tests/ServiceURITest.cc deleted file mode 100644 index 9d4c88fc4972e..0000000000000 --- a/pulsar-client-cpp/tests/ServiceURITest.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include "lib/ServiceURI.h" - -using namespace pulsar; - -static void verifyServiceURIFailure(const std::string& uriString, const std::string& errorMsg) { - try { - ServiceURI uri{uriString}; - std::cerr << uriString << " should be invalid" << std::endl; - FAIL(); - } catch (const std::invalid_argument& e) { - EXPECT_EQ(errorMsg, e.what()); - } -} - -static void verifyServiceURI(const std::string& uriString, PulsarScheme expectedScheme, - const std::vector& expectedServiceHosts) { - ServiceURI uri{uriString}; - EXPECT_EQ(uri.getScheme(), expectedScheme); - EXPECT_EQ(uri.getServiceHosts(), expectedServiceHosts); -} - -TEST(ServiceURITest, testInvalidServiceUris) { - verifyServiceURIFailure("localhost:6650", "The scheme part is missing: localhost:6650"); - verifyServiceURIFailure("unknown://localhost:6650", "Invalid scheme: unknown"); - verifyServiceURIFailure("://localhost:6650", "Expected scheme name at index 0: ://localhost:6650"); - verifyServiceURIFailure("pulsar:///", "authority component is missing in service uri: pulsar:///"); - verifyServiceURIFailure("pulsar://localhost:6650:6651", "invalid hostname: localhost:6650:6651"); - verifyServiceURIFailure("pulsar://localhost:xyz/", "invalid hostname: localhost:xyz"); - verifyServiceURIFailure("pulsar://localhost:-6650/", "invalid hostname: localhost:-6650"); -} - -TEST(ServiceURITest, testPathIgnored) { - 
verifyServiceURI("pulsar://localhost:6650", PulsarScheme::PULSAR, {"pulsar://localhost:6650"}); - verifyServiceURI("pulsar://localhost:6650/", PulsarScheme::PULSAR, {"pulsar://localhost:6650"}); -} - -TEST(ServiceURITest, testMultipleHostsComma) { - verifyServiceURI("pulsar://host1:6650,host2:6650,host3:6650/path/to/namespace", PulsarScheme::PULSAR, - {"pulsar://host1:6650", "pulsar://host2:6650", "pulsar://host3:6650"}); -} - -TEST(ServiceURITest, testMultipleHostsWithoutPulsarPorts) { - verifyServiceURI("pulsar://host1,host2,host3/path/to/namespace", PulsarScheme::PULSAR, - {"pulsar://host1:6650", "pulsar://host2:6650", "pulsar://host3:6650"}); - verifyServiceURI("pulsar+ssl://host1,host2,host3/path/to/namespace", PulsarScheme::PULSAR_SSL, - {"pulsar+ssl://host1:6651", "pulsar+ssl://host2:6651", "pulsar+ssl://host3:6651"}); - verifyServiceURI("http://host1,host2,host3/path/to/namespace", PulsarScheme::HTTP, - {"http://host1:8080", "http://host2:8080", "http://host3:8080"}); - verifyServiceURI("https://host1,host2,host3/path/to/namespace", PulsarScheme::HTTPS, - {"https://host1:8081", "https://host2:8081", "https://host3:8081"}); -} - -TEST(ServiceURITest, testMultipleHostsMixed) { - verifyServiceURI("pulsar://host1:6640,host2,host3:6660/path/to/namespace", PulsarScheme::PULSAR, - {"pulsar://host1:6640", "pulsar://host2:6650", "pulsar://host3:6660"}); -} diff --git a/pulsar-client-cpp/tests/SinglePartitionMessageRouterTest.cc b/pulsar-client-cpp/tests/SinglePartitionMessageRouterTest.cc deleted file mode 100644 index e82d080f516a1..0000000000000 --- a/pulsar-client-cpp/tests/SinglePartitionMessageRouterTest.cc +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include -#include - -#include "tests/mocks/GMockMessage.h" - -#include "../lib/SinglePartitionMessageRouter.h" -#include "../lib/TopicMetadataImpl.h" - -using ::testing::AtLeast; -using ::testing::Return; -using ::testing::ReturnRef; - -using namespace pulsar; - -// TODO: Edit Message class to suit Google Mock and enable these tests when 2.0.0 release. - -TEST(SinglePartitionMessageRouterTest, DISABLED_getPartitionWithoutPartitionKey) { - const int selectedPartition = 1234; - - SinglePartitionMessageRouter router(selectedPartition, 10000, ProducerConfiguration::BoostHash); - - GMockMessage message; - EXPECT_CALL(message, hasPartitionKey()).Times(1).WillOnce(Return(false)); - EXPECT_CALL(message, getPartitionKey()).Times(0); - - ASSERT_EQ(selectedPartition, router.getPartition(message, TopicMetadataImpl(1))); -} - -TEST(SinglePartitionMessageRouterTest, DISABLED_getPartitionWithPartitionKey) { - const int numPartitons = 1234; - - SinglePartitionMessageRouter router(1, numPartitons, ProducerConfiguration::BoostHash); - - std::string partitionKey1 = "key1"; - std::string partitionKey2 = "key2"; - - GMockMessage message1; - EXPECT_CALL(message1, hasPartitionKey()).Times(1).WillOnce(Return(true)); - EXPECT_CALL(message1, getPartitionKey()).Times(1).WillOnce(ReturnRef(partitionKey1)); - - GMockMessage message2; - EXPECT_CALL(message2, 
hasPartitionKey()).Times(1).WillOnce(Return(true)); - EXPECT_CALL(message2, getPartitionKey()).Times(1).WillOnce(ReturnRef(partitionKey2)); - - auto expectedParrtition1 = - static_cast(boost::hash()(partitionKey1) % numPartitons); - auto expectedParrtition2 = - static_cast(boost::hash()(partitionKey2) % numPartitons); - - ASSERT_EQ(expectedParrtition1, router.getPartition(message1, TopicMetadataImpl(numPartitons))); - ASSERT_EQ(expectedParrtition2, router.getPartition(message2, TopicMetadataImpl(numPartitons))); -} \ No newline at end of file diff --git a/pulsar-client-cpp/tests/SynchronizedHashMapTest.cc b/pulsar-client-cpp/tests/SynchronizedHashMapTest.cc deleted file mode 100644 index 8d74a24014a62..0000000000000 --- a/pulsar-client-cpp/tests/SynchronizedHashMapTest.cc +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include -#include -#include "lib/Latch.h" -#include "lib/SynchronizedHashMap.h" - -using namespace pulsar; -using SyncMapType = SynchronizedHashMap; -using OptValue = typename SyncMapType::OptValue; -using PairVector = typename SyncMapType::PairVector; - -inline void sleepMs(long millis) { std::this_thread::sleep_for(std::chrono::milliseconds(millis)); } - -inline PairVector sort(PairVector pairs) { - std::sort(pairs.begin(), pairs.end(), [](const std::pair& lhs, const std::pair& rhs) { - return lhs.first < rhs.first; - }); - return pairs; -} - -TEST(SynchronizedHashMap, testClear) { - SyncMapType m({{1, 100}, {2, 200}}); - m.clear(); - ASSERT_EQ(m.toPairVector(), PairVector{}); - - PairVector expectedPairs({{3, 300}, {4, 400}}); - SyncMapType m2(expectedPairs); - PairVector pairs; - m2.clear([&pairs](const int& key, const int& value) { pairs.emplace_back(key, value); }); - ASSERT_EQ(m2.toPairVector(), PairVector{}); - ASSERT_EQ(sort(pairs), expectedPairs); -} - -TEST(SynchronizedHashMap, testRemoveAndFind) { - SyncMapType m({{1, 100}, {2, 200}, {3, 300}}); - - OptValue optValue; - optValue = m.findFirstValueIf([](const int& x) { return x == 200; }); - ASSERT_TRUE(optValue.is_present()); - ASSERT_EQ(optValue.value(), 200); - - optValue = m.findFirstValueIf([](const int& x) { return x >= 301; }); - ASSERT_FALSE(optValue.is_present()); - - optValue = m.find(1); - ASSERT_TRUE(optValue.is_present()); - ASSERT_EQ(optValue.value(), 100); - - ASSERT_FALSE(m.find(0).is_present()); - ASSERT_FALSE(m.remove(0).is_present()); - - optValue = m.remove(1); - ASSERT_TRUE(optValue.is_present()); - ASSERT_EQ(optValue.value(), 100); - - ASSERT_FALSE(m.remove(1).is_present()); - ASSERT_FALSE(m.find(1).is_present()); -} - -TEST(SynchronizedHashMapTest, testForEach) { - SyncMapType m({{1, 100}, {2, 200}, {3, 300}}); - std::vector values; - m.forEachValue([&values](const int& value) { values.emplace_back(value); }); - 
std::sort(values.begin(), values.end()); - ASSERT_EQ(values, std::vector({100, 200, 300})); - - PairVector pairs; - m.forEach([&pairs](const int& key, const int& value) { pairs.emplace_back(key, value); }); - PairVector expectedPairs({{1, 100}, {2, 200}, {3, 300}}); - ASSERT_EQ(sort(pairs), expectedPairs); -} - -TEST(SynchronizedHashMap, testRecursiveMutex) { - SyncMapType m({{1, 100}}); - OptValue optValue; - m.forEach([&m, &optValue](const int& key, const int& value) { - optValue = m.find(key); // the internal mutex was locked again - }); - ASSERT_TRUE(optValue.is_present()); - ASSERT_EQ(optValue.value(), 100); -} - -TEST(SynchronizedHashMapTest, testThreadSafeForEach) { - SyncMapType m({{1, 100}, {2, 200}, {3, 300}}); - - Latch latch(1); - std::thread t{[&m, &latch] { - latch.wait(); // this thread must start after `m.forEach` started - m.remove(2); - }}; - - std::atomic_bool firstElementDone{false}; - PairVector pairs; - m.forEach([&latch, &firstElementDone, &pairs](const int& key, const int& value) { - pairs.emplace_back(key, value); - if (!firstElementDone) { - latch.countdown(); - firstElementDone = true; - } - sleepMs(200); - }); - { - PairVector expectedPairs({{1, 100}, {2, 200}, {3, 300}}); - ASSERT_EQ(sort(pairs), expectedPairs); - } - t.join(); - { - PairVector expectedPairs({{1, 100}, {3, 300}}); - ASSERT_EQ(sort(m.toPairVector()), expectedPairs); - } -} diff --git a/pulsar-client-cpp/tests/TopicMetadataImplTest.cc b/pulsar-client-cpp/tests/TopicMetadataImplTest.cc deleted file mode 100644 index 091dd26591448..0000000000000 --- a/pulsar-client-cpp/tests/TopicMetadataImplTest.cc +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include - -#include "../lib/TopicMetadataImpl.h" - -using namespace pulsar; - -TEST(TopicMetadataImplTest, numPartitions) { - TopicMetadataImpl topicMetadata(1234); - ASSERT_EQ(1234, topicMetadata.getNumPartitions()); -} diff --git a/pulsar-client-cpp/tests/TopicNameTest.cc b/pulsar-client-cpp/tests/TopicNameTest.cc deleted file mode 100644 index 377a9319fa87e..0000000000000 --- a/pulsar-client-cpp/tests/TopicNameTest.cc +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -using namespace pulsar; - -TEST(TopicNameTest, testLookup) { - std::shared_ptr topicName = TopicName::get("persistent://pulsar/bf1/TESTNS.0/curveballapps"); - std::string lookup_name = topicName->getLookupName(); - ASSERT_EQ(lookup_name, "persistent/pulsar/bf1/TESTNS.0/curveballapps"); -} - -TEST(TopicNameTest, testTopicName) { - // Compare getters and setters - std::shared_ptr topicName = TopicName::get("persistent://property/cluster/namespace/topic"); - ASSERT_EQ("property", topicName->getProperty()); - ASSERT_EQ("cluster", topicName->getCluster()); - ASSERT_EQ("namespace", topicName->getNamespacePortion()); - ASSERT_EQ("persistent", topicName->getDomain()); - ASSERT_EQ(TopicName::getEncodedName("topic"), topicName->getLocalName()); - - // Compare == operator - std::shared_ptr topicName1 = TopicName::get("persistent://p/c/n/d"); - std::shared_ptr topicName2 = TopicName::get("persistent://p/c/n/d"); - ASSERT_TRUE(*topicName1 == *topicName2); -} - -TEST(TopicNameTest, testShortTopicName) { - // "short-topic" - std::shared_ptr tn1 = TopicName::get("short-topic"); - ASSERT_EQ("public", tn1->getProperty()); - ASSERT_EQ("", tn1->getCluster()); - ASSERT_EQ("default", tn1->getNamespacePortion()); - ASSERT_EQ("persistent", tn1->getDomain()); - ASSERT_EQ(TopicName::getEncodedName("short-topic"), tn1->getLocalName()); - - // tenant/namespace/topic - std::shared_ptr tn2 = TopicName::get("tenant/namespace/short-topic"); - ASSERT_EQ("tenant", tn2->getProperty()); - ASSERT_EQ("", tn2->getCluster()); - ASSERT_EQ("namespace", tn2->getNamespacePortion()); - ASSERT_EQ("persistent", tn2->getDomain()); - ASSERT_EQ(TopicName::getEncodedName("short-topic"), tn2->getLocalName()); - - // tenant/cluster/namespace/topic - std::shared_ptr tn3 = TopicName::get("tenant/cluster/namespace/short-topic"); - ASSERT_FALSE(tn3); - - // tenant/cluster - std::shared_ptr tn4 = TopicName::get("tenant/cluster"); - ASSERT_FALSE(tn4); -} - -TEST(TopicNameTest, 
testTopicNameV2) { - // v2 topic names doesn't have "cluster" - std::shared_ptr tn1 = TopicName::get("persistent://tenant/namespace/short-topic"); - ASSERT_EQ("tenant", tn1->getProperty()); - ASSERT_EQ("", tn1->getCluster()); - ASSERT_EQ("namespace", tn1->getNamespacePortion()); - ASSERT_EQ("persistent", tn1->getDomain()); - ASSERT_EQ(TopicName::getEncodedName("short-topic"), tn1->getLocalName()); -} - -TEST(TopicNameTest, testNonPersistentTopicNameV2) { - // v2 topic names doesn't have "cluster" - std::shared_ptr tn1 = TopicName::get("non-persistent://tenant/namespace/short-topic"); - ASSERT_EQ("tenant", tn1->getProperty()); - ASSERT_EQ("", tn1->getCluster()); - ASSERT_EQ("namespace", tn1->getNamespacePortion()); - ASSERT_EQ("non-persistent", tn1->getDomain()); - ASSERT_EQ(TopicName::getEncodedName("short-topic"), tn1->getLocalName()); -} - -TEST(TopicNameTest, testTopicNameWithSlashes) { - // Compare getters and setters - std::shared_ptr topicName = - TopicName::get("persistent://property/cluster/namespace/topic/name/with/slash"); - ASSERT_EQ("property", topicName->getProperty()); - ASSERT_EQ("cluster", topicName->getCluster()); - ASSERT_EQ("namespace", topicName->getNamespacePortion()); - ASSERT_EQ("persistent", topicName->getDomain()); - ASSERT_EQ("topic/name/with/slash", topicName->getLocalName()); - - topicName = TopicName::get("persistent://property/cluster/namespace/topic/ends/with/slash/"); - ASSERT_TRUE(topicName != NULL); - ASSERT_EQ(TopicName::getEncodedName("topic/ends/with/slash/"), topicName->getEncodedLocalName()); - - topicName = TopicName::get("persistent://property/cluster/namespace/`~!@#$%^&*()-_+=[]{}|\\;:'\"<>,./?"); - ASSERT_TRUE(topicName != NULL); - ASSERT_EQ(TopicName::getEncodedName("`~!@#$%^&*()-_+=[]{}|\\;:'\"<>,./?"), - topicName->getEncodedLocalName()); - - topicName = TopicName::get("persistent://property/cluster/namespace/topic@%*)(&!%$#@#$>getEncodedLocalName()); - - topicName = 
TopicName::get("persistent://property/cluster/namespace/topic//with//double//slash//"); - ASSERT_TRUE(topicName != NULL); - ASSERT_EQ(TopicName::getEncodedName("topic//with//double//slash//"), topicName->getEncodedLocalName()); - - topicName = TopicName::get("persistent://property/cluster/namespace//topic/starts/with/slash/"); - ASSERT_TRUE(topicName != NULL); - ASSERT_EQ(TopicName::getEncodedName("/topic/starts/with/slash/"), topicName->getEncodedLocalName()); -} -TEST(TopicNameTest, testEmptyClusterName) { - // Compare getters and setters - std::shared_ptr topicName = TopicName::get("persistent://property//namespace/topic"); - - ASSERT_FALSE(topicName); -} - -TEST(TopicNameTest, testExtraSlashes) { - std::shared_ptr topicName = TopicName::get("persistent://property/cluster//namespace/topic"); - ASSERT_FALSE(topicName); - topicName = TopicName::get("persistent://property//cluster//namespace//topic"); - ASSERT_FALSE(topicName); -} - -TEST(TopicNameTest, testIllegalCharacters) { - std::shared_ptr topicName = - TopicName::get("persistent://prop!!!erty/cluster&)&Name/name%%%space/topic"); - ASSERT_FALSE(topicName); -} - -TEST(TopicNameTest, testIllegalUrl) { - std::shared_ptr topicName = TopicName::get("persistent:::/property/cluster/namespace/topic"); - ASSERT_FALSE(topicName); -} - -TEST(TopicNameTest, testEmptyString) { - std::shared_ptr topicName = TopicName::get(""); - ASSERT_FALSE(topicName); -} - -TEST(TopicNameTest, testExtraArguments) { - std::shared_ptr topicName = - TopicName::get("persistent:::/property/cluster/namespace/topic/some/extra/args"); - ASSERT_FALSE(topicName); -} - -TEST(TopicNameTest, testPartitionIndex) { - // key: topic name, value: partition index - const std::map nameToPartition = { - {"persistent://public/default/xxx-partition-0", 0}, - {"xxx-partition-0", 0}, - {"xxx-partition-4", 4}, - {"xxx-partition-13", 13}, - {"xxx-partition-x", -1}, - // Following cases are not the right behavior, but it's Java client's behavior - 
{"xxx-partition--1", 1}, - {"xxx-partition-00", 0}, - {"xxx-partition-012", 12}, - }; - - for (const auto& kv : nameToPartition) { - const auto& name = kv.first; - const auto& partition = kv.second; - - auto topicName = TopicName::get(name); - ASSERT_EQ(topicName->getPartitionIndex(), TopicName::getPartitionIndex(name)); - ASSERT_EQ(topicName->getPartitionIndex(), partition); - } -} diff --git a/pulsar-client-cpp/tests/UnboundedBlockingQueueTest.cc b/pulsar-client-cpp/tests/UnboundedBlockingQueueTest.cc deleted file mode 100644 index 819c22e59ab24..0000000000000 --- a/pulsar-client-cpp/tests/UnboundedBlockingQueueTest.cc +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include - -#include -#include - -class UnboundedProducerWorker { - private: - std::thread producerThread_; - UnboundedBlockingQueue& queue_; - - public: - UnboundedProducerWorker(UnboundedBlockingQueue& queue) : queue_(queue) {} - - void produce(int number) { - producerThread_ = std::thread(&UnboundedProducerWorker::pushNumbers, this, number); - } - - void pushNumbers(int number) { - for (int i = 1; i <= number; i++) { - queue_.push(i); - } - } - - void join() { producerThread_.join(); } -}; - -class UnboundedConsumerWorker { - private: - std::thread consumerThread_; - UnboundedBlockingQueue& queue_; - - public: - UnboundedConsumerWorker(UnboundedBlockingQueue& queue) : queue_(queue) {} - - void consume(int number) { - consumerThread_ = std::thread(&UnboundedConsumerWorker::popNumbers, this, number); - } - - void popNumbers(int number) { - for (int i = 1; i <= number; i++) { - int poppedElement; - queue_.pop(poppedElement); - } - } - - void join() { consumerThread_.join(); } -}; - -TEST(UnboundedBlockingQueueTest, testBasic) { - size_t size = 5; - UnboundedBlockingQueue queue(size); - - UnboundedProducerWorker producerWorker(queue); - producerWorker.produce(5); - - UnboundedConsumerWorker consumerWorker(queue); - consumerWorker.consume(5); - - producerWorker.join(); - consumerWorker.join(); - - size_t zero = 0; - ASSERT_EQ(zero, queue.size()); -} - -TEST(UnboundedBlockingQueueTest, testQueueOperations) { - size_t size = 5; - UnboundedBlockingQueue queue(size); - for (size_t i = 1; i <= size; i++) { - queue.push(i); - } - ASSERT_EQ(queue.size(), size); - - int cnt = 1; - for (BlockingQueue::const_iterator it = queue.begin(); it != queue.end(); it++) { - ASSERT_EQ(cnt, *it); - ++cnt; - } - - cnt = 1; - for (BlockingQueue::iterator it = queue.begin(); it != queue.end(); it++) { - ASSERT_EQ(cnt, *it); - ++cnt; - } - - int poppedElement; - for (size_t i = 1; i <= size; i++) { - queue.pop(poppedElement); - } - - 
ASSERT_FALSE(queue.peek(poppedElement)); -} - -TEST(UnboundedBlockingQueueTest, testBlockingProducer) { - size_t size = 5; - UnboundedBlockingQueue queue(size); - - UnboundedProducerWorker producerWorker(queue); - producerWorker.produce(8); - - UnboundedConsumerWorker consumerWorker(queue); - consumerWorker.consume(5); - - producerWorker.join(); - consumerWorker.join(); - - size_t three = 3; - ASSERT_EQ(three, queue.size()); -} - -TEST(UnboundedBlockingQueueTest, testBlockingConsumer) { - size_t size = 5; - UnboundedBlockingQueue queue(size); - - UnboundedProducerWorker producerWorker(queue); - producerWorker.produce(5); - - UnboundedConsumerWorker consumerWorker(queue); - consumerWorker.consume(8); - - producerWorker.pushNumbers(3); - - producerWorker.join(); - consumerWorker.join(); - - size_t zero = 0; - ASSERT_EQ(zero, queue.size()); -} - -TEST(UnboundedBlockingQueueTest, testTimeout) { - size_t size = 5; - UnboundedBlockingQueue queue(size); - int value; - bool popReturn = queue.pop(value, std::chrono::seconds(1)); - std::this_thread::sleep_for(std::chrono::seconds(2)); - ASSERT_FALSE(popReturn); -} - -TEST(UnboundedBlockingQueueTest, testCloseInterruptOnEmpty) { - UnboundedBlockingQueue queue(10); - pulsar::Latch latch(1); - - auto thread = std::thread([&]() { - int v; - bool res = queue.pop(v); - ASSERT_FALSE(res); - latch.countdown(); - }); - - // Sleep to allow for background thread to call pop and be blocked there - std::this_thread::sleep_for(std::chrono::seconds(1)); - - queue.close(); - bool wasUnblocked = latch.wait(std::chrono::seconds(5)); - - ASSERT_TRUE(wasUnblocked); - thread.join(); -} diff --git a/pulsar-client-cpp/tests/UrlTest.cc b/pulsar-client-cpp/tests/UrlTest.cc deleted file mode 100644 index 6cd2d1c828651..0000000000000 --- a/pulsar-client-cpp/tests/UrlTest.cc +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "Url.h" -#include - -using namespace pulsar; - -TEST(UrlTest, testUrl) { - Url url; - - ASSERT_TRUE(Url::parse("http://example.com", url)); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(80, url.port()); - - ASSERT_TRUE(Url::parse("https://example.com", url)); - ASSERT_EQ("https", url.protocol()); - ASSERT_EQ(443, url.port()); - - ASSERT_TRUE(Url::parse("http://example.com:8080", url)); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(8080, url.port()); - - ASSERT_TRUE(Url::parse("http://example.com:8080/", url)); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(8080, url.port()); - - ASSERT_TRUE(Url::parse("http://example.com", url)); - ASSERT_EQ("example.com", url.host()); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(80, url.port()); - - ASSERT_TRUE(Url::parse("http://example.com:8080/test/my/path", url)); - ASSERT_EQ("example.com", url.host()); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(8080, url.port()); - - ASSERT_TRUE(Url::parse("http://example.com:8080/test/my/path?key=value#adsasda", url)); - ASSERT_EQ("example.com", url.host()); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(8080, url.port()); - - ASSERT_TRUE(Url::parse("pulsar://example.com:8080", url)); - ASSERT_EQ("example.com", url.host()); - ASSERT_EQ("pulsar", url.protocol()); - 
ASSERT_EQ(8080, url.port()); - - ASSERT_TRUE(Url::parse("pulsar://example.com", url)); - ASSERT_EQ("example.com", url.host()); - ASSERT_EQ("pulsar", url.protocol()); - ASSERT_EQ(6650, url.port()); - - ASSERT_TRUE( - Url::parse("http://env-broker3.messaging.cluster.company.com:4080/lookup/v2/destination/persistent/" - "cmscpp/gq1/TESTNS.4/TOPIC_1490664894335_1?authoritative=false", - url)); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(4080, url.port()); - ASSERT_EQ("/lookup/v2/destination/persistent/cmscpp/gq1/TESTNS.4/TOPIC_1490664894335_1", url.path()); - ASSERT_EQ("/lookup/v2/destination/persistent/cmscpp/gq1/TESTNS.4/", url.pathWithoutFile()); - ASSERT_EQ("TOPIC_1490664894335_1", url.file()); - ASSERT_EQ("?authoritative=false", url.parameter()); - - ASSERT_TRUE(Url::parse( - "http://abc.com:8090/ads/ad/asd/TOPIC_1490664894335_1?authoritative=false,temp=true", url)); - ASSERT_EQ("http", url.protocol()); - ASSERT_EQ(8090, url.port()); - ASSERT_EQ("/ads/ad/asd/TOPIC_1490664894335_1", url.path()); - ASSERT_EQ("/ads/ad/asd/", url.pathWithoutFile()); - ASSERT_EQ("TOPIC_1490664894335_1", url.file()); - ASSERT_EQ("?authoritative=false,temp=true", url.parameter()); -} diff --git a/pulsar-client-cpp/tests/VersionTest.cc b/pulsar-client-cpp/tests/VersionTest.cc deleted file mode 100644 index 57e1e78376200..0000000000000 --- a/pulsar-client-cpp/tests/VersionTest.cc +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include - -TEST(VersionTest, testMacro) { -#ifdef PULSAR_VERSION - ASSERT_GE(PULSAR_VERSION, 2000000); - ASSERT_LE(PULSAR_VERSION, 999999999); -#else - FAIL(); -#endif -} diff --git a/pulsar-client-cpp/tests/WaitUtils.h b/pulsar-client-cpp/tests/WaitUtils.h deleted file mode 100644 index abe3efccff488..0000000000000 --- a/pulsar-client-cpp/tests/WaitUtils.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#pragma once - -#include -#include -#include - -namespace pulsar { - -template -inline void waitUntil(std::chrono::duration timeout, std::function condition) { - auto timeoutMs = std::chrono::duration_cast(timeout).count(); - while (timeoutMs > 0) { - auto now = std::chrono::high_resolution_clock::now(); - if (condition()) { - break; - } - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - auto elapsed = std::chrono::duration_cast( - std::chrono::high_resolution_clock::now() - now) - .count(); - timeoutMs -= elapsed; - } -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/tests/ZLibCompressionTest.cc b/pulsar-client-cpp/tests/ZLibCompressionTest.cc deleted file mode 100644 index c510db159fc9d..0000000000000 --- a/pulsar-client-cpp/tests/ZLibCompressionTest.cc +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -using namespace pulsar; - -TEST(ZLibCompressionTest, compressDecompress) { - CompressionCodecZLib codec; - - std::string payload = "Payload to compress"; - SharedBuffer compressed = codec.encode(SharedBuffer::copy(payload.c_str(), payload.size())); - - SharedBuffer uncompressed; - bool res = codec.decode(compressed, payload.size(), uncompressed); - ASSERT_TRUE(res); - ASSERT_EQ(payload, std::string(uncompressed.data(), uncompressed.readableBytes())); -} - -// Java and C++ are using different ZLib settings when compressing, so the resulting -// compressed blobs are slightly different. Both should lead to the same result when -// decompressing -TEST(ZLibCompressionTest, decodeCppCompressed) { - CompressionCodecZLib codec; - - const uint8_t compressed[] = {0x78, 0x9c, 0x63, 0x60, 0x80, 0x01, 0x00, 0x00, 0x0a, 0x00, 0x01}; - - SharedBuffer uncompressed; - uint32_t uncompressedSize = 10; - - bool res = codec.decode(SharedBuffer::copy((const char*)compressed, sizeof(compressed)), uncompressedSize, - uncompressed); - ASSERT_TRUE(res); - ASSERT_EQ(uncompressedSize, uncompressed.readableBytes()); -} - -TEST(ZLibCompressionTest, decodeJavaCompressed) { - CompressionCodecZLib codec; - - const uint8_t compressed[] = {0x78, 0x9c, 0x62, 0x60, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff}; - - SharedBuffer uncompressed; - uint32_t uncompressedSize = 10; - - bool res = codec.decode(SharedBuffer::copy((const char*)compressed, sizeof(compressed)), uncompressedSize, - uncompressed); - ASSERT_TRUE(res); - ASSERT_EQ(uncompressedSize, uncompressed.readableBytes()); -} diff --git a/pulsar-client-cpp/tests/ZTSClientTest.cc b/pulsar-client-cpp/tests/ZTSClientTest.cc deleted file mode 100644 index b338e79555166..0000000000000 --- a/pulsar-client-cpp/tests/ZTSClientTest.cc +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "lib/auth/athenz/ZTSClient.h" -#include - -using namespace pulsar; - -namespace pulsar { - -class ZTSClientWrapper { - public: - static PrivateKeyUri parseUri(const char* uri) { return ZTSClient::parseUri(uri); } -}; -} // namespace pulsar - -TEST(ZTSClientTest, testZTSClient) { - { - PrivateKeyUri uri = ZTSClientWrapper::parseUri("file:/path/to/private.key"); - ASSERT_EQ("file", uri.scheme); - ASSERT_EQ("/path/to/private.key", uri.path); - } - - { - PrivateKeyUri uri = ZTSClientWrapper::parseUri("file:///path/to/private.key"); - ASSERT_EQ("file", uri.scheme); - ASSERT_EQ("/path/to/private.key", uri.path); - } - - { - PrivateKeyUri uri = ZTSClientWrapper::parseUri("file:./path/to/private.key"); - ASSERT_EQ("file", uri.scheme); - ASSERT_EQ("./path/to/private.key", uri.path); - } - - { - PrivateKeyUri uri = ZTSClientWrapper::parseUri("file://./path/to/private.key"); - ASSERT_EQ("file", uri.scheme); - ASSERT_EQ("./path/to/private.key", uri.path); - } - - { - PrivateKeyUri uri = ZTSClientWrapper::parseUri("data:application/x-pem-file;base64,SGVsbG8gV29ybGQK"); - ASSERT_EQ("data", uri.scheme); - ASSERT_EQ("application/x-pem-file;base64", uri.mediaTypeAndEncodingType); - ASSERT_EQ("SGVsbG8gV29ybGQK", uri.data); - } - - { - PrivateKeyUri uri = 
ZTSClientWrapper::parseUri(""); - ASSERT_EQ("", uri.scheme); - ASSERT_EQ("", uri.path); - ASSERT_EQ("", uri.mediaTypeAndEncodingType); - ASSERT_EQ("", uri.data); - } - - { - PrivateKeyUri uri = ZTSClientWrapper::parseUri("/path/to/private.key"); - ASSERT_EQ("", uri.scheme); - ASSERT_EQ("", uri.path); - ASSERT_EQ("", uri.mediaTypeAndEncodingType); - ASSERT_EQ("", uri.data); - } -} diff --git a/pulsar-client-cpp/tests/ZeroQueueSizeTest.cc b/pulsar-client-cpp/tests/ZeroQueueSizeTest.cc deleted file mode 100644 index 59780fed3a8ee..0000000000000 --- a/pulsar-client-cpp/tests/ZeroQueueSizeTest.cc +++ /dev/null @@ -1,286 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include "ConsumerTest.h" -#include -#include -#include -#include -#include -#include - -DECLARE_LOG_OBJECT() - -using namespace pulsar; - -static int totalMessages = 10; -static int globalCount = 0; -static std::string lookupUrl = "pulsar://localhost:6650"; -static std::string contentBase = "msg-"; - -static void messageListenerFunction(Consumer consumer, const Message& msg, Latch& latch) { - ASSERT_EQ(0, ConsumerTest::getNumOfMessagesInQueue(consumer)); - std::ostringstream ss; - ss << contentBase << globalCount; - ASSERT_EQ(ss.str(), msg.getDataAsString()); - globalCount++; - latch.countdown(); - ASSERT_EQ(0, ConsumerTest::getNumOfMessagesInQueue(consumer)); -} - -TEST(ZeroQueueSizeTest, testProduceConsume) { - Client client(lookupUrl); - std::string topicName = "zero-queue-size"; - std::string subName = "my-sub-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consConfig; - consConfig.setReceiverQueueSize(0); - result = client.subscribe(topicName, subName, consConfig, consumer); - ASSERT_EQ(ResultOk, result); - - for (int i = 0; i < totalMessages; i++) { - std::ostringstream ss; - ss << contentBase << i; - Message msg = MessageBuilder().setContent(ss.str()).build(); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - } - - for (int i = 0; i < totalMessages; i++) { - ASSERT_EQ(0, ConsumerTest::getNumOfMessagesInQueue(consumer)); - std::ostringstream ss; - ss << contentBase << i; - Message receivedMsg; - consumer.receive(receivedMsg); - ASSERT_EQ(ss.str(), receivedMsg.getDataAsString()); - ASSERT_EQ(0, ConsumerTest::getNumOfMessagesInQueue(consumer)); - } - - consumer.unsubscribe(); - consumer.close(); - producer.close(); - client.close(); -} - -TEST(ZeroQueueSizeTest, testMessageListener) { - Client client(lookupUrl); - std::string topicName = "zero-queue-size-listener"; - std::string subName = 
"my-sub-name"; - - Producer producer; - Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultOk, result); - - Consumer consumer; - ConsumerConfiguration consConfig; - consConfig.setReceiverQueueSize(0); - Latch latch(totalMessages); - consConfig.setMessageListener( - std::bind(messageListenerFunction, std::placeholders::_1, std::placeholders::_2, latch)); - result = client.subscribe(topicName, subName, consConfig, consumer); - ASSERT_EQ(ResultOk, result); - - globalCount = 0; - - for (int i = 0; i < totalMessages; i++) { - std::ostringstream ss; - ss << contentBase << i; - Message msg = MessageBuilder().setContent(ss.str()).build(); - result = producer.send(msg); - ASSERT_EQ(ResultOk, result); - } - - ASSERT_TRUE(latch.wait(std::chrono::seconds(30))); - ASSERT_EQ(globalCount, totalMessages); - - consumer.unsubscribe(); - consumer.close(); - producer.close(); - client.close(); -} - -static ConsumerConfiguration zeroQueueSharedConsumerConf( - const std::string& name, std::function callback) { - ConsumerConfiguration conf; - conf.setConsumerType(ConsumerShared); - conf.setReceiverQueueSize(0); - conf.setSubscriptionInitialPosition(InitialPositionEarliest); - conf.setMessageListener([name, callback](Consumer consumer, const Message& msg) { - LOG_INFO(name << " received " << msg.getDataAsString() << " from " << msg.getMessageId()); - callback(consumer, msg); - }); - return conf; -} - -class IntVector { - public: - size_t add(int i) { - std::lock_guard lock(mutex_); - data_.emplace_back(i); - return data_.size(); - } - - std::vector data() const { - std::lock_guard lock(mutex_); - return data_; - } - - private: - std::vector data_; - mutable std::mutex mutex_; -}; - -TEST(ZeroQueueSizeTest, testPauseResume) { - Client client(lookupUrl); - const auto topic = "ZeroQueueSizeTestPauseListener-" + std::to_string(time(nullptr)); - const auto subscription = "my-sub"; - - auto intToMessage = [](int i) { return 
MessageBuilder().setContent(std::to_string(i)).build(); }; - auto messageToInt = [](const Message& msg) { return std::stoi(msg.getDataAsString()); }; - - // 1. Produce 10 messages - Producer producer; - const auto producerConf = ProducerConfiguration().setBatchingEnabled(false); - ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf, producer)); - for (int i = 0; i < 10; i++) { - MessageId id; - ASSERT_EQ(ResultOk, producer.send(intToMessage(i), id)); - LOG_INFO("Send " << i << " to " << id); - } - - // 2. consumer-1 receives 1 message and pause - std::mutex mtx; - std::condition_variable condConsumer1FirstMessage; - std::condition_variable condConsumer1Completed; - IntVector messages1; - const auto conf1 = zeroQueueSharedConsumerConf("consumer-1", [&](Consumer consumer, const Message& msg) { - const auto numReceived = messages1.add(messageToInt(msg)); - if (numReceived == 1) { - ASSERT_EQ(ResultOk, consumer.pauseMessageListener()); - condConsumer1FirstMessage.notify_all(); - } else if (numReceived == 5) { - ASSERT_EQ(ResultOk, consumer.pauseMessageListener()); - condConsumer1Completed.notify_all(); - } - }); - Consumer consumer1; - ASSERT_EQ(ResultOk, client.subscribe(topic, subscription, conf1, consumer1)); - { - std::unique_lock lock(mtx); - ASSERT_EQ(condConsumer1FirstMessage.wait_for(lock, std::chrono::seconds(3)), - std::cv_status::no_timeout); - ASSERT_EQ(messages1.data(), (std::vector{0})); - } - - // 3. 
consumer-2 receives 5 messages and pause - std::condition_variable condConsumer2Completed; - IntVector messages2; - const auto conf2 = zeroQueueSharedConsumerConf("consumer-2", [&](Consumer consumer, const Message& msg) { - const int numReceived = messages2.add(messageToInt(msg)); - if (numReceived == 5) { - ASSERT_EQ(ResultOk, consumer.pauseMessageListener()); - condConsumer2Completed.notify_all(); - } - }); - Consumer consumer2; - ASSERT_EQ(ResultOk, client.subscribe(topic, subscription, conf2, consumer2)); - { - std::unique_lock lock(mtx); - ASSERT_EQ(condConsumer2Completed.wait_for(lock, std::chrono::seconds(3)), std::cv_status::no_timeout); - ASSERT_EQ(messages2.data(), (std::vector{1, 2, 3, 4, 5})); - } - - // 4. consumer-1 resumes listening, and receives last 4 messages - ASSERT_EQ(ResultOk, consumer1.resumeMessageListener()); - { - std::unique_lock lock(mtx); - ASSERT_EQ(condConsumer1Completed.wait_for(lock, std::chrono::seconds(3)), std::cv_status::no_timeout); - ASSERT_EQ(messages1.data(), (std::vector{0, 6, 7, 8, 9})); - } - - client.close(); -} - -TEST(ZeroQueueSizeTest, testPauseResumeNoReconnection) { - Client client(lookupUrl); - const auto topic = "ZeroQueueSizeTestPauseResumeNoReconnection-" + std::to_string(time(nullptr)); - - std::mutex mtx; - std::condition_variable cond; - bool running = true; - - auto notify = [&mtx, &cond, &running] { - std::unique_lock lock(mtx); - running = false; - cond.notify_all(); - }; - auto wait = [&mtx, &cond, &running] { - std::unique_lock lock(mtx); - running = true; - while (running) { - cond.wait(lock); - } - }; - - std::mutex mtxForMessages; - std::vector receivedMessages; - - ConsumerConfiguration consumerConf; - consumerConf.setReceiverQueueSize(0); - consumerConf.setMessageListener( - [&mtxForMessages, &receivedMessages, ¬ify](Consumer consumer, const Message& msg) { - std::unique_lock lock(mtxForMessages); - receivedMessages.emplace_back(msg.getDataAsString()); - lock.unlock(); - consumer.acknowledge(msg); - 
notify(); // notify the consumer that a new message arrived - }); - - Consumer consumer; - ASSERT_EQ(ResultOk, client.subscribe(topic, "my-sub", consumerConf, consumer)); - - Producer producer; - ASSERT_EQ(ResultOk, - client.createProducer(topic, ProducerConfiguration().setBatchingEnabled(false), producer)); - - constexpr int numMessages = 300; - for (int i = 0; i < numMessages; i++) { - const auto message = MessageBuilder().setContent(std::to_string(i)).build(); - consumer.resumeMessageListener(); - producer.sendAsync(message, {}); - wait(); // wait until a new message is received - consumer.pauseMessageListener(); - } - - std::unique_lock lock(mtxForMessages); - ASSERT_EQ(receivedMessages.size(), numMessages); - for (int i = 0; i < numMessages; i++) { - ASSERT_EQ(i, std::stoi(receivedMessages[i])); - } - lock.unlock(); - - client.close(); -} diff --git a/pulsar-client-cpp/tests/authentication.conf b/pulsar-client-cpp/tests/authentication.conf deleted file mode 100644 index 6e01a1500b8b8..0000000000000 --- a/pulsar-client-cpp/tests/authentication.conf +++ /dev/null @@ -1,288 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -### --- General broker settings --- ### - -# Zookeeper quorum connection string -zookeeperServers= - -# Configuration Store connection string -configurationStoreServers= - -brokerServicePort=9885 -brokerServicePortTls=9886 - -# Port to use to server HTTP request -webServicePort=9765 -webServicePortTls=9766 - -# Hostname or IP address the service binds on, default is 0.0.0.0. -bindAddress=0.0.0.0 - -# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getCanonicalHostName() is used. -advertisedAddress=localhost - -# Name of the cluster to which this broker belongs to -clusterName=cluster - -# Zookeeper session timeout in milliseconds -zooKeeperSessionTimeoutMillis=30000 - -# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed -brokerShutdownTimeoutMs=3000 - -# Enable backlog quota check. Enforces action on topic when the quota is reached -backlogQuotaCheckEnabled=true - -# How often to check for topics that have reached the quota -backlogQuotaCheckIntervalInSeconds=60 - -# Default per-topic backlog quota limit -backlogQuotaDefaultLimitGB=10 - -# Enable the deletion of inactive topics -brokerDeleteInactiveTopicsEnabled=true - -# How often to check for inactive topics -brokerDeleteInactiveTopicsFrequencySeconds=60 - -# How frequently to proactively check and purge expired messages -messageExpiryCheckIntervalInMinutes=5 - -# Enable check for minimum allowed client library version -clientLibraryVersionCheckEnabled=false - -# Allow client libraries with no version information -clientLibraryVersionCheckAllowUnversioned=true - -# Path for the file used to determine the rotation status for the broker when responding -# to service discovery health checks -statusFilePath=/usr/local/apache/htdocs - -# Max number of unacknowledged messages allowed to receive messages by a consumer on a shared subscription. 
Broker will stop sending -# messages to consumer once, this limit reaches until consumer starts acknowledging messages back -# Using a value of 0, is disabling unackeMessage limit check and consumer can receive messages without any restriction -maxUnackedMessagesPerConsumer=50000 - -### --- Authentication --- ### - -# Enable TLS -tlsEnabled=true -tlsCertificateFilePath=./pulsar-broker/src/test/resources/authentication/tls/broker-cert.pem -tlsKeyFilePath=./pulsar-broker/src/test/resources/authentication/tls/broker-key.pem -tlsTrustCertsFilePath=./pulsar-broker/src/test/resources/authentication/tls/cacert.pem -tlsAllowInsecureConnection=true - -# Enable authentication -authenticationEnabled=true - -# Authentication provider name list, which is comma separated list of class names -authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderTls - -# Enforce authorization -authorizationEnabled=true - -# Role names that are treated as "super-user", meaning they will be able to do all admin -# operations and publish/consume from all topics -superUserRoles=localhost,superUser - -# Authentication settings of the broker itself. Used when the broker connects to other brokers, -# either in same or other clusters -brokerClientAuthenticationPlugin= -brokerClientAuthenticationParameters= - -### --- BookKeeper Client --- ### - -# Authentication plugin to use when connecting to bookies -bookkeeperClientAuthenticationPlugin= - -# BookKeeper auth plugin implementatation specifics parameters name and values -bookkeeperClientAuthenticationParametersName= -bookkeeperClientAuthenticationParameters= - -# Timeout for BK add / read operations -bookkeeperClientTimeoutInSeconds=30 - -# Speculative reads are initiated if a read request doesn't complete within a certain time -# Using a value of 0, is disabling the speculative reads -bookkeeperClientSpeculativeReadTimeoutInMillis=0 - -# Enable bookies health check. 
Bookies that have more than the configured number of failure within -# the interval will be quarantined for some time. During this period, new ledgers won't be created -# on these bookies -bookkeeperClientHealthCheckEnabled=true -bookkeeperClientHealthCheckIntervalSeconds=60 -bookkeeperClientHealthCheckErrorThresholdPerInterval=5 -bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800 - -# Enable rack-aware bookie selection policy. BK will chose bookies from different racks when -# forming a new bookie ensemble -bookkeeperClientRackawarePolicyEnabled=true - -# Enable region-aware bookie selection policy. BK will chose bookies from -# different regions and racks when forming a new bookie ensemble -# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored -bookkeeperClientRegionawarePolicyEnabled=false - -# Minimum number of racks per write quorum. BK rack-aware bookie selection policy will try to -# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum. -bookkeeperClientMinNumRacksPerWriteQuorum=1 - -# Enforces rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum' -# racks for a writeQuorum. -# If BK can't find bookie then it would throw BKNotEnoughBookiesException instead of picking random one. -bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false - -# Enable/disable reordering read sequence on reading entries. -bookkeeperClientReorderReadSequenceEnabled=false - -# Enable bookie isolation by specifying a list of bookie groups to choose from. 
Any bookie -# outside the specified groups will not be used by the broker -bookkeeperClientIsolationGroups= - -### --- Managed Ledger --- ### - -# Number of bookies to use when creating a ledger -managedLedgerDefaultEnsembleSize=1 - -# Number of copies to store for each message -managedLedgerDefaultWriteQuorum=1 - -# Number of guaranteed copies (acks to wait before write is complete) -managedLedgerDefaultAckQuorum=1 - -# Amount of memory to use for caching data payload in managed ledger. This memory -# is allocated from JVM direct memory and it's shared across all the topics -# running in the same broker -managedLedgerCacheSizeMB=1024 - -# Threshold to which bring down the cache level when eviction is triggered -managedLedgerCacheEvictionWatermark=0.9 - -# Rate limit the amount of writes generated by consumer acking the messages -managedLedgerDefaultMarkDeleteRateLimit=0.1 - -# Max number of entries to append to a ledger before triggering a rollover -# A ledger rollover is triggered after the min rollover time has passed -# and one of the following conditions is true: -# * The max rollover time has been reached -# * The max entries have been written to the ledger -# * The max ledger size has been written to the ledger -managedLedgerMaxEntriesPerLedger=50000 - -# Minimum time between ledger rollover for a topic -managedLedgerMinLedgerRolloverTimeMinutes=10 - -# Maximum time before forcing a ledger rollover for a topic -managedLedgerMaxLedgerRolloverTimeMinutes=240 - -# Max number of entries to append to a cursor ledger -managedLedgerCursorMaxEntriesPerLedger=50000 - -# Max time before triggering a rollover on a cursor ledger -managedLedgerCursorRolloverTimeInSeconds=14400 - - - -### --- Load balancer --- ### - -# Enable load balancer -loadBalancerEnabled=false - -# Strategy to assign a new bundle -loadBalancerPlacementStrategy=weightedRandomSelection - -# Percentage of change to trigger load report update -loadBalancerReportUpdateThresholdPercentage=10 - -# maximum 
interval to update load report -loadBalancerReportUpdateMaxIntervalMinutes=15 - -# Frequency of report to collect -loadBalancerHostUsageCheckIntervalMinutes=1 - -# Load shedding interval. Broker periodically checks whether some traffic should be offload from -# some over-loaded broker to other under-loaded brokers -loadBalancerSheddingIntervalMinutes=30 - -# Prevent the same topics to be shed and moved to other broker more than once within this timeframe -loadBalancerSheddingGracePeriodMinutes=30 - -# Usage threshold to determine a broker as under-loaded -loadBalancerBrokerUnderloadedThresholdPercentage=1 - -# Usage threshold to determine a broker as over-loaded -loadBalancerBrokerOverloadedThresholdPercentage=85 - -# Interval to update namespace bundle resource quota -loadBalancerResourceQuotaUpdateIntervalMinutes=15 - -# Usage threshold to determine a broker is having just right level of load -loadBalancerBrokerComfortLoadLevelPercentage=65 - -# enable/disable namespace bundle auto split -loadBalancerAutoBundleSplitEnabled=false - -# interval to detect & split hot namespace bundle -loadBalancerNamespaceBundleSplitIntervalMinutes=15 - -# maximum topics in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxTopics=1000 - -# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxSessions=1000 - -# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxMsgRate=1000 - -# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxBandwidthMbytes=100 - -# maximum number of bundles in a namespace -loadBalancerNamespaceMaximumBundles=128 - -### --- Replication --- ### - -# Enable replication metrics -replicationMetricsEnabled=true - -# Max number of connections to open for each broker in a remote cluster -# More connections host-to-host lead to better throughput 
over high-latency -# links. -replicationConnectionsPerBroker=16 - -# Replicator producer queue size -replicationProducerQueueSize=1000 - -# Default message retention time. 0 means retention is disabled. -1 means data is not removed by time quota -defaultRetentionTimeInMinutes=0 - -# Default retention size. 0 means retention is disabled. -1 means data is not removed by size quota -defaultRetentionSizeInMB=0 - -# How often to check whether the connections are still alive -keepAliveIntervalSeconds=30 - -### --- Deprecated config variables --- ### - -# Deprecated. Use configurationStoreServers -globalZookeeperServers= - -# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds -brokerServicePurgeInactiveFrequencyInSeconds=60 diff --git a/pulsar-client-cpp/tests/c/c_BasicEndToEndTest.cc b/pulsar-client-cpp/tests/c/c_BasicEndToEndTest.cc deleted file mode 100644 index ae01befe953d6..0000000000000 --- a/pulsar-client-cpp/tests/c/c_BasicEndToEndTest.cc +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -#include -#include -#include - -#include -#include - -struct send_ctx { - pulsar_result result; - char *msg_id; - std::promise *promise; -}; - -struct receive_ctx { - pulsar_result result; - pulsar_consumer_t *consumer; - char *data; - std::promise *promise; -}; - -static void send_callback(pulsar_result async_result, pulsar_message_id_t *msg_id, void *ctx) { - struct send_ctx *send_ctx = (struct send_ctx *)ctx; - send_ctx->result = async_result; - if (async_result == pulsar_result_Ok) { - const char *msg_id_str = pulsar_message_id_str(msg_id); - send_ctx->msg_id = (char *)malloc(strlen(msg_id_str) * sizeof(char)); - strcpy(send_ctx->msg_id, msg_id_str); - } - send_ctx->promise->set_value(); - pulsar_message_id_free(msg_id); -} - -static void receive_callback(pulsar_result async_result, pulsar_message_t *msg, void *ctx) { - struct receive_ctx *receive_ctx = (struct receive_ctx *)ctx; - receive_ctx->result = async_result; - if (async_result == pulsar_result_Ok && - pulsar_consumer_acknowledge(receive_ctx->consumer, msg) == pulsar_result_Ok) { - const char *data = (const char *)pulsar_message_get_data(msg); - receive_ctx->data = (char *)malloc(strlen(data) * sizeof(char)); - strcpy(receive_ctx->data, data); - } - receive_ctx->promise->set_value(); - pulsar_message_free(msg); -} - -TEST(c_BasicEndToEndTest, testAsyncProduceConsume) { - const char *lookup_url = "pulsar://localhost:6650"; - const char *topic_name = "persistent://public/default/test-c-produce-consume"; - const char *sub_name = "my-sub-name"; - - pulsar_client_configuration_t *conf = pulsar_client_configuration_create(); - pulsar_client_t *client = pulsar_client_create(lookup_url, conf); - - pulsar_producer_configuration_t *producer_conf = pulsar_producer_configuration_create(); - pulsar_producer_t *producer; - pulsar_result result = pulsar_client_create_producer(client, topic_name, producer_conf, &producer); - ASSERT_EQ(pulsar_result_Ok, result); - - pulsar_consumer_configuration_t *consumer_conf 
= pulsar_consumer_configuration_create(); - pulsar_consumer_t *consumer; - result = pulsar_client_subscribe(client, topic_name, sub_name, consumer_conf, &consumer); - ASSERT_EQ(pulsar_result_Ok, result); - - ASSERT_STREQ(topic_name, pulsar_producer_get_topic(producer)); - ASSERT_STREQ(topic_name, pulsar_consumer_get_topic(consumer)); - ASSERT_STREQ(sub_name, pulsar_consumer_get_subscription_name(consumer)); - - // send asynchronously - std::promise send_promise; - std::future send_future = send_promise.get_future(); - struct send_ctx send_ctx = {pulsar_result_UnknownError, NULL, &send_promise}; - const char *content = "msg-1-content"; - pulsar_message_t *msg = pulsar_message_create(); - pulsar_message_set_content(msg, content, strlen(content)); - ASSERT_STREQ("(-1,-1,-1,-1)", pulsar_message_id_str(pulsar_message_get_message_id(msg))); - pulsar_producer_send_async(producer, msg, send_callback, &send_ctx); - send_future.get(); - ASSERT_EQ(pulsar_result_Ok, send_ctx.result); - ASSERT_STRNE("(-1,-1,-1,-1)", send_ctx.msg_id); - delete send_ctx.msg_id; - - // receive asynchronously - std::promise receive_promise; - std::future receive_future = receive_promise.get_future(); - struct receive_ctx receive_ctx = {pulsar_result_UnknownError, consumer, NULL, &receive_promise}; - pulsar_consumer_receive_async(consumer, receive_callback, &receive_ctx); - receive_future.get(); - ASSERT_EQ(pulsar_result_Ok, receive_ctx.result); - ASSERT_STREQ(content, receive_ctx.data); - delete receive_ctx.data; - - ASSERT_EQ(pulsar_result_Ok, pulsar_consumer_unsubscribe(consumer)); - ASSERT_EQ(pulsar_result_AlreadyClosed, pulsar_consumer_close(consumer)); - ASSERT_EQ(pulsar_result_Ok, pulsar_producer_close(producer)); - ASSERT_EQ(pulsar_result_Ok, pulsar_client_close(client)); - - pulsar_consumer_free(consumer); - pulsar_consumer_configuration_free(consumer_conf); - pulsar_producer_free(producer); - pulsar_producer_configuration_free(producer_conf); - pulsar_client_free(client); - 
pulsar_client_configuration_free(conf); -} diff --git a/pulsar-client-cpp/tests/c/c_ConsumerConfigurationTest.cc b/pulsar-client-cpp/tests/c/c_ConsumerConfigurationTest.cc deleted file mode 100644 index f2bea959e9454..0000000000000 --- a/pulsar-client-cpp/tests/c/c_ConsumerConfigurationTest.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -TEST(C_ConsumerConfigurationTest, testCApiConfig) { - pulsar_consumer_configuration_t *consumer_conf = pulsar_consumer_configuration_create(); - - ASSERT_EQ(pulsar_consumer_configuration_get_max_pending_chunked_message(consumer_conf), 10); - pulsar_consumer_configuration_set_max_pending_chunked_message(consumer_conf, 100); - ASSERT_EQ(pulsar_consumer_configuration_get_max_pending_chunked_message(consumer_conf), 100); - - ASSERT_EQ(pulsar_consumer_configuration_is_auto_ack_oldest_chunked_message_on_queue_full(consumer_conf), - 0); - pulsar_consumer_configuration_set_auto_ack_oldest_chunked_message_on_queue_full(consumer_conf, 1); - ASSERT_EQ(pulsar_consumer_configuration_is_auto_ack_oldest_chunked_message_on_queue_full(consumer_conf), - 1); -} diff --git a/pulsar-client-cpp/tests/c/c_ProducerConfigurationTest.cc b/pulsar-client-cpp/tests/c/c_ProducerConfigurationTest.cc deleted file mode 100644 index 507b3a91cfce3..0000000000000 --- a/pulsar-client-cpp/tests/c/c_ProducerConfigurationTest.cc +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -TEST(C_ProducerConfigurationTest, testCApiConfig) { - pulsar_producer_configuration_t *producer_conf = pulsar_producer_configuration_create(); - - ASSERT_EQ(pulsar_producer_configuration_is_chunking_enabled(producer_conf), 0); - pulsar_producer_configuration_set_chunking_enabled(producer_conf, 1); - ASSERT_EQ(pulsar_producer_configuration_is_chunking_enabled(producer_conf), 1); -} diff --git a/pulsar-client-cpp/tests/client.conf b/pulsar-client-cpp/tests/client.conf deleted file mode 100644 index 3fcfd49612e0c..0000000000000 --- a/pulsar-client-cpp/tests/client.conf +++ /dev/null @@ -1,27 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -# Pulsar Client configuration -webServiceUrl=https://localhost:9766/ -brokerServiceUrl=pulsar+ssl://localhost:9886/ -useTls=true -tlsAllowInsecureConnection=true -tlsTrustCertsFilePath=./pulsar-broker/src/test/resources/authentication/tls/cacert.pem -authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls -authParams=tlsCertFile:./pulsar-broker/src/test/resources/authentication/tls/client-cert.pem,tlsKeyFile:./pulsar-broker/src/test/resources/authentication/tls/client-key.pem diff --git a/pulsar-client-cpp/tests/main.cc b/pulsar-client-cpp/tests/main.cc deleted file mode 100644 index 06599258f10da..0000000000000 --- a/pulsar-client-cpp/tests/main.cc +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include - -int main(int argc, char **argv) { - ::testing::InitGoogleMock(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/pulsar-client-cpp/tests/mocks/GMockMessage.h b/pulsar-client-cpp/tests/mocks/GMockMessage.h deleted file mode 100644 index b2ff65f75aff7..0000000000000 --- a/pulsar-client-cpp/tests/mocks/GMockMessage.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef MOCK_MESSAGE_HPP_ -#define MOCK_MESSAGE_HPP_ - -#include -#include - -namespace pulsar { -// TODO: For the mock tests, we need to make all methods and destructor virtual in Message class -class GMockMessage : public Message { - public: - MOCK_CONST_METHOD0(hasPartitionKey, bool()); - - MOCK_CONST_METHOD0(getPartitionKey, const std::string&()); -}; -} // namespace pulsar - -#endif // MOCK_MESSAGE_HPP_ diff --git a/pulsar-client-cpp/tests/standalone.conf b/pulsar-client-cpp/tests/standalone.conf deleted file mode 100644 index 3d604063039e5..0000000000000 --- a/pulsar-client-cpp/tests/standalone.conf +++ /dev/null @@ -1,289 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -### --- General broker settings --- ### - -# Zookeeper quorum connection string -zookeeperServers= - -# Configuration Store connection string -configurationStoreServers= - -brokerServicePort=8885 - -# Port to use to server HTTP request -webServicePort=8765 - -# Hostname or IP address the service binds on, default is 0.0.0.0. -bindAddress=0.0.0.0 - -# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getCanonicalHostName() is used. -advertisedAddress=localhost - -# Name of the cluster to which this broker belongs to -clusterName=standalone - -# Zookeeper session timeout in milliseconds -zooKeeperSessionTimeoutMillis=30000 - -# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed -brokerShutdownTimeoutMs=3000 - -# Enable backlog quota check. 
Enforces action on topic when the quota is reached -backlogQuotaCheckEnabled=true - -# How often to check for topics that have reached the quota -backlogQuotaCheckIntervalInSeconds=60 - -# Default per-topic backlog quota limit -backlogQuotaDefaultLimitGB=10 - -# Enable the deletion of inactive topics -brokerDeleteInactiveTopicsEnabled=true - -# How often to check for inactive topics -brokerDeleteInactiveTopicsFrequencySeconds=60 - -# How frequently to proactively check and purge expired messages -messageExpiryCheckIntervalInMinutes=5 - -# Enable check for minimum allowed client library version -clientLibraryVersionCheckEnabled=false - -# Allow client libraries with no version information -clientLibraryVersionCheckAllowUnversioned=true - -# Path for the file used to determine the rotation status for the broker when responding -# to service discovery health checks -statusFilePath=/usr/local/apache/htdocs - -# Max number of unacknowledged messages allowed to receive messages by a consumer on a shared subscription. Broker will stop sending -# messages to consumer once, this limit reaches until consumer starts acknowledging messages back -# Using a value of 0, is disabling unackeMessage limit check and consumer can receive messages without any restriction -maxUnackedMessagesPerConsumer=50000 - -### --- Authentication --- ### - -# Enable authentication -authenticationEnabled=false - -# Authentication provider name list, which is comma separated list of class names -authenticationProviders=false - -# Enforce authorization -authorizationEnabled=false - -# Role names that are treated as "super-user", meaning they will be able to do all admin -# operations and publish/consume from all topics -superUserRoles= - -# Authentication settings of the broker itself. 
Used when the broker connects to other brokers, -# either in same or other clusters -brokerClientAuthenticationPlugin= -brokerClientAuthenticationParameters= - - -### --- BookKeeper Client --- ### - -# Authentication plugin to use when connecting to bookies -bookkeeperClientAuthenticationPlugin= - -# BookKeeper auth plugin implementatation specifics parameters name and values -bookkeeperClientAuthenticationParametersName= -bookkeeperClientAuthenticationParameters= - -# Timeout for BK add / read operations -bookkeeperClientTimeoutInSeconds=30 - -# Speculative reads are initiated if a read request doesn't complete within a certain time -# Using a value of 0, is disabling the speculative reads -bookkeeperClientSpeculativeReadTimeoutInMillis=0 - -# Enable bookies health check. Bookies that have more than the configured number of failure within -# the interval will be quarantined for some time. During this period, new ledgers won't be created -# on these bookies -bookkeeperClientHealthCheckEnabled=true -bookkeeperClientHealthCheckIntervalSeconds=60 -bookkeeperClientHealthCheckErrorThresholdPerInterval=5 -bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800 - -# Enable rack-aware bookie selection policy. BK will chose bookies from different racks when -# forming a new bookie ensemble -bookkeeperClientRackawarePolicyEnabled=true - -# Enable region-aware bookie selection policy. BK will chose bookies from -# different regions and racks when forming a new bookie ensemble -# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored -bookkeeperClientRegionawarePolicyEnabled=false - -# Minimum number of racks per write quorum. BK rack-aware bookie selection policy will try to -# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum. -bookkeeperClientMinNumRacksPerWriteQuorum=1 - -# Enforces rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum' -# racks for a writeQuorum. 
-# If BK can't find bookie then it would throw BKNotEnoughBookiesException instead of picking random one. -bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false - -# Enable/disable reordering read sequence on reading entries. -bookkeeperClientReorderReadSequenceEnabled=false - -# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie -# outside the specified groups will not be used by the broker -bookkeeperClientIsolationGroups= - -### --- Managed Ledger --- ### - -# Number of bookies to use when creating a ledger -managedLedgerDefaultEnsembleSize=1 - -# Number of copies to store for each message -managedLedgerDefaultWriteQuorum=1 - -# Number of guaranteed copies (acks to wait before write is complete) -managedLedgerDefaultAckQuorum=1 - -# Amount of memory to use for caching data payload in managed ledger. This memory -# is allocated from JVM direct memory and it's shared across all the topics -# running in the same broker -managedLedgerCacheSizeMB=1024 - -# Threshold to which bring down the cache level when eviction is triggered -managedLedgerCacheEvictionWatermark=0.9 - -# Rate limit the amount of writes generated by consumer acking the messages -managedLedgerDefaultMarkDeleteRateLimit=0.1 - -# Max number of entries to append to a ledger before triggering a rollover -# A ledger rollover is triggered after the min rollover time has passed -# and one of the following conditions is true: -# * The max rollover time has been reached -# * The max entries have been written to the ledger -# * The max ledger size has been written to the ledger -managedLedgerMaxEntriesPerLedger=50000 - -# Minimum time between ledger rollover for a topic -managedLedgerMinLedgerRolloverTimeMinutes=10 - -# Maximum time before forcing a ledger rollover for a topic -managedLedgerMaxLedgerRolloverTimeMinutes=240 - -# Max number of entries to append to a cursor ledger -managedLedgerCursorMaxEntriesPerLedger=50000 - -# Max time before triggering a rollover on a 
cursor ledger -managedLedgerCursorRolloverTimeInSeconds=14400 - - - -### --- Load balancer --- ### - -# Enable load balancer -loadBalancerEnabled=false - -# Strategy to assign a new bundle -loadBalancerPlacementStrategy=weightedRandomSelection - -# Percentage of change to trigger load report update -loadBalancerReportUpdateThresholdPercentage=10 - -# maximum interval to update load report -loadBalancerReportUpdateMaxIntervalMinutes=15 - -# Frequency of report to collect -loadBalancerHostUsageCheckIntervalMinutes=1 - -# Load shedding interval. Broker periodically checks whether some traffic should be offload from -# some over-loaded broker to other under-loaded brokers -loadBalancerSheddingIntervalMinutes=30 - -# Prevent the same topics to be shed and moved to other broker more than once within this timeframe -loadBalancerSheddingGracePeriodMinutes=30 - -# Usage threshold to determine a broker as under-loaded -loadBalancerBrokerUnderloadedThresholdPercentage=1 - -# Usage threshold to determine a broker as over-loaded -loadBalancerBrokerOverloadedThresholdPercentage=85 - -# Interval to update namespace bundle resource quota -loadBalancerResourceQuotaUpdateIntervalMinutes=15 - -# Usage threshold to determine a broker is having just right level of load -loadBalancerBrokerComfortLoadLevelPercentage=65 - -# enable/disable namespace bundle auto split -loadBalancerAutoBundleSplitEnabled=false - -# interval to detect & split hot namespace bundle -loadBalancerNamespaceBundleSplitIntervalMinutes=15 - -# maximum topics in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxTopics=1000 - -# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxSessions=1000 - -# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxMsgRate=1000 - -# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered 
-loadBalancerNamespaceBundleMaxBandwidthMbytes=100 - -# maximum number of bundles in a namespace -loadBalancerNamespaceMaximumBundles=128 - -### --- Replication --- ### - -# Enable replication metrics -replicationMetricsEnabled=true - -# Max number of connections to open for each broker in a remote cluster -# More connections host-to-host lead to better throughput over high-latency -# links. -replicationConnectionsPerBroker=16 - -# Replicator producer queue size -replicationProducerQueueSize=1000 - -# Default message retention time. 0 means retention is disabled. -1 means data is not removed by time quota -defaultRetentionTimeInMinutes=0 - -# Default retention size. 0 means retention is disabled. -1 means data is not removed by size quota -defaultRetentionSizeInMB=0 - -# How often to check whether the connections are still alive -keepAliveIntervalSeconds=30 - -# Enable topic auto creation if new producer or consumer connected (disable auto creation with value false) -allowAutoTopicCreation=true - -# The type of topic that is allowed to be automatically created.(partitioned/non-partitioned) -allowAutoTopicCreationType=non-partitioned - -# The number of partitioned topics that is allowed to be automatically created if allowAutoTopicCreationType is partitioned. -defaultNumPartitions=1 - -### --- Deprecated config variables --- ### - -# Deprecated. Use configurationStoreServers -globalZookeeperServers={{ zookeeper_servers }} - -# Deprecated. 
Use brokerDeleteInactiveTopicsFrequencySeconds -brokerServicePurgeInactiveFrequencyInSeconds=60 diff --git a/pulsar-client-cpp/vcpkg.json b/pulsar-client-cpp/vcpkg.json deleted file mode 100644 index 735507e642f4f..0000000000000 --- a/pulsar-client-cpp/vcpkg.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "name": "pulsar-cpp", - "version": "2.8.0", - "description": "Pulsar C++ SDK", - "dependencies": [ - "boost-accumulators", - "boost-algorithm", - "boost-any", - "boost-circular-buffer", - "boost-asio", - "boost-date-time", - "boost-predef", - "boost-program-options", - "boost-property-tree", - "boost-random", - "boost-serialization", - "boost-xpressive", - "curl", - "openssl", - "protobuf", - "snappy", - "zlib", - "zstd", - "log4cxx", - { - "name": "dlfcn-win32", - "platform": "windows" - } - ] -} diff --git a/pulsar-client-cpp/wireshark/CMakeLists.txt b/pulsar-client-cpp/wireshark/CMakeLists.txt deleted file mode 100644 index 49d8b341b9860..0000000000000 --- a/pulsar-client-cpp/wireshark/CMakeLists.txt +++ /dev/null @@ -1,83 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -set(CMAKE_CXX_FLAGS "-O3 -g ${CMAKE_CXX_FLAGS}") - -MESSAGE(STATUS "Use WIRESHARK_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") - -if(CMAKE_BUILD_TYPE STREQUAL "Debug") - add_definitions("-DDEBUG") -endif() - -# Wireshark dependency's -find_library(WIRESHARK_LIB wireshark) -find_library(WIRESHARK_UTIL_LIB wsutil) -find_path(WIRESHARK_INCLUDE_PATH wireshark/config.h) -find_library(GLIB_LIB glib) -include_directories(${GLIB_INCLUDE_DIRS}) -include(FindPkgConfig) -pkg_check_modules(GLIB glib-2.0) -include_directories(${WIRESHARK_INCLUDE_PATH}/wireshark ${GLIB_INCLUDE_DIRS} ../lib ) - -MESSAGE(STATUS "Use WIRESHARK_LIB: ${WIRESHARK_LIB}") -MESSAGE(STATUS "Use WIRESHARK_UTIL_LIB: ${WIRESHARK_UTIL_LIB}") -MESSAGE(STATUS "Use WIRESHARK_INCLUDE_PATH: ${WIRESHARK_INCLUDE_PATH}") -MESSAGE(STATUS "Use GLIB_INCLUDE_DIRS: ${GLIB_INCLUDE_DIRS}") - -# Protobuf libs -if (NOT PROTOC_PATH) - set(PROTOC_PATH protoc) -endif() - -include_directories(${Protobuf_INCLUDE_DIRS}) -find_library(Protobuf_LIBRARIES protobuf libprotobuf) -find_path(Protobuf_INCLUDE_DIRS google/protobuf/stubs/common.h) - -set(AUTOGEN_DIR ${CMAKE_BINARY_DIR}/generated) -file(MAKE_DIRECTORY ${AUTOGEN_DIR}) -set(LIB_AUTOGEN_DIR ${AUTOGEN_DIR}/lib) -file(MAKE_DIRECTORY ${LIB_AUTOGEN_DIR}) -include_directories(${LIB_AUTOGEN_DIR}) - -# Protobuf generation is only supported natively starting from CMake 3.8 -# Using custom command for now -set(PROTO_SOURCES ${LIB_AUTOGEN_DIR}/PulsarApi.pb.cc ${LIB_AUTOGEN_DIR}/PulsarApi.pb.h) -ADD_CUSTOM_COMMAND( - OUTPUT ${PROTO_SOURCES} - COMMAND ${PROTOC_PATH} -I ../../pulsar-common/src/main/proto ../../pulsar-common/src/main/proto/PulsarApi.proto --cpp_out=${LIB_AUTOGEN_DIR} - DEPENDS - ../../pulsar-common/src/main/proto/PulsarApi.proto - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) -link_libraries(${Protobuf_LIBRARIES}) - -# Build wireshark shared lib -add_library(pulsar-dissector SHARED pulsarDissector.cc ${PROTO_SOURCES}) -SET(CMAKE_SHARED_LIBRARY_PREFIX ) 
-SET(CMAKE_SHARED_LIBRARY_SUFFIX .so) -set_target_properties(pulsar-dissector PROPERTIES PREFIX "" DEFINE_SYMBOL "") - -if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - set(CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS "${CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS} -undefined dynamic_lookup") -endif() - -if (APPLE) - target_link_libraries(pulsar-dissector -Wl,-all_load ${PROTO_LIBRARIES}) -else () - target_link_libraries(pulsar-dissector ${PROTOBUF_LIBRARIES}) -endif () \ No newline at end of file diff --git a/pulsar-client-cpp/wireshark/README.md b/pulsar-client-cpp/wireshark/README.md deleted file mode 100644 index c591dbb3992d6..0000000000000 --- a/pulsar-client-cpp/wireshark/README.md +++ /dev/null @@ -1,110 +0,0 @@ - - -# Pulsar Wireshark dissector - -The Pulsar Wireshark dissector allows to automatically decode the Pulsar binary protocol -and visualize useful debug information (linking requests with responses, latency stats, etc.) - -## Install Wireshark - -Based on your operating system, run the following command. - -- macOS - -```bash -brew install homebrew/cask/wireshark -``` - -- Ubuntu - -```bash -sudo apt install wireshark -``` - -## Install dependencies - -To build the Wireshark plugin, install Wireshark with the development headers - -> **NOTE** -> -> Make sure the Wireshark application version is the same as the Wireshark headers version. - -- macOS - -```shell -$ brew install wireshark -``` - -- Ubuntu - -```shell -$ sudo apt install wireshark-dev -``` - -## Compile the dissector - -> **Tip** -> -> If the compiler cannot find the Wireshark headers, add the include path manually. -> `-DWIRESHARK_INCLUDE_PATH=` - -Compile the dissector. - -```shell -cd pulsar-client-cpp -cmake -DBUILD_WIRESHARK=ON . -make pulsar-dissector -``` - -This creates the `pulsar-dissector.so` plugin library in the Wireshark directory. 
- -## Install Wireshark dissector - -Copy the dissector in the appropriate location so that Wireshark can find it at startup. - -### Find the Personal Plugins Location - -1. Open Wireshark. -2. Click **About Wireshark**. -3. Click **Folders** tab. - -You can see the location of personal plugins, which is important for the next step. - -Example - -Wireshark 3.6.0 on macOS - -```shell -~/.local/lib/wireshark/plugins/3-6/ -``` - -### Copy Wireshark dissector to appropriate location - -```shell -mkdir -p ~/.local/lib/wireshark/plugins/3-6/epan -cd pulsar-client-cpp/wireshark -cp pulsar-dissector.so ~/.local/lib/wireshark/plugins/3-6/epan -``` - -### Complete installation - -Reboot Wireshark. You can see the pulsar-dissector in **View > Internals > Dissector Tables**. diff --git a/pulsar-client-cpp/wireshark/pulsarDissector.cc b/pulsar-client-cpp/wireshark/pulsarDissector.cc deleted file mode 100644 index 9ff311e2e68b5..0000000000000 --- a/pulsar-client-cpp/wireshark/pulsarDissector.cc +++ /dev/null @@ -1,1227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "PulsarApi.pb.h" - -#ifdef VERSION -#undef VERSION -#endif - -/* Version number of package */ -#define VERSION "0.0.1" - -const static int PULSAR_PORT = 6650; - -static int proto_pulsar = -1; -static int hf_pulsar_error = -1; -static int hf_pulsar_error_message = -1; -static int hf_pulsar_cmd_type = -1; -static int hf_pulsar_frame_size = -1; -static int hf_pulsar_cmd_size = -1; - -static int hf_pulsar_client_version = -1; -static int hf_pulsar_auth_method = -1; -static int hf_pulsar_auth_data = -1; -static int hf_pulsar_protocol_version = -1; -static int hf_pulsar_server_version = -1; - -static int hf_pulsar_topic = -1; -static int hf_pulsar_subscription = -1; -static int hf_pulsar_subType = -1; -static int hf_pulsar_consumer_id = -1; -static int hf_pulsar_producer_id = -1; -static int hf_pulsar_server_error = -1; -static int hf_pulsar_ack_type = -1; -static int hf_pulsar_request_id = -1; -static int hf_pulsar_consumer_name = -1; -static int hf_pulsar_producer_name = -1; - -static int hf_pulsar_publish_time = -1; -static int hf_pulsar_deliver_at_time = -1; -static int hf_pulsar_event_time = -1; -static int hf_pulsar_deliver_after_time = -1; - -static int hf_pulsar_chunk_id = -1; -static int hf_pulsar_num_chunks_from_msg = -1; -static int hf_pulsar_uuid = -1; -static int hf_pulsar_compression_type = -1; -static int hf_pulsar_uncompressed_size = -1; -static int hf_pulsar_partition_key = -1; -static int hf_pulsar_ordering_key = -1; -static int hf_pulsar_encryption_algo = -1; -static int hf_pulsar_encryption_param = -1; -static int hf_pulsar_encryption_keys = -1; -static int hf_pulsar_replicated_from = -1; -static int hf_pulsar_replicate_to = -1; -static int hf_pulsar_property = -1; - -static int hf_pulsar_txnid_least_bits = -1; -static int hf_pulsar_txnid_most_bits = -1; - -static int hf_pulsar_request_in = -1; -static int hf_pulsar_response_in = -1; 
-static int hf_pulsar_publish_latency = -1; - -static int hf_pulsar_sequence_id = -1; -static int hf_pulsar_highest_sequence_id = -1; -static int hf_pulsar_message_id = -1; -static int hf_pulsar_message_permits = -1; - -static int ett_pulsar = -1; - -const static int FRAME_SIZE_LEN = 4; -const static guint16 MAGIC_BROKER_ENTRY_METADATA = 0x0e02; -const static guint16 MAGIC_CRC32C = 0x0e01; - -static pulsar::proto::BaseCommand command; - -using namespace pulsar::proto; - -static const value_string pulsar_cmd_names[] = { - {BaseCommand::CONNECT, "Connect"}, - {BaseCommand::CONNECTED, "Connected"}, - {BaseCommand::SUBSCRIBE, "Subscribe"}, - {BaseCommand::PRODUCER, "Producer"}, - {BaseCommand::SEND, "Send"}, - {BaseCommand::SEND_RECEIPT, "SendReceipt"}, - {BaseCommand::SEND_ERROR, "SendError"}, - {BaseCommand::MESSAGE, "Message"}, - {BaseCommand::ACK, "Ack"}, - {BaseCommand::FLOW, "Flow"}, - {BaseCommand::UNSUBSCRIBE, "Unsubscribe"}, - {BaseCommand::SUCCESS, "Success"}, - {BaseCommand::ERROR, "Error"}, - {BaseCommand::CLOSE_PRODUCER, "CloseProducer"}, - {BaseCommand::CLOSE_CONSUMER, "CloseConsumer"}, - {BaseCommand::PRODUCER_SUCCESS, "ProducerSuccess"}, - {BaseCommand::PING, "Ping"}, - {BaseCommand::PONG, "Pong"}, - {BaseCommand::REDELIVER_UNACKNOWLEDGED_MESSAGES, "RedeliverUnacknowledgedMessages"}, - {BaseCommand::PARTITIONED_METADATA, "PartitionedMetadata"}, - {BaseCommand::PARTITIONED_METADATA_RESPONSE, "PartitionedMetadataResponse"}, - {BaseCommand::LOOKUP, "Lookup"}, - {BaseCommand::LOOKUP_RESPONSE, "LookupResponse"}, - {BaseCommand::CONSUMER_STATS, "ConsumerStats"}, - {BaseCommand::CONSUMER_STATS_RESPONSE, "ConsumerStatsResponse"}, - {BaseCommand::SEEK, "Seek"}, - {BaseCommand::GET_LAST_MESSAGE_ID, "GetLastMessageId"}, - {BaseCommand::GET_LAST_MESSAGE_ID_RESPONSE, "GetLastMessageIdResponse"}, - {BaseCommand::ACTIVE_CONSUMER_CHANGE, "ActiveConsumerChange"}, - {BaseCommand::GET_TOPICS_OF_NAMESPACE, "GetTopicsOfNamespace"}, - 
{BaseCommand::GET_TOPICS_OF_NAMESPACE_RESPONSE, "GetTopicsOfNamespaceResponse"}, - {BaseCommand::GET_SCHEMA, "GetSchema"}, - {BaseCommand::GET_SCHEMA_RESPONSE, "GetSchemaResponse"}, - {BaseCommand::AUTH_CHALLENGE, "AuthChallenge"}, - {BaseCommand::AUTH_RESPONSE, "AuthResponse"}, - {BaseCommand::ACK_RESPONSE, "AckResponse"}, - {BaseCommand::GET_OR_CREATE_SCHEMA, "GetOrCreateSchema"}, - {BaseCommand::AUTH_CHALLENGE, "AuthChallenge"}, - {BaseCommand::AUTH_RESPONSE, "AuthResponse"}, - {BaseCommand::ACK_RESPONSE, "AckResponse"}, - {BaseCommand::GET_OR_CREATE_SCHEMA, "GetOrCreateSchema"}, - {BaseCommand::GET_OR_CREATE_SCHEMA_RESPONSE, "GetOrCreateSchemaResponse"}, - {BaseCommand::NEW_TXN, "NewTxn"}, - {BaseCommand::NEW_TXN_RESPONSE, "NewTxnResponse"}, - {BaseCommand::ADD_PARTITION_TO_TXN, "AddPartitionToTxn"}, - {BaseCommand::ADD_SUBSCRIPTION_TO_TXN, "AddSubscriptionToTxn"}, - {BaseCommand::ADD_SUBSCRIPTION_TO_TXN_RESPONSE, "AddSubscriptionToTxnResponse"}, - {BaseCommand::END_TXN, "EndTxn"}, - {BaseCommand::END_TXN_RESPONSE, "EndTxnResponse"}, - {BaseCommand::END_TXN_ON_PARTITION, "EndTxnOnPartition"}, - {BaseCommand::END_TXN_ON_PARTITION_RESPONSE, "EndTxnOnPartitionResponse"}, - {BaseCommand::END_TXN_ON_SUBSCRIPTION, "EndTxnOnSubscription"}, - {BaseCommand::END_TXN_ON_SUBSCRIPTION_RESPONSE, "EndTxnOnSubscriptionResponse"}, - {BaseCommand::TC_CLIENT_CONNECT_REQUEST, "TcClientConnectRequest"}, - {BaseCommand::TC_CLIENT_CONNECT_RESPONSE, "TcClientConnectResponse"}, -}; - -static const value_string auth_methods_vs[] = { - {AuthMethodNone, "None"}, // - {AuthMethodYcaV1, "YCAv1"}, // - {AuthMethodAthens, "Athens"} // -}; - -static const value_string server_errors_vs[] = { - {UnknownError, "UnknownError"}, - {MetadataError, "MetadataError"}, - {PersistenceError, "PersistenceError"}, - {AuthenticationError, "AuthenticationError"}, - {AuthorizationError, "AuthorizationError"}, - {ConsumerBusy, "ConsumerBusy"}, - {ServiceNotReady, "ServiceNotReady"}, - 
{ProducerBlockedQuotaExceededError, "ProducerBlockedQuotaExceededError"}, - {ProducerBlockedQuotaExceededException, "ProducerBlockedQuotaExceededException"}, - {ChecksumError, "ChecksumError"}, - {UnsupportedVersionError, "UnsupportedVersionError"}, - {TopicNotFound, "TopicNotFound"}, - {SubscriptionNotFound, "SubscriptionNotFound"}, - {ConsumerNotFound, "ConsumerNotFound"}, - {TooManyRequests, "TooManyRequests"}, - {TopicTerminatedError, "TopicTerminatedError"}, - {ProducerBusy, "ProducerBusy"}, - {InvalidTopicName, "InvalidTopicName"}, - {IncompatibleSchema, "IncompatibleSchema"}, - {ConsumerAssignError, "ConsumerAssignError"}, - {TransactionCoordinatorNotFound, "TransactionCoordinatorNotFound"}, - {InvalidTxnStatus, "InvalidTxnStatus"}, - {NotAllowedError, "NotAllowedError"}, - {TransactionConflict, "TransactionConflict"}, - {TransactionNotFound, "TransactionNotFound"}, - {ProducerFenced, "ProducerFenced"}, -}; - -static const value_string ack_type_vs[] = {{CommandAck::Individual, "Individual"}, - {CommandAck::Cumulative, "Cumulative"}}; - -static const value_string protocol_version_vs[] = { - {v0, "v0"}, {v1, "v1"}, {v2, "v2"}, {v3, "v3"}, {v4, "v4"}, {v5, "v5"}, {v6, "v6"}, - {v7, "v7"}, {v8, "v8"}, {v9, "v9"}, {v10, "v10"}, {v11, "v11"}, {v12, "v12"}, {v13, "v13"}, - {v14, "v14"}, {v15, "v15"}, {v16, "v16"}, {v17, "v17"}, {v18, "v18"}, {v19, "v19"}, -}; - -static const value_string sub_type_names_vs[] = { - {CommandSubscribe::Exclusive, "Exclusive"}, - {CommandSubscribe::Shared, "Shared"}, - {CommandSubscribe::Failover, "Failover"}, - {CommandSubscribe::Key_Shared, "Key_Shared"}, -}; - -static const value_string compression_type_name_vs[] = { - {CompressionType::NONE, "NONE"}, {CompressionType::LZ4, "LZ4"}, {CompressionType::ZLIB, "ZLIB"}, - {CompressionType::ZSTD, "ZSTD"}, {CompressionType::SNAPPY, "SNAPPY"}, -}; - -static const char* to_str(int value, const value_string* values) { - return val_to_str(value, values, "Unknown (%d)"); -} - -struct 
MessageIdComparator { - bool operator()(const MessageIdData& a, const MessageIdData& b) const { - if (a.ledgerid() < b.ledgerid()) { - return true; - } else if (a.ledgerid() == b.ledgerid()) { - return a.entryid() < b.entryid(); - } else { - return false; - } - } -}; - -struct RequestData { - uint32_t requestFrame; - nstime_t requestTimestamp; - uint32_t ackFrame; - nstime_t ackTimestamp; - - RequestData() : requestFrame(UINT32_MAX), ackFrame(UINT32_MAX) {} -}; - -struct RequestResponseData : public RequestData { - uint64_t id; // producer / consumer id -}; - -struct ProducerData { - std::string topic; - std::string producerName; - - std::map messages; -}; - -struct ConsumerData { - std::string topic; - std::string subscriptionName; - std::string consumerName; - CommandSubscribe::SubType subType; - - // Link messages to acks for the consumer - std::map messages; -}; - -struct ConnectionState { - std::map producers; - std::map consumers; - std::map requests; -}; - -static void dissect_message_metadata(proto_tree* frame_tree, tvbuff_t* tvb, int offset, int maxOffset) { - if (tvb_get_ntohs(tvb, offset) == MAGIC_BROKER_ENTRY_METADATA) { - offset += 2; - auto brokerEntryMetadataSize = (int)tvb_get_ntohl(tvb, offset); - offset += brokerEntryMetadataSize + 2; -#ifdef DEBUG - proto_tree_add_debug_text(frame_tree, "[DEBUG] MAGIC_BROKER_ENTRY_METADATA %d", - brokerEntryMetadataSize); -#endif - } - if (tvb_get_ntohs(tvb, offset) == MAGIC_CRC32C) { -#ifdef DEBUG - auto checksum = tvb_get_ntohl(tvb, offset); - proto_tree_add_debug_text(frame_tree, "[DEBUG] CRC32C %d", checksum); -#endif - // Skip CRC32C Magic (2) and CRC32C checksum (4) - offset += 2; - offset += 4; - } - // Decode message metadata - auto metadataSize = tvb_get_ntohl(tvb, offset); - offset += 4; -#ifdef DEBUG - proto_tree_add_debug_text(frame_tree, "[DEBUG] MetadataSize %d, maxOffset %d", metadataSize, maxOffset); -#endif - - if (offset + metadataSize > maxOffset) { - // Not enough data to dissect metadata 
-#ifdef DEBUG - proto_tree_add_debug_text(frame_tree, "[DEBUG] Not enough data to dissect message metadata"); -#endif - return; - } - - static MessageMetadata msgMetadata; - auto ptr = tvb_get_ptr(tvb, offset, metadataSize); - - if (!msgMetadata.ParseFromArray(ptr, metadataSize)) { - proto_tree_add_boolean_format(frame_tree, hf_pulsar_error, tvb, offset, metadataSize, true, - "Error parsing protocol buffer message metadata"); - return; - } - -#ifdef DEBUG - proto_tree_add_debug_text(frame_tree, "[DEBUG] MessageMetadata Utf8DebugString : %s", - msgMetadata.Utf8DebugString().c_str()); - proto_tree_add_debug_text(frame_tree, "[DEBUG] MessageMetadata SerializeAsString : %s", - msgMetadata.SerializeAsString().c_str()); -#endif - - proto_item* md_tree = - proto_tree_add_subtree_format(frame_tree, tvb, offset, metadataSize, ett_pulsar, nullptr, - "MessageMetadata / %s / %" G_GINT64_MODIFIER "u", - msgMetadata.producer_name().c_str(), msgMetadata.sequence_id()); - proto_tree_add_string(md_tree, hf_pulsar_producer_name, tvb, offset, metadataSize, - msgMetadata.producer_name().c_str()); - - // IDs - proto_tree_add_uint64(md_tree, hf_pulsar_sequence_id, tvb, offset, metadataSize, - msgMetadata.sequence_id()); - if (msgMetadata.has_highest_sequence_id()) { - proto_tree_add_uint64(md_tree, hf_pulsar_highest_sequence_id, tvb, offset, metadataSize, - msgMetadata.highest_sequence_id()); - } - - if (msgMetadata.has_chunk_id()) { - proto_tree_add_uint(md_tree, hf_pulsar_chunk_id, tvb, offset, metadataSize, msgMetadata.chunk_id()); - if (msgMetadata.has_num_chunks_from_msg()) { - proto_tree_add_uint(md_tree, hf_pulsar_num_chunks_from_msg, tvb, offset, metadataSize, - msgMetadata.num_chunks_from_msg()); - } - } - - if (msgMetadata.has_uuid()) { - proto_tree_add_string(md_tree, hf_pulsar_uuid, tvb, offset, metadataSize, msgMetadata.uuid().c_str()); - } - - // Times - proto_tree_add_uint64(md_tree, hf_pulsar_publish_time, tvb, offset, metadataSize, - msgMetadata.publish_time()); - - if 
(msgMetadata.has_deliver_at_time()) { - proto_tree_add_uint64(md_tree, hf_pulsar_deliver_at_time, tvb, offset, metadataSize, - msgMetadata.deliver_at_time()); - proto_tree_add_uint64(md_tree, hf_pulsar_deliver_after_time, tvb, offset, metadataSize, - msgMetadata.deliver_at_time() - msgMetadata.publish_time()); - } - - if (msgMetadata.has_event_time()) { - proto_tree_add_uint64(md_tree, hf_pulsar_event_time, tvb, offset, metadataSize, - msgMetadata.event_time()); - } - - // Compression - if (msgMetadata.has_compression()) { - proto_tree_add_string(md_tree, hf_pulsar_compression_type, tvb, offset, metadataSize, - to_str(msgMetadata.compression(), compression_type_name_vs)); - } - - if (msgMetadata.has_uncompressed_size()) { - proto_tree_add_uint(md_tree, hf_pulsar_uncompressed_size, tvb, offset, metadataSize, - msgMetadata.uncompressed_size()); - } - - // Keys - if (msgMetadata.has_partition_key()) { - proto_tree_add_string(md_tree, hf_pulsar_partition_key, tvb, offset, metadataSize, - msgMetadata.partition_key().c_str()); - } - - if (msgMetadata.has_ordering_key()) { - proto_tree_add_string(md_tree, hf_pulsar_ordering_key, tvb, offset, metadataSize, - msgMetadata.ordering_key().c_str()); - } - - // Encryption - if (msgMetadata.has_encryption_algo()) { - proto_tree_add_string(md_tree, hf_pulsar_encryption_algo, tvb, offset, metadataSize, - msgMetadata.encryption_algo().c_str()); - } - if (msgMetadata.has_encryption_param()) { - proto_tree_add_string(md_tree, hf_pulsar_encryption_param, tvb, offset, metadataSize, - msgMetadata.encryption_param().c_str()); - } - if (msgMetadata.encryption_keys_size() > 0) { - proto_item* encryption_keys_tree = proto_tree_add_subtree_format( - md_tree, tvb, offset, msgMetadata.encryption_param().size(), ett_pulsar, nullptr, - "EncryptionParam / %s", msgMetadata.encryption_algo().c_str()); - for (int i = 0; i < msgMetadata.encryption_keys().size(); i++) { - const auto& encryption_key = msgMetadata.encryption_keys(i); - 
proto_tree_add_string_format( - encryption_keys_tree, hf_pulsar_encryption_keys, tvb, offset, metadataSize, "", "%s : %s", - encryption_key.has_key() ? encryption_key.key().c_str() : "", - encryption_key.has_value() ? encryption_key.value().c_str() : ""); - } - } - - // Properties - if (msgMetadata.properties_size() > 0) { - proto_item* properties_tree = proto_tree_add_subtree_format(md_tree, tvb, offset, metadataSize, - ett_pulsar, nullptr, "Properties"); - for (int i = 0; i < msgMetadata.properties_size(); i++) { - const KeyValue& kv = msgMetadata.properties(i); - proto_tree_add_string_format(properties_tree, hf_pulsar_property, tvb, offset, metadataSize, "", - "%s : %s", kv.key().c_str(), kv.value().c_str()); - } - } - - // Replication - if (msgMetadata.has_replicated_from()) { - proto_tree_add_string(md_tree, hf_pulsar_replicated_from, tvb, offset, metadataSize, - msgMetadata.replicated_from().c_str()); - } - - if (msgMetadata.replicate_to_size() > 0) { - proto_item* replicate_tree = proto_tree_add_subtree_format(md_tree, tvb, offset, metadataSize, - ett_pulsar, nullptr, "Replicate to"); - for (int i = 0; i < msgMetadata.replicate_to_size(); i++) { - proto_tree_add_string_format(replicate_tree, hf_pulsar_replicated_from, tvb, offset, metadataSize, - "", "%s", msgMetadata.replicate_to(i).c_str()); - } - } - - // Transaction - if (msgMetadata.has_txnid_least_bits()) { - proto_tree_add_uint64(md_tree, hf_pulsar_txnid_least_bits, tvb, offset, metadataSize, - msgMetadata.txnid_least_bits()); - } - - if (msgMetadata.has_txnid_most_bits()) { - proto_tree_add_uint64(md_tree, hf_pulsar_txnid_most_bits, tvb, offset, metadataSize, - msgMetadata.txnid_most_bits()); - } - - // Payloads - offset += metadataSize; - uint32_t payloadSize = maxOffset - offset; - proto_tree_add_subtree_format(md_tree, tvb, offset, payloadSize, ett_pulsar, nullptr, "Payload / size=%u", - payloadSize); -} - -void link_to_request_frame(proto_tree* cmd_tree, tvbuff_t* tvb, int offset, int size, - 
const RequestData& reqData) { - if (reqData.requestFrame != UINT32_MAX) { - proto_tree* item = - proto_tree_add_uint(cmd_tree, hf_pulsar_request_in, tvb, offset, size, reqData.requestFrame); - PROTO_ITEM_SET_GENERATED(item); - - nstime_t latency; - nstime_delta(&latency, &reqData.ackTimestamp, &reqData.requestTimestamp); - item = proto_tree_add_time(cmd_tree, hf_pulsar_publish_latency, tvb, offset, size, &latency); - PROTO_ITEM_SET_GENERATED(item); - } -} - -void link_to_response_frame(proto_tree* cmd_tree, tvbuff_t* tvb, int offset, int size, - const RequestData& reqData) { - if (reqData.ackFrame != UINT32_MAX) { - proto_tree* item = - proto_tree_add_uint(cmd_tree, hf_pulsar_response_in, tvb, offset, size, reqData.ackFrame); - PROTO_ITEM_SET_GENERATED(item); - - nstime_t latency; - nstime_delta(&latency, &reqData.ackTimestamp, &reqData.requestTimestamp); - item = proto_tree_add_time(cmd_tree, hf_pulsar_publish_latency, tvb, offset, size, &latency); - PROTO_ITEM_SET_GENERATED(item); - } -} - -////////// - -/* This method dissects fully reassembled messages */ -static int dissect_pulsar_message(tvbuff_t* tvb, packet_info* pinfo, proto_tree* tree, void* data _U_) { - uint32_t offset = FRAME_SIZE_LEN; - int maxOffset = tvb_captured_length(tvb); - auto cmdSize = (uint32_t)tvb_get_ntohl(tvb, offset); - offset += 4; - - if (offset + cmdSize > maxOffset) { - // Not enough data to dissect -#ifdef DEBUG - proto_tree_add_debug_text(tree, "[Debug] Not enough data to dissect command"); -#endif - return maxOffset; - } - - col_set_str(pinfo->cinfo, COL_PROTOCOL, "Pulsar"); - - conversation_t* conversation = find_or_create_conversation(pinfo); - auto state = (ConnectionState*)conversation_get_proto_data(conversation, proto_pulsar); - if (state == nullptr) { - state = new ConnectionState(); - conversation_add_proto_data(conversation, proto_pulsar, state); - } - - auto ptr = (uint8_t*)tvb_get_ptr(tvb, offset, cmdSize); - if (!command.ParseFromArray(ptr, cmdSize)) { - 
proto_tree_add_boolean_format(tree, hf_pulsar_error, tvb, offset, cmdSize, true, - "Error parsing protocol buffer command"); - return maxOffset; - } - - int cmdOffset = offset; - offset += cmdSize; - - col_add_str(pinfo->cinfo, COL_INFO, to_str(command.type(), pulsar_cmd_names)); - - proto_item* frame_tree = nullptr; - proto_item* cmd_tree = nullptr; - if (tree) { /* we are being asked for details */ - proto_item* ti = proto_tree_add_item(tree, proto_pulsar, tvb, 0, -1, ENC_NA); - frame_tree = proto_item_add_subtree(ti, ett_pulsar); - proto_tree_add_item(frame_tree, hf_pulsar_frame_size, tvb, 0, 4, ENC_BIG_ENDIAN); - proto_tree_add_item(frame_tree, hf_pulsar_cmd_size, tvb, 4, 4, ENC_BIG_ENDIAN); - cmd_tree = proto_tree_add_subtree_format(frame_tree, tvb, 8, cmdSize, ett_pulsar, nullptr, - "Command %s", to_str(command.type(), pulsar_cmd_names)); - proto_tree_add_string(cmd_tree, hf_pulsar_cmd_type, tvb, 8, cmdSize, - to_str(command.type(), pulsar_cmd_names)); - } - - switch (command.type()) { - case BaseCommand::CONNECT: { - const CommandConnect& connect = command.connect(); - if (tree) { - proto_tree_add_string(cmd_tree, hf_pulsar_client_version, tvb, cmdOffset, cmdSize, - connect.client_version().c_str()); - proto_tree_add_string(cmd_tree, hf_pulsar_protocol_version, tvb, cmdOffset, cmdSize, - to_str(connect.protocol_version(), protocol_version_vs)); - proto_tree_add_string(cmd_tree, hf_pulsar_auth_method, tvb, cmdOffset, cmdSize, - to_str(connect.auth_method(), auth_methods_vs)); - if (connect.has_auth_data()) { - proto_tree_add_string(cmd_tree, hf_pulsar_auth_data, tvb, cmdOffset, cmdSize, - connect.auth_data().c_str()); - } - } - break; - } - case BaseCommand::CONNECTED: { - const CommandConnected& connected = command.connected(); - if (tree) { - proto_tree_add_string(cmd_tree, hf_pulsar_server_version, tvb, cmdOffset, cmdSize, - connected.server_version().c_str()); - proto_tree_add_string(cmd_tree, hf_pulsar_protocol_version, tvb, cmdOffset, cmdSize, - 
to_str(connected.protocol_version(), protocol_version_vs)); - } - break; - } - case BaseCommand::SUBSCRIBE: { - const CommandSubscribe& subscribe = command.subscribe(); - RequestData& reqData = state->requests[subscribe.request_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ConsumerData& consumerData = state->consumers[subscribe.consumer_id()]; - consumerData.topic = subscribe.topic(); - consumerData.subscriptionName = subscribe.subscription(); - consumerData.consumerName = subscribe.consumer_name(); - consumerData.subType = subscribe.subtype(); - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %s / %s / %s", - to_str(subscribe.subtype(), sub_type_names_vs), subscribe.topic().c_str(), - subscribe.subscription().c_str(), subscribe.consumer_name().c_str()); - - if (tree) { - proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - subscribe.topic().c_str()); - proto_tree_add_string(cmd_tree, hf_pulsar_subscription, tvb, cmdOffset, cmdSize, - subscribe.subscription().c_str()); - proto_tree_add_string(cmd_tree, hf_pulsar_subType, tvb, cmdOffset, cmdSize, - to_str(subscribe.subtype(), sub_type_names_vs)); - proto_tree_add_uint64(cmd_tree, hf_pulsar_consumer_id, tvb, cmdOffset, cmdSize, - subscribe.consumer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - subscribe.request_id()); - proto_tree_add_string( - cmd_tree, hf_pulsar_consumer_name, tvb, cmdOffset, cmdSize, - subscribe.has_consumer_name() ? 
subscribe.consumer_name().c_str() : ""); - } - break; - } - case BaseCommand::PRODUCER: { - const CommandProducer& producer = command.producer(); - RequestResponseData& reqData = state->requests[producer.request_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - reqData.id = producer.producer_id(); - - state->producers[producer.producer_id()].topic = producer.topic(); - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s", producer.topic().c_str()); - - if (tree) { - proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - producer.topic().c_str()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_producer_id, tvb, cmdOffset, cmdSize, - producer.producer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - producer.request_id()); - proto_tree_add_string( - cmd_tree, hf_pulsar_producer_name, tvb, cmdOffset, cmdSize, - producer.has_producer_name() ? 
producer.producer_name().c_str() : ""); - - link_to_response_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::SEND: { - const CommandSend& send = command.send(); - RequestData& reqData = state->producers[send.producer_id()].messages[send.sequence_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ProducerData& producerData = state->producers[send.producer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %" G_GINT64_MODIFIER "u", - producerData.producerName.c_str(), send.sequence_id()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_producer_id, tvb, cmdOffset, cmdSize, - send.producer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_sequence_id, tvb, cmdOffset, cmdSize, - send.sequence_id()); - - // Decode message metadata - dissect_message_metadata(cmd_tree, tvb, offset, maxOffset); - - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_producer_name, tvb, cmdOffset, cmdSize, - producerData.producerName.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - producerData.topic.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - // Pair with frame information - link_to_response_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::SEND_RECEIPT: { - const CommandSendReceipt& send_receipt = command.send_receipt(); - RequestData& reqData = - state->producers[send_receipt.producer_id()].messages[send_receipt.sequence_id()]; - reqData.ackFrame = pinfo->fd->num; - reqData.ackTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.ackTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ProducerData& producerData = state->producers[send_receipt.producer_id()]; - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %" G_GINT64_MODIFIER "u", - producerData.producerName.c_str(), 
send_receipt.sequence_id()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_producer_id, tvb, cmdOffset, cmdSize, - send_receipt.producer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_sequence_id, tvb, cmdOffset, cmdSize, - send_receipt.sequence_id()); - if (send_receipt.has_message_id()) { - const MessageIdData& messageId = send_receipt.message_id(); - proto_tree_add_string_format(cmd_tree, hf_pulsar_message_id, tvb, cmdOffset, cmdSize, "", - "Message Id: %" G_GINT64_MODIFIER "u:%" G_GINT64_MODIFIER - "u", - messageId.ledgerid(), messageId.entryid()); - } - - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_producer_name, tvb, cmdOffset, cmdSize, - producerData.producerName.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - producerData.topic.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - link_to_request_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::SEND_ERROR: { - const CommandSendError& send_error = command.send_error(); - RequestData& reqData = - state->producers[send_error.producer_id()].messages[send_error.sequence_id()]; - reqData.ackFrame = pinfo->fd->num; - reqData.ackTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.ackTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ProducerData& producerData = state->producers[send_error.producer_id()]; - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %" G_GINT64_MODIFIER "u", - producerData.producerName.c_str(), send_error.sequence_id()); - - if (tree) { - proto_tree_add_boolean_format(frame_tree, hf_pulsar_error, tvb, cmdOffset, cmdSize, true, - "Error in sending operation"); - proto_tree_add_uint64(cmd_tree, hf_pulsar_producer_id, tvb, cmdOffset, cmdSize, - send_error.producer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_sequence_id, tvb, cmdOffset, cmdSize, - send_error.sequence_id()); - - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_server_error, tvb, 
cmdOffset, cmdSize, - to_str(send_error.error(), server_errors_vs)); - - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_producer_name, tvb, cmdOffset, cmdSize, - producerData.producerName.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - producerData.topic.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - // Pair with frame information - link_to_request_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::MESSAGE: { - const CommandMessage& message = command.message(); - state->consumers[message.consumer_id()].messages[message.message_id()]; - RequestData& reqData = state->consumers[message.consumer_id()].messages[message.message_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - const ConsumerData& consumerData = state->consumers[message.consumer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %" G_GINT64_MODIFIER "u:%" G_GINT64_MODIFIER "u", - consumerData.consumerName.c_str(), message.message_id().ledgerid(), - message.message_id().entryid()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_consumer_id, tvb, cmdOffset, cmdSize, - message.consumer_id()); - - dissect_message_metadata(cmd_tree, tvb, offset, maxOffset); - - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_consumer_name, tvb, cmdOffset, cmdSize, - consumerData.consumerName.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - consumerData.topic.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - // Pair with frame information - link_to_response_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::ACK: { - const CommandAck& ack = command.ack(); - RequestData& reqData = 
state->consumers[ack.consumer_id()].messages[ack.message_id().Get(0)]; - reqData.ackFrame = pinfo->fd->num; - reqData.ackTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.ackTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - const ConsumerData& consumerData = state->consumers[ack.consumer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %" G_GINT64_MODIFIER "u:%" G_GINT64_MODIFIER "u", - consumerData.consumerName.c_str(), ack.message_id().Get(0).ledgerid(), - ack.message_id().Get(0).entryid()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_consumer_id, tvb, cmdOffset, cmdSize, - ack.consumer_id()); - proto_tree_add_string(cmd_tree, hf_pulsar_ack_type, tvb, cmdOffset, cmdSize, - to_str(ack.ack_type(), ack_type_vs)); - - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_consumer_name, tvb, cmdOffset, cmdSize, - consumerData.consumerName.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - consumerData.topic.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - // Pair with frame information - link_to_request_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::FLOW: { - const CommandFlow& flow = command.flow(); - const ConsumerData& consumerData = state->consumers[flow.consumer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %d", consumerData.consumerName.c_str(), - flow.messagepermits()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_consumer_id, tvb, cmdOffset, cmdSize, - flow.consumer_id()); - proto_tree_add_uint(cmd_tree, hf_pulsar_message_permits, tvb, cmdOffset, cmdSize, - flow.messagepermits()); - - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_consumer_name, tvb, cmdOffset, cmdSize, - consumerData.consumerName.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - consumerData.topic.c_str()); - 
PROTO_ITEM_SET_GENERATED(item); - } - break; - } - case BaseCommand::UNSUBSCRIBE: { - const CommandUnsubscribe& unsubscribe = command.unsubscribe(); - RequestData& reqData = state->requests[unsubscribe.request_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ConsumerData& consumerData = state->consumers[unsubscribe.consumer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %s / %s / %s", - to_str(consumerData.subType, sub_type_names_vs), consumerData.topic.c_str(), - consumerData.subscriptionName.c_str(), consumerData.consumerName.c_str()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_consumer_id, tvb, cmdOffset, cmdSize, - unsubscribe.consumer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - unsubscribe.request_id()); - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - consumerData.topic.c_str()); - PROTO_ITEM_IS_GENERATED(item); - item = proto_tree_add_string(cmd_tree, hf_pulsar_subscription, tvb, cmdOffset, cmdSize, - consumerData.subscriptionName.c_str()); - PROTO_ITEM_IS_GENERATED(item); - proto_tree_add_string(cmd_tree, hf_pulsar_subType, tvb, cmdOffset, cmdSize, - to_str(consumerData.subType, sub_type_names_vs)); - PROTO_ITEM_IS_GENERATED(item); - - proto_tree_add_string(cmd_tree, hf_pulsar_consumer_name, tvb, cmdOffset, cmdSize, - consumerData.consumerName.c_str()); - PROTO_ITEM_IS_GENERATED(item); - - link_to_response_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - - case BaseCommand::SUCCESS: { - const CommandSuccess& success = command.success(); - RequestResponseData& reqData = state->requests[success.request_id()]; - reqData.ackFrame = pinfo->fd->num; - reqData.ackTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.ackTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - if (tree) { - proto_tree_add_uint64(cmd_tree, 
hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - success.request_id()); - - link_to_request_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::ERROR: { - const CommandError& error = command.error(); - RequestResponseData& reqData = state->requests[error.request_id()]; - reqData.ackFrame = pinfo->fd->num; - reqData.ackTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.ackTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - if (tree) { - proto_tree_add_boolean_format(frame_tree, hf_pulsar_error, tvb, cmdOffset, cmdSize, true, - "Request failed"); - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - error.request_id()); - proto_tree_add_string(cmd_tree, hf_pulsar_server_error, tvb, cmdOffset, cmdSize, - to_str(error.error(), server_errors_vs)); - proto_tree_add_string(cmd_tree, hf_pulsar_error_message, tvb, cmdOffset, cmdSize, - error.message().c_str()); - - link_to_request_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::CLOSE_PRODUCER: { - const CommandCloseProducer& close_producer = command.close_producer(); - RequestData& reqData = state->requests[close_producer.request_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ProducerData& producerData = state->producers[close_producer.producer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s", producerData.topic.c_str()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_producer_id, tvb, cmdOffset, cmdSize, - close_producer.producer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - close_producer.request_id()); - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - producerData.topic.c_str()); - PROTO_ITEM_IS_GENERATED(item); - - proto_tree_add_string(cmd_tree, hf_pulsar_producer_name, tvb, cmdOffset, cmdSize, 
- producerData.producerName.c_str()); - PROTO_ITEM_IS_GENERATED(item); - - link_to_response_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - - case BaseCommand::CLOSE_CONSUMER: { - const CommandCloseConsumer& close_consumer = command.close_consumer(); - RequestData& reqData = state->requests[close_consumer.request_id()]; - reqData.requestFrame = pinfo->fd->num; - reqData.requestTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.requestTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - - ConsumerData& consumerData = state->consumers[close_consumer.consumer_id()]; - - col_append_fstr(pinfo->cinfo, COL_INFO, " / %s / %s / %s / %s", - to_str(consumerData.subType, sub_type_names_vs), consumerData.topic.c_str(), - consumerData.subscriptionName.c_str(), consumerData.consumerName.c_str()); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_consumer_id, tvb, cmdOffset, cmdSize, - close_consumer.consumer_id()); - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - close_consumer.request_id()); - auto item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - consumerData.topic.c_str()); - PROTO_ITEM_IS_GENERATED(item); - item = proto_tree_add_string(cmd_tree, hf_pulsar_subscription, tvb, cmdOffset, cmdSize, - consumerData.subscriptionName.c_str()); - PROTO_ITEM_IS_GENERATED(item); - proto_tree_add_string(cmd_tree, hf_pulsar_subType, tvb, cmdOffset, cmdSize, - to_str(consumerData.subType, sub_type_names_vs)); - PROTO_ITEM_IS_GENERATED(item); - - proto_tree_add_string(cmd_tree, hf_pulsar_consumer_name, tvb, cmdOffset, cmdSize, - consumerData.consumerName.c_str()); - PROTO_ITEM_IS_GENERATED(item); - - link_to_response_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - - case BaseCommand::PRODUCER_SUCCESS: { - const CommandProducerSuccess& success = command.producer_success(); - RequestResponseData& reqData = state->requests[success.request_id()]; - reqData.ackFrame = pinfo->fd->num; 
- reqData.ackTimestamp.secs = pinfo->fd->abs_ts.secs; - reqData.ackTimestamp.nsecs = pinfo->fd->abs_ts.nsecs; - uint64_t producerId = reqData.id; - ProducerData& producerData = state->producers[producerId]; - producerData.producerName = success.producer_name(); - - if (tree) { - proto_tree_add_uint64(cmd_tree, hf_pulsar_request_id, tvb, cmdOffset, cmdSize, - success.request_id()); - proto_tree_add_string(cmd_tree, hf_pulsar_producer_name, tvb, cmdOffset, cmdSize, - success.producer_name().c_str()); - - auto item = proto_tree_add_uint64(cmd_tree, hf_pulsar_producer_id, tvb, cmdOffset, cmdSize, - producerId); - PROTO_ITEM_SET_GENERATED(item); - - item = proto_tree_add_string(cmd_tree, hf_pulsar_topic, tvb, cmdOffset, cmdSize, - producerData.topic.c_str()); - PROTO_ITEM_SET_GENERATED(item); - - link_to_request_frame(cmd_tree, tvb, cmdOffset, cmdSize, reqData); - } - break; - } - case BaseCommand::PING: - break; - case BaseCommand::PONG: - break; - case BaseCommand::REDELIVER_UNACKNOWLEDGED_MESSAGES: - break; - case BaseCommand::PARTITIONED_METADATA: - break; - case BaseCommand::PARTITIONED_METADATA_RESPONSE: - break; - case BaseCommand::LOOKUP: - break; - case BaseCommand::LOOKUP_RESPONSE: - break; - case BaseCommand::CONSUMER_STATS: - break; - case BaseCommand::CONSUMER_STATS_RESPONSE: - break; - case BaseCommand::REACHED_END_OF_TOPIC: - break; - case BaseCommand::SEEK: - break; - case BaseCommand::GET_LAST_MESSAGE_ID: - break; - case BaseCommand::GET_LAST_MESSAGE_ID_RESPONSE: - break; - case BaseCommand::ACTIVE_CONSUMER_CHANGE: - break; - case BaseCommand::GET_TOPICS_OF_NAMESPACE: - break; - case BaseCommand::GET_TOPICS_OF_NAMESPACE_RESPONSE: - break; - case BaseCommand::GET_SCHEMA: - break; - case BaseCommand::GET_SCHEMA_RESPONSE: - break; - case BaseCommand::AUTH_CHALLENGE: - break; - case BaseCommand::AUTH_RESPONSE: - break; - case BaseCommand::ACK_RESPONSE: - break; - case BaseCommand::GET_OR_CREATE_SCHEMA: - break; - case 
BaseCommand::GET_OR_CREATE_SCHEMA_RESPONSE: - break; - case BaseCommand::NEW_TXN: - break; - case BaseCommand::NEW_TXN_RESPONSE: - break; - case BaseCommand::ADD_PARTITION_TO_TXN: - break; - case BaseCommand::ADD_PARTITION_TO_TXN_RESPONSE: - break; - case BaseCommand::ADD_SUBSCRIPTION_TO_TXN: - break; - case BaseCommand::ADD_SUBSCRIPTION_TO_TXN_RESPONSE: - break; - case BaseCommand::END_TXN: - break; - case BaseCommand::END_TXN_RESPONSE: - break; - case BaseCommand::END_TXN_ON_PARTITION: - break; - case BaseCommand::END_TXN_ON_PARTITION_RESPONSE: - break; - case BaseCommand::END_TXN_ON_SUBSCRIPTION: - break; - case BaseCommand::END_TXN_ON_SUBSCRIPTION_RESPONSE: - break; - case BaseCommand::TC_CLIENT_CONNECT_REQUEST: - break; - case BaseCommand::TC_CLIENT_CONNECT_RESPONSE: - break; - } - - return maxOffset; -} - -/* determine PDU length of protocol Pulsar */ -static uint32_t get_pulsar_message_len(packet_info* pinfo _U_, tvbuff_t* tvb, int offset, void* data _U_) { - auto len = (uint32_t)tvb_get_ntohl(tvb, offset); - return FRAME_SIZE_LEN + len; -} - -static int dissect_pulsar(tvbuff_t* tvb, packet_info* pinfo, proto_tree* tree, void* data _U_) { - tcp_dissect_pdus(tvb, pinfo, tree, 1, FRAME_SIZE_LEN, get_pulsar_message_len, dissect_pulsar_message, - data); - return tvb_captured_length(tvb); -} - -static hf_register_info hf[] = { - // - {&hf_pulsar_error, {"Error", "apache.pulsar.error", FT_BOOLEAN, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_error_message, - {"Message", "apache.pulsar.error_message", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_cmd_type, - {"Command Type", "apache.pulsar.cmd.type", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_frame_size, - {"Frame size", "apache.pulsar.frame_size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_cmd_size, - {"Command size", "apache.pulsar.cmd_size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_client_version, - {"Client version", 
"apache.pulsar.client_version", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_auth_method, - {"Auth method", "apache.pulsar.auth_method", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_auth_data, - {"Auth data", "apache.pulsar.auth_data", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_protocol_version, - {"Protocol version", "apache.pulsar.protocol_version", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, - - {&hf_pulsar_server_version, - {"Server version", "apache.pulsar.server_version", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_topic, {"Topic", "apache.pulsar.topic", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_subscription, - {"Subscription", "apache.pulsar.subscription", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_subType, - {"Subscription type:", "apache.pulsar.sub_type", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_consumer_id, - {"Consumer Id", "apache.pulsar.consumer_id", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_producer_id, - {"Producer Id", "apache.pulsar.producer_id", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_server_error, - {"Server error", "apache.pulsar.server_error", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_ack_type, {"Ack type", "apache.pulsar.ack_type", FT_STRING, 0, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_request_id, - {"Request Id", "apache.pulsar.request_id", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_consumer_name, - {"Consumer Name", "apache.pulsar.consumer_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_producer_name, - {"Producer Name", "apache.pulsar.producer_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_sequence_id, - {"Sequence Id", "apache.pulsar.sequence_id", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_highest_sequence_id, - {"Highest Sequence Id", "apache.pulsar.highest_sequence_id", FT_UINT64, BASE_DEC, 
NULL, 0x0, NULL, - HFILL}}, // - {&hf_pulsar_uuid, {"UUID", "apache.pulsar.uuid", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_message_id, - {"Message Id", "apache.pulsar.message_id", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_message_permits, - {"Message Permits", "apache.pulsar.message_permits", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_publish_time, - {"Publish Time", "apache.pulsar.publish_time", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_deliver_at_time, - {"Deliver At Time", "apache.pulsar.deliver_at_time", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_event_time, - {"Event Time", "apache.pulsar.event_time", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_deliver_after_time, - {"Deliver After Time", "apache.pulsar.deliver_after_time", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, - HFILL}}, // - - {&hf_pulsar_chunk_id, - {"Chunk Id", "apache.pulsar.chunk_id", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_num_chunks_from_msg, - {"Num Chunks From Msg", "apache.pulsar.num_chunks_from_msg", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, - HFILL}}, // - {&hf_pulsar_compression_type, - {"Compression Type", "apache.pulsar.compression_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, - HFILL}}, // - {&hf_pulsar_uncompressed_size, - {"UnCompression Size", "apache.pulsar.uncompressed_size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, - HFILL}}, // - - {&hf_pulsar_replicated_from, - {"Replicated from", "apache.pulsar.replicated_from", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_partition_key, - {"Partition Key", "apache.pulsar.partition_key", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_ordering_key, - {"Ordering Key", "apache.pulsar.ordering_key", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_encryption_algo, - {"Encryption Algo", "apache.pulsar.encryption_algo", FT_STRING, BASE_NONE, NULL, 0x0, 
NULL, HFILL}}, // - {&hf_pulsar_encryption_param, - {"Encryption Param", "apache.pulsar.encryption_param", FT_STRING, BASE_NONE, NULL, 0x0, NULL, - HFILL}}, // - - {&hf_pulsar_replicate_to, - {"Replicate to", "apache.pulsar.replicate_to", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_property, - {"Property", "apache.pulsar.property", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_encryption_keys, - {"Encryption Keys", "apache.pulsar.encryption_keys", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_txnid_least_bits, - {"TxnId Least Bits", "apache.pulsar.txnid_least_bits", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - {&hf_pulsar_txnid_most_bits, - {"TxnId Most Bits", "apache.pulsar.txnid_most_bits", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL}}, // - - {&hf_pulsar_request_in, - {"Request in frame", "apache.pulsar.request_in", FT_FRAMENUM, BASE_NONE, NULL, 0, - "This packet is a response to the packet with this number", HFILL}}, // - {&hf_pulsar_response_in, - {"Response in frame", "apache.pulsar.response_in", FT_FRAMENUM, BASE_NONE, NULL, 0, - "This packet will be responded in the packet with this number", HFILL}}, // - {&hf_pulsar_publish_latency, - {"Latency", "apache.pulsar.publish_latency", FT_RELATIVE_TIME, BASE_NONE, NULL, 0x0, - "How long time it took to ACK message", HFILL}}, -}; - -//////////////// -/// -void proto_register_pulsar() { - // register the new protocol, protocol fields, and subtrees - static dissector_handle_t pulsar_handle; - - proto_pulsar = proto_register_protocol("Pulsar Wire Protocol", /* name */ - "Apache Pulsar", /* short name */ - "apache.pulsar" /* abbrev */ - ); - - /* Setup protocol subtree array */ - static int* ett[] = {&ett_pulsar}; - - proto_register_field_array(proto_pulsar, hf, array_length(hf)); - proto_register_subtree_array(ett, array_length(ett)); - - pulsar_handle = create_dissector_handle(&dissect_pulsar, proto_pulsar); - dissector_add_uint("tcp.port", 
PULSAR_PORT, pulsar_handle); - register_postdissector(pulsar_handle); -} - -extern "C" { - -extern __attribute__((unused)) WS_DLL_PUBLIC_DEF const gchar plugin_version[] = VERSION; -extern __attribute__((unused)) WS_DLL_PUBLIC_DEF const int plugin_want_major = VERSION_MAJOR; -extern __attribute__((unused)) WS_DLL_PUBLIC_DEF const int plugin_want_minor = VERSION_MINOR; - -WS_DLL_PUBLIC void plugin_register(void); - -__attribute__((unused)) static void proto_reg_handoff_pulsar(void) {} - -/* Start the functions we need for the plugin stuff */ -void plugin_register(void) { - static proto_plugin plug; - plug.register_protoinfo = proto_register_pulsar; - plug.register_handoff = proto_reg_handoff_pulsar; /* or nullptr */ - proto_register_plugin(&plug); -} -} diff --git a/pulsar-functions/instance/pom.xml b/pulsar-functions/instance/pom.xml index 2410142570f46..beb0990e8c597 100644 --- a/pulsar-functions/instance/pom.xml +++ b/pulsar-functions/instance/pom.xml @@ -238,9 +238,6 @@ building python instance - - diff --git a/pulsar-functions/instance/src/main/python/util.py b/pulsar-functions/instance/src/main/python/util.py index 782c15c0e8c23..48ba2f0e6d7cc 100755 --- a/pulsar-functions/instance/src/main/python/util.py +++ b/pulsar-functions/instance/src/main/python/util.py @@ -26,6 +26,7 @@ import sys import importlib from threading import Timer +from pulsar.functions import serde import log @@ -60,7 +61,13 @@ def import_class_from_path(from_path, full_class_name): mod = importlib.import_module(class_name) return mod else: - mod = importlib.import_module(classname_path) + # Serde modules is being used in unqualified form instead of using + # the full name `pulsar.functions.serde`, so we have to make sure + # it gets resolved correctly. 
+ if classname_path == 'serde': + mod = serde + else: + mod = importlib.import_module(classname_path) retval = getattr(mod, class_name) return retval @@ -91,4 +98,4 @@ def start(self): self.thread.start() def cancel(self): - self.thread.cancel() \ No newline at end of file + self.thread.cancel() diff --git a/src/stage-release.sh b/src/stage-release.sh index 1f90f410f10a7..0057ac6a7fd97 100755 --- a/src/stage-release.sh +++ b/src/stage-release.sh @@ -38,12 +38,6 @@ cp $PULSAR_PATH/distribution/offloaders/target/apache-pulsar-offloaders-$VERSION cp -r $PULSAR_PATH/distribution/io/target/apache-pulsar-io-connectors-$VERSION-bin $DEST_PATH/connectors -mkdir $DEST_PATH/RPMS -cp -r $PULSAR_PATH/pulsar-client-cpp/pkg/rpm/RPMS/x86_64/* $DEST_PATH/RPMS - -mkdir $DEST_PATH/DEB -cp -r $PULSAR_PATH/pulsar-client-cpp/pkg/deb/BUILD/DEB/* $DEST_PATH/DEB - # Sign all files cd $DEST_PATH find . -type f | grep -v LICENSE | grep -v README | xargs $PULSAR_PATH/src/sign-release.sh From 5e8902c76330bccf96f97dcb165fbed50cd56dac Mon Sep 17 00:00:00 2001 From: AloysZhang Date: Fri, 30 Sep 2022 10:29:36 +0800 Subject: [PATCH 37/59] support setting bundle number for default namespace (#17722) --- .../pulsar/PulsarClusterMetadataSetup.java | 30 +++++++++++---- .../zookeeper/ClusterMetadataSetupTest.java | 38 +++++++++++++++++++ 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java index 7b498455d4bcf..1b4edf8686eac 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java @@ -64,10 +64,18 @@ */ public class PulsarClusterMetadataSetup { + private static final int DEFAULT_BUNDLE_NUMBER = 16; + private static class Arguments { @Parameter(names = { "-c", "--cluster" }, description = "Cluster name", required = true) 
private String cluster; + @Parameter(names = {"-bn", + "--default-namespace-bundle-number"}, + description = "The bundle numbers for the default namespaces(public/default), default is 16", + required = false) + private int numberOfDefaultNamespaceBundles; + @Parameter(names = { "-uw", "--web-service-url" }, description = "Web-service URL for new cluster", required = true) private String clusterWebServiceUrl; @@ -243,9 +251,11 @@ public static void main(String[] args) throws Exception { System.err.println("Number of transaction coordinators must greater than 0"); System.exit(1); } - + int bundleNumberForDefaultNamespace = + arguments.numberOfDefaultNamespaceBundles > 0 ? arguments.numberOfDefaultNamespaceBundles + : DEFAULT_BUNDLE_NUMBER; try { - initializeCluster(arguments); + initializeCluster(arguments, bundleNumberForDefaultNamespace); } catch (Exception e) { System.err.println("Unexpected error occured."); e.printStackTrace(System.err); @@ -254,7 +264,7 @@ public static void main(String[] args) throws Exception { } } - private static void initializeCluster(Arguments arguments) throws Exception { + private static void initializeCluster(Arguments arguments, int bundleNumberForDefaultNamespace) throws Exception { log.info("Setting up cluster {} with metadata-store={} configuration-metadata-store={}", arguments.cluster, arguments.metadataStoreUrl, arguments.configurationMetadataStore); @@ -331,7 +341,7 @@ private static void initializeCluster(Arguments arguments) throws Exception { // Create default namespace createNamespaceIfAbsent(resources, NamespaceName.get(TopicName.PUBLIC_TENANT, TopicName.DEFAULT_NAMESPACE), - arguments.cluster); + arguments.cluster, bundleNumberForDefaultNamespace); // Create system namespace createNamespaceIfAbsent(resources, NamespaceName.SYSTEM_NAMESPACE, arguments.cluster); @@ -363,17 +373,18 @@ static void createTenantIfAbsent(PulsarResources resources, String tenant, Strin } } - static void createNamespaceIfAbsent(PulsarResources 
resources, NamespaceName namespaceName, String cluster) - throws IOException { + static void createNamespaceIfAbsent(PulsarResources resources, NamespaceName namespaceName, + String cluster, int bundleNumber) throws IOException { NamespaceResources namespaceResources = resources.getNamespaceResources(); if (!namespaceResources.namespaceExists(namespaceName)) { Policies policies = new Policies(); - policies.bundles = getBundles(16); + policies.bundles = getBundles(bundleNumber); policies.replication_clusters = Collections.singleton(cluster); namespaceResources.createPolicies(namespaceName, policies); } else { + log.info("Namespace {} already exists.", namespaceName); namespaceResources.setPolicies(namespaceName, policies -> { policies.replication_clusters.add(cluster); return policies; @@ -381,6 +392,11 @@ static void createNamespaceIfAbsent(PulsarResources resources, NamespaceName nam } } + static void createNamespaceIfAbsent(PulsarResources resources, NamespaceName namespaceName, + String cluster) throws IOException { + createNamespaceIfAbsent(resources, namespaceName, cluster, DEFAULT_BUNDLE_NUMBER); + } + static void createPartitionedTopic(MetadataStore configStore, TopicName topicName, int numPartitions) throws InterruptedException, IOException, ExecutionException { PulsarResources resources = new PulsarResources(null, configStore); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/ClusterMetadataSetupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/ClusterMetadataSetupTest.java index 75b385dba4d5d..088087dce681f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/ClusterMetadataSetupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/ClusterMetadataSetupTest.java @@ -22,6 +22,8 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; + +import 
com.fasterxml.jackson.databind.type.TypeFactory; import java.io.Closeable; import java.io.File; import java.io.IOException; @@ -44,6 +46,8 @@ import org.apache.pulsar.PulsarInitialNamespaceSetup; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.broker.resources.TenantResources; +import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; @@ -86,6 +90,40 @@ public void testReSetupClusterMetadata() throws Exception { assertEquals(data1, data3); } + @DataProvider(name = "bundleNumberForDefaultNamespace") + public static Object[][] bundleNumberForDefaultNamespace() { + return new Object[][] { { 0 }, { 128 } }; + } + + @Test(dataProvider = "bundleNumberForDefaultNamespace") + public void testSetBundleNumberForDefaultNamespace(int bundleNumber) throws Exception { + String[] args = { + "--cluster", "testSetDefaultNamespaceBundleNumber-cluster", + "--zookeeper", "127.0.0.1:" + localZkS.getZookeeperPort(), + "--configuration-store", "127.0.0.1:" + localZkS.getZookeeperPort(), + "--web-service-url", "http://127.0.0.1:8080", + "--web-service-url-tls", "https://127.0.0.1:8443", + "--broker-service-url", "pulsar://127.0.0.1:6650", + "--broker-service-url-tls","pulsar+ssl://127.0.0.1:6651", + "--default-namespace-bundle-number", String.valueOf(bundleNumber) + }; + PulsarClusterMetadataSetup.main(args); + try (ZooKeeper zk = ZooKeeperClient.newBuilder() + .connectString("127.0.0.1:" + localZkS.getZookeeperPort()) + .build()) { + Policies policies = + ObjectMapperFactory.getThreadLocal().readValue( + zk.getData("/admin/policies/public/default", false, null), + TypeFactory.defaultInstance().constructSimpleType(Policies.class, null)); + assertNotNull(policies); + if (bundleNumber > 0) { + 
assertEquals(policies.bundles.getNumBundles(), bundleNumber); + } else { + assertEquals(policies.bundles.getNumBundles(), 16); + } + } + } + @DataProvider(name = "useMetadataStoreUrl") public static Object[][] useMetadataStoreUrlDataSet() { return new Object[][] { { Boolean.TRUE }, { Boolean.FALSE } }; From 15a347ca999befe3ea3bd246d34309ad50fbcbe2 Mon Sep 17 00:00:00 2001 From: Xiaoyu Hou Date: Fri, 30 Sep 2022 10:33:37 +0800 Subject: [PATCH 38/59] [improve][java-client]Add init capacity for messages in BatchMessageContainerImpl (#17822) --- .../impl/AbstractBatchMessageContainer.java | 8 +++ .../impl/BatchMessageContainerImpl.java | 5 +- .../impl/BatchMessageContainerImplTest.java | 59 +++++++++++++++++++ 3 files changed, 70 insertions(+), 2 deletions(-) diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java index 73f1e6d088906..9b4d1b7d683dd 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.impl; +import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.List; import lombok.extern.slf4j.Slf4j; @@ -46,10 +47,12 @@ public abstract class AbstractBatchMessageContainer implements BatchMessageConta protected long currentTxnidLeastBits = -1L; protected static final int INITIAL_BATCH_BUFFER_SIZE = 1024; + protected static final int INITIAL_MESSAGES_NUM = 32; // This will be the largest size for a batch sent from this particular producer. 
This is used as a baseline to // allocate a new buffer that can hold the entire batch without needing costly reallocations protected int maxBatchSize = INITIAL_BATCH_BUFFER_SIZE; + protected int maxMessagesNum = INITIAL_MESSAGES_NUM; @Override public boolean haveEnoughSpace(MessageImpl msg) { @@ -71,6 +74,11 @@ public int getNumMessagesInBatch() { return numMessagesInBatch; } + @VisibleForTesting + public int getMaxMessagesNum() { + return maxMessagesNum; + } + @Override public long getCurrentBatchSize() { return currentBatchSizeBytes; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java index d9416b614a4b7..49cbc56d2a647 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java @@ -58,7 +58,7 @@ class BatchMessageContainerImpl extends AbstractBatchMessageContainer { @Setter private long highestSequenceId = -1L; private ByteBuf batchedMessageMetadataAndPayload; - private List> messages = new ArrayList<>(); + private List> messages = new ArrayList<>(maxMessagesNum); protected SendCallback previousCallback = null; // keep track of callbacks for individual messages being published in a batch protected SendCallback firstCallback; @@ -168,12 +168,13 @@ private ByteBuf getCompressedBatchMetadataAndPayload() { // Update the current max batch size using the uncompressed size, which is what we need in any case to // accumulate the batch content maxBatchSize = Math.max(maxBatchSize, uncompressedSize); + maxMessagesNum = Math.max(maxMessagesNum, numMessagesInBatch); return compressedPayload; } @Override public void clear() { - messages = new ArrayList<>(); + messages = new ArrayList<>(maxMessagesNum); firstCallback = null; previousCallback = null; messageMetadata.clear(); diff --git 
a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java index 13d238aba36ec..a4498b952cbf7 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java @@ -18,14 +18,18 @@ */ package org.apache.pulsar.client.impl; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import io.netty.buffer.ByteBufAllocator; +import io.netty.util.ReferenceCountUtil; import java.lang.reflect.Field; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Schema; @@ -78,4 +82,59 @@ public void recoveryAfterOom() { // after oom, our add can self-healing, won't throw exception batchMessageContainer.add(message2, null); } + + @Test + public void testMessagesSize() throws Exception { + ProducerImpl producer = mock(ProducerImpl.class); + + final ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); + producerConfigurationData.setCompressionType(CompressionType.NONE); + PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + MemoryLimitController memoryLimitController = mock(MemoryLimitController.class); + when(pulsarClient.getMemoryLimitController()).thenReturn(memoryLimitController); + try { + Field clientFiled = HandlerState.class.getDeclaredField("client"); + clientFiled.setAccessible(true); + clientFiled.set(producer, pulsarClient); + } catch (Exception e){ + Assert.fail(e.getMessage()); + 
} + + ByteBuffer payload = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8)); + + when(producer.getConfiguration()).thenReturn(producerConfigurationData); + when(producer.encryptMessage(any(), any())).thenReturn(ByteBufAllocator.DEFAULT.buffer().writeBytes(payload)); + + final int initNum = 32; + BatchMessageContainerImpl batchMessageContainer = new BatchMessageContainerImpl(producer); + assertEquals(batchMessageContainer.getMaxMessagesNum(), initNum); + + addMessagesAndCreateOpSendMsg(batchMessageContainer, 10); + assertEquals(batchMessageContainer.getMaxMessagesNum(), initNum); + + addMessagesAndCreateOpSendMsg(batchMessageContainer, 200); + assertEquals(batchMessageContainer.getMaxMessagesNum(), 200); + + addMessagesAndCreateOpSendMsg(batchMessageContainer, 10); + assertEquals(batchMessageContainer.getMaxMessagesNum(), 200); + } + + private void addMessagesAndCreateOpSendMsg(BatchMessageContainerImpl batchMessageContainer, int num) + throws Exception{ + ArrayList> messages = new ArrayList<>(); + for (int i = 0; i < num; ++i) { + MessageMetadata messageMetadata = new MessageMetadata(); + messageMetadata.setSequenceId(i); + messageMetadata.setProducerName("producer"); + messageMetadata.setPublishTime(System.currentTimeMillis()); + ByteBuffer payload = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8)); + MessageImpl message = MessageImpl.create(messageMetadata, payload, Schema.BYTES, null); + messages.add(message); + batchMessageContainer.add(message, null); + } + + batchMessageContainer.createOpSendMsg(); + batchMessageContainer.clear(); + messages.forEach(ReferenceCountUtil::safeRelease); + } } From fb7307d8f4998e42b18df3a4599fd7ec34cb04a9 Mon Sep 17 00:00:00 2001 From: Qiang Zhao Date: Fri, 30 Sep 2022 11:38:49 +0800 Subject: [PATCH 39/59] [fix][broker] Fix the wrong NIC speed rate unit. 
(#17890) --- .../org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java index 1e48052a6f736..26528491f1ebb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java @@ -177,7 +177,7 @@ public static double getTotalNicLimit(List nics, BitRateUnit bitRateUnit log.error("[LinuxInfo] Failed to get total nic limit.", e); return 0d; } - }).sum(), BitRateUnit.Bit); + }).sum(), BitRateUnit.Megabit); } /** From 85b1138593a49a29c269266ca3bc1692b00f0971 Mon Sep 17 00:00:00 2001 From: fengyubiao <9947090@qq.com> Date: Fri, 30 Sep 2022 17:32:55 +0800 Subject: [PATCH 40/59] [fix][flaky-test]NegativeAcksTest.testNegativeAcksWithBatchAckEnabled (#17893) Fixes: #16864 ### Motivation I think it is a wrong configuration(`ackTimeout 1s`) when writing the code, the original design is set `negativeAckRedeliveryDelay 1s` The process expects: - send 10 messages in one batch - submit a batch. - receive 10 messages, do negative acknowledge - after `1s`, will trigger `redelivery` - receive 10 messages again The real process: - send 1 message - Reach the batch time limit, and submit a batch. return `msgId_1` - send 9 messages in another batch - submit a batch. return `msgId_2` - receive 10 messages, do negative acknowledge - push the `msgId_1` to `negativeAcksTracker` - push the `msgId_2` to `unAckedMessageTracker` - after `1s`, will trigger redelivery `msgId_2` by `unAckedMessageTracker` - receive 9 messages( `msgId_2` ) again - after `60s`, will trigger redelivery `msgId_1` by `negativeAcksTracker`. (High light) Test execution timeout! 
- receive 1 messages( `msgId_1` ) again ### Modifications - remove conf: `ackTimeout` - set `negativeAckRedeliveryDelay 1s` ### Documentation - [x] `doc-not-needed` ### Matching PR in forked repository PR in forked repository: - https://github.com/poorbarcode/pulsar/pull/18 --- .../java/org/apache/pulsar/client/impl/NegativeAcksTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java index 769b832ab778d..34f8ff368a960 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java @@ -311,7 +311,7 @@ public void testNegativeAcksWithBatchAckEnabled() throws Exception { .acknowledgmentGroupTime(0, TimeUnit.SECONDS) .subscriptionType(SubscriptionType.Shared) .enableBatchIndexAcknowledgment(true) - .ackTimeout(1000, TimeUnit.MILLISECONDS) + .negativeAckRedeliveryDelay(1, TimeUnit.SECONDS) .subscribe(); @Cleanup From 7b2626000b3461e73fde638f37e67993f1576574 Mon Sep 17 00:00:00 2001 From: momo-jun <60642177+momo-jun@users.noreply.github.com> Date: Fri, 30 Sep 2022 19:08:59 +0800 Subject: [PATCH 41/59] [improve][doc] Improve TLS encryption (#17808) * improve TLS encryption * fix review comments * preview fix * add file and syntax for page redirection * update the process to create PEM certs * add more description for mTLS --- site2/docs/client-libraries-node.md | 2 +- site2/docs/client-libraries-python.md | 2 +- site2/docs/cookbooks-encryption.md | 333 +------------- site2/docs/security-encryption.md | 338 +++++++++++++- site2/docs/security-overview.md | 7 +- site2/docs/security-tls-authentication.md | 2 +- site2/docs/security-tls-transport.md | 526 ++++++++++------------ site2/website/sidebars.json | 6 +- 8 files changed, 590 insertions(+), 626 deletions(-) diff --git 
a/site2/docs/client-libraries-node.md b/site2/docs/client-libraries-node.md index e0739763bdb3d..c7ad568e07fe1 100644 --- a/site2/docs/client-libraries-node.md +++ b/site2/docs/client-libraries-node.md @@ -451,4 +451,4 @@ The following static methods are available for the message id object: ## End-to-end encryption -Pulsar encryption allows applications to encrypt messages at producers and decrypt messages at consumers. See [cookbook](cookbooks-encryption.md) for more details. +Pulsar encryption allows applications to encrypt messages at producers and decrypt messages at consumers. See [Get started](security-encryption.md#get-started) for more details. diff --git a/site2/docs/client-libraries-python.md b/site2/docs/client-libraries-python.md index 6d45547aab2fa..460d2ec477f8f 100644 --- a/site2/docs/client-libraries-python.md +++ b/site2/docs/client-libraries-python.md @@ -494,4 +494,4 @@ consumer = client.subscribe( ## End-to-end encryption -Pulsar encryption allows applications to encrypt messages at producers and decrypt messages at consumers. See [cookbook](cookbooks-encryption.md) for more details. \ No newline at end of file +Pulsar encryption allows applications to encrypt messages at producers and decrypt messages at consumers. See [Get started](security-encryption.md#get-started) for more details. 
\ No newline at end of file diff --git a/site2/docs/cookbooks-encryption.md b/site2/docs/cookbooks-encryption.md index 9aa321e73c07a..9ea64d2a5c026 100644 --- a/site2/docs/cookbooks-encryption.md +++ b/site2/docs/cookbooks-encryption.md @@ -1,334 +1,11 @@ --- id: cookbooks-encryption -title: Configure end-to-end encryption -sidebar_label: "Configure end-to-end encryption" +title: Pulsar Encryption +sidebar_label: "Encryption" --- ````mdx-code-block -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -```` +import {Redirect} from '@docusaurus/router'; -[Pulsar encryption](security-encryption.md) allows clients to encrypt messages at producers and decrypt messages at consumers. - -## Prerequisites - -* Pulsar Java/Python/C++/Node.js client 2.7.1 or later versions. -* Pulsar Go client 0.6.0 or later versions. - -## Configure end-to-end encryption - -1. Create both public and private key pairs. - * ECDSA(for Java and Go clients) - ```shell - openssl ecparam -name secp521r1 -genkey -param_enc explicit -out test_ecdsa_privkey.pem - openssl ec -in test_ecdsa_privkey.pem -pubout -outform pem -out test_ecdsa_pubkey.pem - ``` - - * RSA (for Python, C++ and Node.js clients) - ```shell - openssl genrsa -out test_rsa_privkey.pem 2048 - openssl rsa -in test_rsa_privkey.pem -pubout -outform pkcs8 -out test_rsa_pubkey.pem - ``` - -2. Configure a `CryptoKeyReader` on producers, consumers or readers. 
- - ````mdx-code-block - - - - ```java - PulsarClient pulsarClient = PulsarClient.builder().serviceUrl("pulsar://localhost:6650").build(); - String topic = "persistent://my-tenant/my-ns/my-topic"; - // RawFileKeyReader is just an example implementation that's not provided by Pulsar - CryptoKeyReader keyReader = new RawFileKeyReader("test_ecdsa_pubkey.pem", "test_ecdsa_privkey.pem"); - - Producer producer = pulsarClient.newProducer() - .topic(topic) - .cryptoKeyReader(keyReader) - .addEncryptionKey("myappkey") - .create(); - - Consumer consumer = pulsarClient.newConsumer() - .topic(topic) - .subscriptionName("my-subscriber-name") - .cryptoKeyReader(keyReader) - .subscribe(); - - Reader reader = pulsarClient.newReader() - .topic(topic) - .startMessageId(MessageId.earliest) - .cryptoKeyReader(keyReader) - .create(); - ``` - - - - - ```python - from pulsar import Client, CryptoKeyReader - - client = Client('pulsar://localhost:6650') - topic = 'my-topic' - # CryptoKeyReader is a built-in implementation that reads public key and private key from files - key_reader = CryptoKeyReader('test_rsa_pubkey.pem', 'test_rsa_privkey.pem') - - producer = client.create_producer( - topic=topic, - encryption_key='myappkey', - crypto_key_reader=key_reader - ) - - consumer = client.subscribe( - topic=topic, - subscription_name='my-subscriber-name', - crypto_key_reader=key_reader - ) - - reader = client.create_reader( - topic=topic, - start_message_id=MessageId.earliest, - crypto_key_reader=key_reader - ) - - client.close() - ``` - - - - - ```cpp - Client client("pulsar://localhost:6650"); - std::string topic = "persistent://my-tenant/my-ns/my-topic"; - // DefaultCryptoKeyReader is a built-in implementation that reads public key and private key from files - auto keyReader = std::make_shared("test_rsa_pubkey.pem", "test_rsa_privkey.pem"); - - Producer producer; - ProducerConfiguration producerConf; - producerConf.setCryptoKeyReader(keyReader); - producerConf.addEncryptionKey("myappkey"); - 
client.createProducer(topic, producerConf, producer); - - Consumer consumer; - ConsumerConfiguration consumerConf; - consumerConf.setCryptoKeyReader(keyReader); - client.subscribe(topic, "my-subscriber-name", consumerConf, consumer); - - Reader reader; - ReaderConfiguration readerConf; - readerConf.setCryptoKeyReader(keyReader); - client.createReader(topic, MessageId::earliest(), readerConf, reader); - ``` - - - - - ```go - client, err := pulsar.NewClient(pulsar.ClientOptions{ - URL: "pulsar://localhost:6650", - }) - if err != nil { - log.Fatal(err) - } - - defer client.Close() - - topic := "persistent://my-tenant/my-ns/my-topic" - keyReader := crypto.NewFileKeyReader("test_ecdsa_pubkey.pem", "test_ecdsa_privkey.pem") - producer, err := client.CreateProducer(pulsar.ProducerOptions{ - Topic: topic, - Encryption: &pulsar.ProducerEncryptionInfo{ - KeyReader: keyReader, - Keys: []string{"myappkey"}, - }, - }) - if err != nil { - log.Fatal(err) - } - defer producer.Close() - - consumer, err := client.Subscribe(pulsar.ConsumerOptions{ - Topic: topic, - SubscriptionName: "my-subscriber-name", - Decryption: &pulsar.MessageDecryptionInfo{ - KeyReader: keyReader, - }, - }) - if err != nil { - log.Fatal(err) - } - defer consumer.Close() - - reader, err := client.CreateReader(pulsar.ReaderOptions{ - Topic: topic, - Decryption: &pulsar.MessageDecryptionInfo{ - KeyReader: keyReader, - }, - }) - if err != nil { - log.Fatal(err) - } - defer reader.Close() - ``` - - - - - ```javascript - const Pulsar = require('pulsar-client'); - - const topic = 'persistent://my-tenant/my-ns/my-topic'; - - (async () => { - // Create a client - const client = new Pulsar.Client({ - serviceUrl: 'pulsar://localhost:6650', - operationTimeoutSeconds: 30, - }); - - // Create a producer - const producer = await client.createProducer({ - topic: topic, - sendTimeoutMs: 30000, - batchingEnabled: true, - publicKeyPath: "test_rsa_pubkey.pem", - encryptionKey: "encryption-key" - }); - - // Create a consumer - 
const consumer = await client.subscribe({ - topic: topic, - subscription: 'my-subscriber-name', - subscriptionType: 'Shared', - ackTimeoutMs: 10000, - privateKeyPath: "test_rsa_privkey.pem" - }); - await consumer.close(); - await producer.close(); - await client.close(); - })(); - ``` - - - - ```` - -3. Optional: customize the `CryptoKeyReader` implementation. - - ````mdx-code-block - - - - ```java - class RawFileKeyReader implements CryptoKeyReader { - - String publicKeyFile = ""; - String privateKeyFile = ""; - - RawFileKeyReader(String pubKeyFile, String privKeyFile) { - publicKeyFile = pubKeyFile; - privateKeyFile = privKeyFile; - } - - @Override - public EncryptionKeyInfo getPublicKey(String keyName, Map keyMeta) { - EncryptionKeyInfo keyInfo = new EncryptionKeyInfo(); - try { - keyInfo.setKey(Files.readAllBytes(Paths.get(publicKeyFile))); - } catch (IOException e) { - System.out.println("ERROR: Failed to read public key from file " + publicKeyFile); - e.printStackTrace(); - } - return keyInfo; - } - - @Override - public EncryptionKeyInfo getPrivateKey(String keyName, Map keyMeta) { - EncryptionKeyInfo keyInfo = new EncryptionKeyInfo(); - try { - keyInfo.setKey(Files.readAllBytes(Paths.get(privateKeyFile))); - } catch (IOException e) { - System.out.println("ERROR: Failed to read private key from file " + privateKeyFile); - e.printStackTrace(); - } - return keyInfo; - } - } - ``` - - - - - Currently, customizing the `CryptoKeyReader` implementation is not supported in Python. However, you can use the default implementation by specifying the path of the private key and public keys. 
- - - - - ```cpp - class CustomCryptoKeyReader : public CryptoKeyReader { - public: - Result getPublicKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const override { - // TODO - return ResultOk; - } - - Result getPrivateKey(const std::string& keyName, std::map& metadata, - EncryptionKeyInfo& encKeyInfo) const override { - // TODO - return ResultOk; - } - }; - ``` - - - - - ```go - type CustomKeyReader struct { - publicKeyPath string - privateKeyPath string - } - - func (c *CustomKeyReader) PublicKey(keyName string, keyMeta map[string]string) (*EncryptionKeyInfo, error) { - keyInfo := &EncryptionKeyInfo{} - // TODO - return keyInfo, nil - } - - // PrivateKey read private key from the given path - func (c *CustomKeyReader) PrivateKey(keyName string, keyMeta map[string]string) (*EncryptionKeyInfo, error) { - keyInfo := &EncryptionKeyInfo{} - // TODO - return keyInfo, nil - } - ``` - - - - - Currently, customizing the `CryptoKeyReader` implementation is not supported in Python. However, you can use the default implementation by specifying the path of the private key and public keys. - - - - ```` - -## Encrypt a message with multiple keys - -:::note - -This is only available for Java clients. - -::: - -You can encrypt a message with more than one key. Producers add all such keys to the config and consumers can decrypt the message as long as they have access to at least one of the keys. Any one of the keys used for encrypting the message is sufficient to decrypt the message. 
- -For example, encrypt the messages using 2 keys (`myapp.messagekey1` and `myapp.messagekey2`): - -```java -PulsarClient.newProducer().addEncryptionKey("myapp.messagekey1").addEncryptionKey("myapp.messagekey2"); -``` \ No newline at end of file + +```` \ No newline at end of file diff --git a/site2/docs/security-encryption.md b/site2/docs/security-encryption.md index af21e5a33bd50..acbe7f003b50a 100644 --- a/site2/docs/security-encryption.md +++ b/site2/docs/security-encryption.md @@ -1,6 +1,6 @@ --- id: security-encryption -title: Pulsar Encryption +title: End-to-End Encryption sidebar_label: "End-to-End Encryption" --- @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; Applications can use Pulsar end-to-end encryption (E2EE) to encrypt messages on the producer side and decrypt messages on the consumer side. You can use the public and private key pair that the application configures to perform encryption and decryption. Only the consumers with a valid key can decrypt the encrypted messages. -## How it works +## How it works in Pulsar Pulsar uses a dynamically generated symmetric AES key to encrypt messages (data). You can use the application-provided ECDSA (Elliptic Curve Digital Signature Algorithm) or RSA (Rivest–Shamir–Adleman) key pair to encrypt the AES key (data key), so you do not have to share the secret with everyone. @@ -35,7 +35,339 @@ If produced messages are consumed across application boundaries, you need to ens ## Get started -Pulsar encryption allows applications to encrypt messages on the producer side and decrypt messages on the consumer side. See [cookbook](cookbooks-encryption.md) for detailed instructions. +### Prerequisites + +* Pulsar Java/Python/C++/Node.js client 2.7.1 or later versions. +* Pulsar Go client 0.6.0 or later versions. + +### Configure end-to-end encryption + +1. Create both public and private key pairs. 
+ + ````mdx-code-block + + + + ```shell + openssl ecparam -name secp521r1 -genkey -param_enc explicit -out test_ecdsa_privkey.pem + openssl ec -in test_ecdsa_privkey.pem -pubout -outform pem -out test_ecdsa_pubkey.pem + ``` + + + + + ```shell + openssl genrsa -out test_rsa_privkey.pem 2048 + openssl rsa -in test_rsa_privkey.pem -pubout -outform pkcs8 -out test_rsa_pubkey.pem + ``` + + + + ```` + +2. Configure a `CryptoKeyReader` on producers, consumers or readers. + + ````mdx-code-block + + + + ```java + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl("pulsar://localhost:6650").build(); + String topic = "persistent://my-tenant/my-ns/my-topic"; + // RawFileKeyReader is just an example implementation that's not provided by Pulsar + CryptoKeyReader keyReader = new RawFileKeyReader("test_ecdsa_pubkey.pem", "test_ecdsa_privkey.pem"); + + Producer producer = pulsarClient.newProducer() + .topic(topic) + .cryptoKeyReader(keyReader) + .addEncryptionKey("myappkey") + .create(); + + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("my-subscriber-name") + .cryptoKeyReader(keyReader) + .subscribe(); + + Reader reader = pulsarClient.newReader() + .topic(topic) + .startMessageId(MessageId.earliest) + .cryptoKeyReader(keyReader) + .create(); + ``` + + + + + ```python + from pulsar import Client, CryptoKeyReader + + client = Client('pulsar://localhost:6650') + topic = 'my-topic' + # CryptoKeyReader is a built-in implementation that reads public key and private key from files + key_reader = CryptoKeyReader('test_rsa_pubkey.pem', 'test_rsa_privkey.pem') + + producer = client.create_producer( + topic=topic, + encryption_key='myappkey', + crypto_key_reader=key_reader + ) + + consumer = client.subscribe( + topic=topic, + subscription_name='my-subscriber-name', + crypto_key_reader=key_reader + ) + + reader = client.create_reader( + topic=topic, + start_message_id=MessageId.earliest, + crypto_key_reader=key_reader + ) + + client.close() + 
``` + + + + + ```cpp + Client client("pulsar://localhost:6650"); + std::string topic = "persistent://my-tenant/my-ns/my-topic"; + // DefaultCryptoKeyReader is a built-in implementation that reads public key and private key from files + auto keyReader = std::make_shared("test_rsa_pubkey.pem", "test_rsa_privkey.pem"); + + Producer producer; + ProducerConfiguration producerConf; + producerConf.setCryptoKeyReader(keyReader); + producerConf.addEncryptionKey("myappkey"); + client.createProducer(topic, producerConf, producer); + + Consumer consumer; + ConsumerConfiguration consumerConf; + consumerConf.setCryptoKeyReader(keyReader); + client.subscribe(topic, "my-subscriber-name", consumerConf, consumer); + + Reader reader; + ReaderConfiguration readerConf; + readerConf.setCryptoKeyReader(keyReader); + client.createReader(topic, MessageId::earliest(), readerConf, reader); + ``` + + + + + ```go + client, err := pulsar.NewClient(pulsar.ClientOptions{ + URL: "pulsar://localhost:6650", + }) + if err != nil { + log.Fatal(err) + } + + defer client.Close() + + topic := "persistent://my-tenant/my-ns/my-topic" + keyReader := crypto.NewFileKeyReader("test_ecdsa_pubkey.pem", "test_ecdsa_privkey.pem") + producer, err := client.CreateProducer(pulsar.ProducerOptions{ + Topic: topic, + Encryption: &pulsar.ProducerEncryptionInfo{ + KeyReader: keyReader, + Keys: []string{"myappkey"}, + }, + }) + if err != nil { + log.Fatal(err) + } + defer producer.Close() + + consumer, err := client.Subscribe(pulsar.ConsumerOptions{ + Topic: topic, + SubscriptionName: "my-subscriber-name", + Decryption: &pulsar.MessageDecryptionInfo{ + KeyReader: keyReader, + }, + }) + if err != nil { + log.Fatal(err) + } + defer consumer.Close() + + reader, err := client.CreateReader(pulsar.ReaderOptions{ + Topic: topic, + Decryption: &pulsar.MessageDecryptionInfo{ + KeyReader: keyReader, + }, + }) + if err != nil { + log.Fatal(err) + } + defer reader.Close() + ``` + + + + + ```javascript + const Pulsar = 
require('pulsar-client'); + + const topic = 'persistent://my-tenant/my-ns/my-topic'; + + (async () => { + // Create a client + const client = new Pulsar.Client({ + serviceUrl: 'pulsar://localhost:6650', + operationTimeoutSeconds: 30, + }); + + // Create a producer + const producer = await client.createProducer({ + topic: topic, + sendTimeoutMs: 30000, + batchingEnabled: true, + publicKeyPath: "test_rsa_pubkey.pem", + encryptionKey: "encryption-key" + }); + + // Create a consumer + const consumer = await client.subscribe({ + topic: topic, + subscription: 'my-subscriber-name', + subscriptionType: 'Shared', + ackTimeoutMs: 10000, + privateKeyPath: "test_rsa_privkey.pem" + }); + await consumer.close(); + await producer.close(); + await client.close(); + })(); + ``` + + + + ```` + +3. Optional: customize the `CryptoKeyReader` implementation. + + ````mdx-code-block + + + + ```java + class RawFileKeyReader implements CryptoKeyReader { + + String publicKeyFile = ""; + String privateKeyFile = ""; + + RawFileKeyReader(String pubKeyFile, String privKeyFile) { + publicKeyFile = pubKeyFile; + privateKeyFile = privKeyFile; + } + + @Override + public EncryptionKeyInfo getPublicKey(String keyName, Map keyMeta) { + EncryptionKeyInfo keyInfo = new EncryptionKeyInfo(); + try { + keyInfo.setKey(Files.readAllBytes(Paths.get(publicKeyFile))); + } catch (IOException e) { + System.out.println("ERROR: Failed to read public key from file " + publicKeyFile); + e.printStackTrace(); + } + return keyInfo; + } + + @Override + public EncryptionKeyInfo getPrivateKey(String keyName, Map keyMeta) { + EncryptionKeyInfo keyInfo = new EncryptionKeyInfo(); + try { + keyInfo.setKey(Files.readAllBytes(Paths.get(privateKeyFile))); + } catch (IOException e) { + System.out.println("ERROR: Failed to read private key from file " + privateKeyFile); + e.printStackTrace(); + } + return keyInfo; + } + } + ``` + + + + + Currently, customizing the `CryptoKeyReader` implementation is not supported in Python. 
However, you can use the default implementation by specifying the path of the private key and public keys. + + + + + ```cpp + class CustomCryptoKeyReader : public CryptoKeyReader { + public: + Result getPublicKey(const std::string& keyName, std::map& metadata, + EncryptionKeyInfo& encKeyInfo) const override { + // TODO + return ResultOk; + } + + Result getPrivateKey(const std::string& keyName, std::map& metadata, + EncryptionKeyInfo& encKeyInfo) const override { + // TODO + return ResultOk; + } + }; + ``` + + + + + ```go + type CustomKeyReader struct { + publicKeyPath string + privateKeyPath string + } + + func (c *CustomKeyReader) PublicKey(keyName string, keyMeta map[string]string) (*EncryptionKeyInfo, error) { + keyInfo := &EncryptionKeyInfo{} + // TODO + return keyInfo, nil + } + + // PrivateKey read private key from the given path + func (c *CustomKeyReader) PrivateKey(keyName string, keyMeta map[string]string) (*EncryptionKeyInfo, error) { + keyInfo := &EncryptionKeyInfo{} + // TODO + return keyInfo, nil + } + ``` + + + + + Currently, customizing the `CryptoKeyReader` implementation is not supported in Node.js. However, you can use the default implementation by specifying the path of the private key and public keys. + + + + ```` + +### Encrypt a message with multiple keys + +:::note + +This is only available for Java clients. + +::: + +You can encrypt a message with more than one key. Producers add all such keys to the config and consumers can decrypt the message as long as they have access to at least one of the keys. Any one of the keys used for encrypting the message is sufficient to decrypt the message. 
+ +For example, encrypt the messages using 2 keys (`myapp.messagekey1` and `myapp.messagekey2`): + +```java +PulsarClient.newProducer().addEncryptionKey("myapp.messagekey1").addEncryptionKey("myapp.messagekey2"); +``` ## Troubleshoot diff --git a/site2/docs/security-overview.md b/site2/docs/security-overview.md index 658612df4c73c..3e9a90874bd54 100644 --- a/site2/docs/security-overview.md +++ b/site2/docs/security-overview.md @@ -17,9 +17,10 @@ Apache Pulsar uses an [Authentication Provider](#authentication) or an [Authenti ## Encryption -Encryption ensures that if an attacker gets access to your data, the attacker cannot read the data without also having access to the encryption keys. Encryption provides an important mechanism for protecting your data at-rest and in-transit to meet your security requirements for cryptographic algorithms and key management. +Encryption ensures that if an attacker gets access to your data, the attacker cannot read the data without also having access to the encryption keys. Encryption provides an important mechanism for protecting your data in-transit to meet your security requirements for cryptographic algorithms and key management. **What's next?** + * To configure end-to-end encryption, see [End-to-end encryption](security-encryption.md) for more details. * To configure transport layer encryption, see [TLS encryption](security-tls-transport.md) for more details. @@ -34,14 +35,14 @@ Pulsar broker validates the authentication credentials when a connection is esta Pulsar broker supports learning whether a particular client supports authentication refreshing. If a client supports authentication refreshing and the credential is expired, the authentication provider calls the `refreshAuthentication` method to initiate the refreshing process. If a client does not support authentication refreshing and the credential is expired, the broker disconnects the client. 
**What's next?** -Currently, Pulsar supports the following authentication providers: + +Pulsar supports the following authentication providers, and you can configure multiple authentication providers. - [TLS authentication](security-tls-authentication.md) - [Athenz authentication](security-athenz.md) - [Kerberos authentication](security-kerberos.md) - [JSON Web Token (JWT) authentication](security-jwt.md) - [OAuth 2.0 authentication](security-oauth2.md) - [HTTP basic authentication](security-basic-auth.md) -You can also configure Pulsar to support multiple authentication providers. :::note diff --git a/site2/docs/security-tls-authentication.md b/site2/docs/security-tls-authentication.md index f2e393abd74f6..ad8aa2f50c999 100644 --- a/site2/docs/security-tls-authentication.md +++ b/site2/docs/security-tls-authentication.md @@ -43,7 +43,7 @@ openssl req -config openssl.cnf \ :::note -If `openssl.cnf` is not specified, read [Certificate authority](security-tls-transport.md#certificate-authority) to get `openssl.cnf`. +If `openssl.cnf` is not specified, read [Certificate authority](security-tls-transport.md#create-a-certificate-authority) to get `openssl.cnf`. ::: diff --git a/site2/docs/security-tls-transport.md b/site2/docs/security-tls-transport.md index 453b6ce608abc..0597083430461 100644 --- a/site2/docs/security-tls-transport.md +++ b/site2/docs/security-tls-transport.md @@ -1,7 +1,7 @@ --- id: security-tls-transport -title: Transport Encryption using TLS -sidebar_label: "Transport Encryption using TLS" +title: TLS Encryption +sidebar_label: "TLS Encryption" --- @@ -12,169 +12,178 @@ import TabItem from '@theme/TabItem'; ## TLS overview -By default, Apache Pulsar clients communicate with the Apache Pulsar service in plain text. This means that all data is sent in the clear. You can use TLS to encrypt this traffic to protect the traffic from the snooping of a man-in-the-middle attacker. 
+Transport Layer Security (TLS) is a form of [public key cryptography](https://en.wikipedia.org/wiki/Public-key_cryptography). By default, Pulsar clients communicate with Pulsar services in plain text. This means that all data is sent in the clear. You can use TLS to encrypt this traffic to protect the traffic from the snooping of a man-in-the-middle attacker. -You can also configure TLS for both encryption and authentication. Use this guide to configure just TLS transport encryption and refer to [here](security-tls-authentication.md) for TLS authentication configuration. Alternatively, you can use [another authentication mechanism](security-athenz.md) on top of TLS transport encryption. +This section introduces how to configure TLS encryption in Pulsar. For how to configure TLS authentication in Pulsar, refer to [TLS authentication](security-tls-authentication.md). Alternatively, you can use another authentication mechanism, such as [Athenz authentication](security-athenz.md), on top of TLS transport encryption. -> Note that enabling TLS may impact the performance due to encryption overhead. +:::note -## TLS concepts +Enabling TLS encryption may impact the performance due to encryption overhead. -TLS is a form of [public key cryptography](https://en.wikipedia.org/wiki/Public-key_cryptography). Using key pairs consisting of a public key and a private key can perform the encryption. The public key encrypts the messages and the private key decrypts the messages. +::: -To use TLS transport encryption, you need two kinds of key pairs, **server key pairs** and a **certificate authority**. +### TLS certificates -You can use a third kind of key pair, **client key pairs**, for [client authentication](security-tls-authentication.md). +TLS certificates include the following three types. Each certificate (key pair) contains both a public key that encrypts messages and a private key that decrypts messages. +* Certificate Authority (CA) + * CA private key is used for signing a certificate for either broker or clients. 
+ * CA public key (**trust cert**) is distributed to all parties involved. +* Server key pairs +* Client key pairs (for mutual TLS) -You should store the **certificate authority** private key in a very secure location (a fully encrypted, disconnected, air gapped computer). As for the certificate authority public key, the **trust cert**, you can freely share it. +For both server and client certificates, the private key with a certificate request is generated first, and the public key (the certificate) is generated after the CA signs the certificate request. When [TLS authentication](security-tls-authentication.md) is enabled, the server uses the **trust cert** to verify that the client has a key pair that the certificate authority signs. The Common Name (CN) of a client certificate is used as the client's role token, while the Subject Alternative Name (SAN) of a server certificate is used for [Hostname verification](#hostname-verification). -For both client and server key pairs, the administrator first generates a private key and a certificate request, then uses the certificate authority private key to sign the certificate request, and finally generates a certificate. This certificate is the public key for the server/client key pair. +:::note -For TLS transport encryption, the clients can use the **trust cert** to verify that the server has a key pair that the certificate authority signs when the clients are talking to the server. A man-in-the-middle attacker does not have access to the certificate authority, so they couldn't create a server with such a key pair. +The validity of these certificates is 365 days. It's highly recommended to use `sha256` or `sha512` as the signature algorithm, while `sha1` is not supported. -For TLS authentication, the server uses the **trust cert** to verify that the client has a key pair that the certificate authority signed. 
The common name of the **client cert** is then used as the client's role token (see [Overview](security-overview.md)). +::: -Pulsar uses [netty-tcnative](https://github.com/netty/netty-tcnative) and [Conscrypt](https://github.com/google/conscrypt) as security providers. There are two certificate formats: -* Java KeyStore(JKS): Pulsar uses Conscrypt by default for both broker service and Web service. -* CAcerts: Pulsar uses netty-tcnative by default, which includes two implementations, OpenSSL (default) and JDK. When OpenSSL is unavailable, JDK is used. +### Certificate formats -## Create TLS certificates +You can use either one of the following certificate formats to configure TLS encryption: +* Recommended: Privacy Enhanced Mail (PEM). + See [Configure TLS encryption with PEM](#configure-tls-encryption-with-pem) for detailed instructions. +* Optional: Java [KeyStore](https://en.wikipedia.org/wiki/Java_KeyStore) (JKS). + See [Configure TLS encryption with KeyStore](#configure-tls-encryption-with-keystore) for detailed instructions. -Creating TLS certificates for Pulsar involves creating a [certificate authority](#certificate-authority) (CA), [server certificate](#server-certificate), and [client certificate](#client-certificate). +### Hostname verification -Follow the guide below to set up a certificate authority. You can also refer to plenty of resources on the internet for more details. We recommend [this guide](https://jamielinux.com/docs/openssl-certificate-authority/index.html) for your detailed reference. +Hostname verification is a TLS security feature whereby a client can refuse to connect to a server if the Subject Alternative Name (SAN) does not match the hostname that the hostname is connecting to. -### Certificate authority +By default, Pulsar clients disable hostname verification, as it requires that each broker has a DNS record and a unique cert. -1. Create the certificate for the CA. You can use CA to sign both the broker and client certificates. 
This ensures that each party will trust the others. You should store CA in a very secure location (ideally completely disconnected from networks, air gapped, and fully encrypted). +One scenario where you may want to enable hostname verification is where you have multiple proxy nodes behind a VIP, and the VIP has a DNS record, for example, `pulsar.mycompany.com`. In this case, you can generate a TLS cert with `pulsar.mycompany.com` as the SAN, and then enable hostname verification on the client. -2. Entering the following command to create a directory for your CA, and place [this openssl configuration file](https://github.com/apache/pulsar/tree/master/site2/website/static/examples/openssl.cnf) in the directory. You may want to modify the default answers for company name and department in the configuration file. Export the location of the CA directory to the environment variable, CA_HOME. The configuration file uses this environment variable to find the rest of the files and directories that the CA needs. +To enable hostname verification in Pulsar, ensure that SAN exactly matches the fully qualified domain name (FQDN) of the server. The client compares the SAN with the DNS domain name to ensure that it is connecting to the desired server. See [Configure clients](security-tls-transport.md#configure-clients) for more details. -```bash -mkdir my-ca -cd my-ca -wget https://raw.githubusercontent.com/apache/pulsar-site/main/site2/website/static/examples/openssl.cnf -export CA_HOME=$(pwd) -``` +Moreover, as the administrator has full control of the CA, a bad actor is unlikely to be able to pull off a man-in-the-middle attack. `allowInsecureConnection` allows the client to connect to servers whose cert has not been signed by an approved CA. The client disables `allowInsecureConnection` by default, and you should always disable `allowInsecureConnection` in production environments. 
As long as you disable `allowInsecureConnection`, a man-in-the-middle attack requires that the attacker has access to the CA. -3. Enter the commands below to create the necessary directories, keys and certs. +## Configure TLS encryption with PEM -```bash -mkdir certs crl newcerts private -chmod 700 private/ -touch index.txt -echo 1000 > serial -openssl genrsa -aes256 -out private/ca.key.pem 4096 -# You need enter a password in the command above -chmod 400 private/ca.key.pem -openssl req -config openssl.cnf -key private/ca.key.pem \ - -new -x509 -days 7300 -sha256 -extensions v3_ca \ - -out certs/ca.cert.pem -# You must enter the same password in the previous openssl command -chmod 444 certs/ca.cert.pem -``` +By default, Pulsar uses [netty-tcnative](https://github.com/netty/netty-tcnative). It includes two implementations, `OpenSSL` (default) and `JDK`. When `OpenSSL` is unavailable, `JDK` is used. -:::tip +### Create TLS certificates -The default `openssl` on macOS doesn't work for the commands above. You must upgrade the `openssl` via Homebrew: +Creating TLS certificates involves creating a [certificate authority](#create-a-certificate-authority), a [server certificate](#create-a-server-certificate), and a [client certificate](#create-a-client-certificate). -```bash -brew install openssl -export PATH="/usr/local/Cellar/openssl@3/3.0.1/bin:$PATH" -``` +#### Create a certificate authority -The version `3.0.1` might change in the future. Use the actual path from the output of `brew install` command. +You can use a certificate authority (CA) to sign both server and client certificates. This ensures that each party trusts the others. Store CA in a very secure location (ideally completely disconnected from networks, air-gapped, and fully encrypted). -::: +Use the following command to create a CA. -4. After you answer the question prompts, CA-related files are stored in the `./my-ca` directory. 
Within that directory: + ```bash + openssl genrsa -out ca.key.pem 2048 + openssl req -x509 -new -nodes -key ca.key.pem -subj "/CN=CARoot" -days 365 -out ca.cert.pem + ``` -* `certs/ca.cert.pem` is the public certificate. This public certificate is meant to be distributed to all parties involved. -* `private/ca.key.pem` is the private key. You only need it when you are signing a new certificate for either broker or clients and you must safely guard this private key. + :::note -### Server certificate + The default `openssl` on macOS doesn't work for the commands above. You need to upgrade `openssl` via Homebrew: -Once you have created a CA certificate, you can create certificate requests and sign them with the CA. + ```bash + brew install openssl + export PATH="/usr/local/Cellar/openssl@3/3.0.1/bin:$PATH" + ``` -The following commands ask you a few questions and then create the certificates. When you are asked for the common name, you should match the hostname of the broker. You can also use a wildcard to match a group of broker hostnames, for example, `*.broker.usw.example.com`. This ensures that multiple machines can reuse the same certificate. + Use the actual path from the output of the `brew install` command. Note that version number `3.0.1` might change. -:::tip + ::: -Sometimes matching the hostname is not possible or makes no sense, -such as when you create the brokers with random hostnames, or you -plan to connect to the hosts via their IP. In these cases, you -should configure the client to disable TLS hostname verification. For more -details, you can see [the host verification section in client configuration](#hostname-verification). +#### Create a server certificate -::: +Once you have created a CA, you can create certificate requests and sign them with the CA. -1. Enter the command below to generate the key. +1. Generate the server's private key. 
-```bash -openssl genrsa -out broker.key.pem 2048 -``` + ```bash + openssl genrsa -out broker.key.pem 2048 + ``` -The broker expects the key to be in [PKCS 8](https://en.wikipedia.org/wiki/PKCS_8) format, so enter the following command to convert it. + The broker expects the key to be in [PKCS 8](https://en.wikipedia.org/wiki/PKCS_8) format. Enter the following command to convert it. -```bash -openssl pkcs8 -topk8 -inform PEM -outform PEM \ - -in broker.key.pem -out broker.key-pk8.pem -nocrypt -``` + ```bash + openssl pkcs8 -topk8 -inform PEM -outform PEM -in broker.key.pem -out broker.key-pk8.pem -nocrypt + ``` -2. Enter the following command to generate the certificate request. +2. Create a `broker.conf` file with the following content: -```bash -openssl req -config openssl.cnf \ - -key broker.key.pem -new -sha256 -out broker.csr.pem -``` + ```properties + [ req ] + default_bits = 2048 + prompt = no + default_md = sha256 + distinguished_name = dn + + [ v3_ext ] + authorityKeyIdentifier=keyid,issuer:always + basicConstraints=CA:FALSE + keyUsage=critical, digitalSignature, keyEncipherment + extendedKeyUsage=serverAuth + subjectAltName=@alt_names + + [ dn ] + CN = broker + + [ alt_names ] + DNS.1 = pulsar + DNS.2 = pulsar.default + IP.1 = 127.0.0.1 + IP.2 = 192.168.1.2 + ``` + + :::tip -3. Sign it with the certificate authority by entering the command below. + To configure [hostname verification](#hostname-verification), you need to enter the hostname of the broker in `alt_names` as the Subject Alternative Name (SAN). To ensure that multiple machines can reuse the same certificate, you can also use a wildcard to match a group of broker hostnames, for example, `*.broker.usw.example.com`. 
-```bash -openssl ca -config openssl.cnf -extensions server_cert \ - -days 1000 -notext -md sha256 \ - -in broker.csr.pem -out broker.cert.pem -``` + ::: -At this point, you have a cert, `broker.cert.pem`, and a key, `broker.key-pk8.pem`, which you can use along with `ca.cert.pem` to configure TLS transport encryption for your broker and proxy nodes. +3. Generate the certificate request. -### Client certificate + ```bash + openssl req -new -config broker.conf -key broker.key.pem -out broker.csr.pem -sha256 + ``` -1. Enter the command below to generate the key. +4. Sign the certificate with the CA. -```bash -openssl genrsa -out client.key.pem 2048 -``` + ```bash + openssl x509 -req -in broker.csr.pem -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial -out broker.cert.pem -days 365 -extensions v3_ext -extfile broker.conf -sha256 + ``` -The client expects the key to be in [PKCS 8](https://en.wikipedia.org/wiki/PKCS_8) format, so enter the following command to convert it. +At this point, you have a cert, `broker.cert.pem`, and a key, `broker.key-pk8.pem`, which you can use along with `ca.cert.pem` to configure TLS encryption for your brokers and proxies. -```bash -openssl pkcs8 -topk8 -inform PEM -outform PEM \ - -in client.key.pem -out client.key-pk8.pem -nocrypt -``` +#### Create a client certificate -2. Enter the following command to generate the certificate request. +1. Generate the client's private key. -```bash -openssl req -config openssl.cnf \ - -key client.key.pem -new -sha256 -out client.csr.pem -``` + ```bash + openssl genrsa -out client.key.pem 2048 + ``` -3. Sign it with the certificate authority by entering the command below. + The client expects the key to be in [PKCS 8](https://en.wikipedia.org/wiki/PKCS_8) format. Enter the following command to convert it. 
-```bash -openssl ca -config openssl.cnf -extensions client_cert \ - -days 1000 -notext -md sha256 \ - -in client.csr.pem -out client.cert.pem -``` + ```bash + openssl pkcs8 -topk8 -inform PEM -outform PEM -in client.key.pem -out client.key-pk8.pem -nocrypt + ``` + +2. Generate the certificate request. Note that the value of `CN` is used as the client's role token. -At this point, you have a cert `client.cert.pem` and a key `client.key-pk8.pem`, which you can use along with `ca.cert.pem` to configure TLS encryption for your client. + ```bash + openssl req -new -subj "/CN=client" -key client.key.pem -out client.csr.pem -sha256 + ``` + +3. Sign the certificate with the CA. + + ```bash + openssl x509 -req -in client.csr.pem -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial -out client.cert.pem -days 365 -sha256 + ``` -## Configure brokers +At this point, you have a cert `client.cert.pem` and a key `client.key-pk8.pem`, which you can use along with `ca.cert.pem` to configure TLS encryption for your clients. -To configure a Pulsar [broker](reference-terminology.md#broker) to use TLS transport encryption, you need to make some changes to `broker.conf`, which locates in the `conf` directory of your [Pulsar installation](getting-started-standalone.md). +### Configure brokers -Add these values to the configuration file (substituting the appropriate certificate paths where necessary): +To configure a Pulsar [broker](reference-terminology.md#broker) to use TLS encryption, you need to add these values to `broker.conf` in the `conf` directory of your Pulsar installation. Substitute the appropriate certificate paths where necessary. 
```properties brokerServicePortTls=6651 @@ -190,29 +199,29 @@ brokerClientCertificateFilePath=/path/to/client.cert.pem brokerClientKeyFilePath=/path/to/client.key-pk8.pem ``` -> You can find a full list of parameters available in the `conf/broker.conf` file, -> as well as the default values for those parameters, in [Broker Configuration](reference-configuration.md#broker) +#### Configure TLS Protocol Version and Cipher -### TLS Protocol Version and Cipher +To configure the broker (and proxy) to require specific TLS protocol versions and ciphers for TLS negotiation, you can use the TLS protocol versions and ciphers to stop clients from requesting downgraded TLS protocol versions or ciphers that may have weaknesses. -You can configure the broker (and proxy) to require specific TLS protocol versions and ciphers for TLS negiotation. You can use the TLS protocol versions and ciphers to stop clients from requesting downgraded TLS protocol versions or ciphers that may have weaknesses. +By default, Pulsar uses OpenSSL when it is available, otherwise, Pulsar defaults back to the JDK implementation. OpenSSL currently supports `TLSv1.1`, `TLSv1.2` and `TLSv1.3`. You can acquire a list of supported ciphers from the OpenSSL ciphers command, i.e. `openssl ciphers -tls1_3`. -Both the TLS protocol versions and cipher properties can take multiple values, separated by commas. The possible values for protocol version and ciphers depend on the TLS provider that you are using. Pulsar uses OpenSSL if the OpenSSL is available, but if the OpenSSL is not available, Pulsar defaults back to the JDK implementation. +Both the TLS protocol versions and cipher properties can take multiple values, separated by commas. The possible values for protocol versions and ciphers depend on the TLS provider that you are using. 
```properties tlsProtocols=TLSv1.3,TLSv1.2 tlsCiphers=TLS_DH_RSA_WITH_AES_256_GCM_SHA384,TLS_DH_RSA_WITH_AES_256_CBC_SHA ``` -OpenSSL currently supports `TLSv1.1`, `TLSv1.2` and `TLSv1.3` for the protocol version. You can acquire a list of supported cipher from the openssl ciphers command, i.e. `openssl ciphers -tls1_3`. +* `tlsProtocols=TLSv1.3,TLSv1.2`: List out the TLS protocols that you are going to accept from clients. By default, it is not set. +* `tlsCiphers=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`: A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS network protocol. By default, it is null. See [OpenSSL Ciphers](https://www.openssl.org/docs/man1.0.2/apps/ciphers.html) and [JDK Ciphers](http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites) for more details. For JDK 11, you can obtain a list of supported values from the documentation: - [TLS protocol](https://docs.oracle.com/en/java/javase/11/security/oracle-providers.html#GUID-7093246A-31A3-4304-AC5F-5FB6400405E2__SUNJSSEPROVIDERPROTOCOLPARAMETERS-BBF75009) - [Ciphers](https://docs.oracle.com/en/java/javase/11/security/oracle-providers.html#GUID-7093246A-31A3-4304-AC5F-5FB6400405E2__SUNJSSE_CIPHER_SUITES) -## Configure proxies +### Configure proxies -Proxies need to configure TLS in two directions, for clients connecting to the proxy, and for the proxy connecting to brokers. +Configuring TLS on proxies includes two directions of connections, from clients to proxies, and from proxies to brokers. 
```properties servicePortTls=6651 @@ -231,21 +240,13 @@ brokerClientCertificateFilePath=/path/to/client.cert.pem brokerClientKeyFilePath=/path/to/client.key-pk8.pem ``` -## Configure clients - -When you enable the TLS transport encryption, you need to configure the client to use `https://` and port 8443 for the web service URL, and `pulsar+ssl://` and port 6651 for the broker service URL. - -As the server certificate that you generated above does not belong to any of the default trust chains, you also need to either specify the path of the **trust cert** (recommended), or tell the client to allow untrusted server certs. - -### Hostname verification - -Hostname verification is a TLS security feature whereby a client can refuse to connect to a server if the "CommonName" does not match the hostname to which the hostname is connecting. By default, Pulsar clients disable hostname verification, as it requires that each broker has a DNS record and a unique cert. +### Configure clients -Moreover, as the administrator has full control of the certificate authority, a bad actor is unlikely to be able to pull off a man-in-the-middle attack. "allowInsecureConnection" allows the client to connect to servers whose cert has not been signed by an approved CA. The client disables "allowInsecureConnection" by default, and you should always disable "allowInsecureConnection" in production environments. As long as you disable "allowInsecureConnection", a man-in-the-middle attack requires that the attacker has access to the CA. +To enable TLS encryption, you need to configure the clients to use `https://` with port 8443 for the web service URL, and `pulsar+ssl://` with port 6651 for the broker service URL. -One scenario where you may want to enable hostname verification is where you have multiple proxy nodes behind a VIP, and the VIP has a DNS record, for example, pulsar.mycompany.com. 
In this case, you can generate a TLS cert with pulsar.mycompany.com as the "CommonName," and then enable hostname verification on the client. +As the server certificate that you generated above does not belong to any of the default trust chains, you also need to either specify the path of the **trust cert** (recommended) or enable the clients to allow untrusted server certs. -The examples below show that hostname verification is disabled for Java/Python/C++/Node.js/C# clients by default. +The following examples show how to configure TLS encryption for Java/Python/C++/Node.js/C# clients. ````mdx-code-block ```` -## Configure TLS encryption in CLI tools +### Configure CLI tools [Command-line tools](reference-cli-tools.md) like [`pulsar-admin`](reference-cli-tools.md#pulsar-admin), [`pulsar-perf`](reference-cli-tools.md#pulsar-perf), and [`pulsar-client`](reference-cli-tools.md#pulsar-client) use the `conf/client.conf` config file in a Pulsar installation. -To use TLS encryption with the CLI tools of Pulsar, you need to add the following parameters to the `conf/client.conf` file. +To use TLS encryption with Pulsar CLI tools, you need to add the following parameters to the `conf/client.conf` file. ```properties webServiceUrl=https://broker.example.com:8443/ @@ -345,115 +346,54 @@ tlsEnableHostnameVerification=false ## Configure TLS encryption with KeyStore -Apache Pulsar supports [TLS encryption](security-tls-transport.md) and [TLS authentication](security-tls-authentication.md) between clients and Apache Pulsar service. By default, it uses PEM format file configuration. This section tries to describe how to use [KeyStore](https://en.wikipedia.org/wiki/Java_KeyStore) type to configure TLS. - -### Generate TLS key and certificate - -The first step of deploying TLS is to generate the key and the certificate for each machine in the cluster. You can use Java’s `keytool` utility to accomplish this task. 
We will generate the key into a temporary keystore initially for broker, so that we can export and sign it later with CA. - -```shell -keytool -keystore broker.keystore.jks -alias localhost -validity {validity} -genkeypair -keyalg RSA -``` - -You need to specify two parameters in the above command: - -1. `keystore`: the keystore file that stores the certificate. The *keystore* file contains the private key of - the certificate; hence, it needs to be kept safely. -2. `validity`: the valid time of the certificate in days. - -> Ensure that common name (CN) matches exactly with the fully qualified domain name (FQDN) of the server. -The client compares the CN with the DNS domain name to ensure that it is indeed connecting to the desired server, not a malicious one. - -### Create your own CA - -After the first step, each broker in the cluster has a public-private key pair, and a certificate to identify the machine. -The certificate, however, is unsigned, which means that an attacker can create such a certificate to pretend to be any machine. - -Therefore, it is important to prevent forged certificates by signing them for each machine in the cluster. -A `certificate authority (CA)` is responsible for signing certificates. CA works like a government that issues passports — -the government stamps (signs) each passport so that the passport becomes difficult to forge. Other governments verify the stamps -to ensure the passport is authentic. Similarly, the CA signs the certificates, and the cryptography guarantees that a signed -certificate is computationally difficult to forge. Thus, as long as the CA is a genuine and trusted authority, the clients have -high assurance that they are connecting to authentic machines. - -```shell -openssl req -new -x509 -keyout ca-key -out ca-cert -days 365 -``` +By default, Pulsar uses [Conscrypt](https://github.com/google/conscrypt) for both broker service and Web service. 
-The generated CA is simply a *public-private* key pair and certificate, and it is intended to sign other certificates. +### Generate JKS certificate -The next step is to add the generated CA to the clients' truststore so that the clients can trust this CA: +You can use Java’s `keytool` utility to generate the key and certificate for each machine in the cluster. -```shell -keytool -keystore client.truststore.jks -alias CARoot -import -file ca-cert +```bash +DAYS=365 +CLIENT_COMMON_PARAMS="-storetype JKS -storepass clientpw -keypass clientpw -noprompt" +BROKER_COMMON_PARAMS="-storetype JKS -storepass brokerpw -keypass brokerpw -noprompt" + +# create keystore +keytool -genkeypair -keystore broker.keystore.jks ${BROKER_COMMON_PARAMS} -keyalg RSA -keysize 2048 -alias broker -validity $DAYS \ +-dname 'CN=broker,OU=Unknown,O=Unknown,L=Unknown,ST=Unknown,C=Unknown' +keytool -genkeypair -keystore client.keystore.jks ${CLIENT_COMMON_PARAMS} -keyalg RSA -keysize 2048 -alias client -validity $DAYS \ +-dname 'CN=client,OU=Unknown,O=Unknown,L=Unknown,ST=Unknown,C=Unknown' + +# export certificate +keytool -exportcert -keystore broker.keystore.jks ${BROKER_COMMON_PARAMS} -file broker.cer -alias broker +keytool -exportcert -keystore client.keystore.jks ${CLIENT_COMMON_PARAMS} -file client.cer -alias client + +# generate truststore +keytool -importcert -keystore client.truststore.jks ${CLIENT_COMMON_PARAMS} -file broker.cer -alias truststore +keytool -importcert -keystore broker.truststore.jks ${BROKER_COMMON_PARAMS} -file client.cer -alias truststore ``` - + :::note - -If you configure the brokers to require client authentication by setting `tlsRequireTrustedClientCertOnConnect` to `true` on the broker configuration, then you must also provide a truststore for the brokers and it should have all the CA certificates that clients keys were signed by. 
+ +To configure [hostname verification](#hostname-verification), you need to append ` -ext SAN=IP:127.0.0.1,IP:192.168.20.2,DNS:broker.example.com` to the value of `BROKER_COMMON_PARAMS` as the Subject Alternative Name (SAN). ::: -```shell -keytool -keystore broker.truststore.jks -alias CARoot -import -file ca-cert -``` - -In contrast to the keystore, which stores each machine’s own identity, the truststore of a client stores all the certificates -that the client should trust. Importing a certificate into one’s truststore also means trusting all certificates that are signed -by that certificate. As the analogy above, trusting the government (CA) also means trusting all passports (certificates) that -it has issued. This attribute is called the chain of trust, and it is particularly useful when deploying TLS on a large BookKeeper cluster. -You can sign all certificates in the cluster with a single CA, and have all machines share the same truststore that trusts the CA. -That way all machines can authenticate all other machines. - - -### Sign the certificate - -The next step is to sign all certificates in the keystore with the CA we generated. - -1. Export the certificate from the keystore: - -```shell -keytool -keystore broker.keystore.jks -alias localhost -certreq -file cert-file -keytool -keystore client.keystore.jks -alias localhost -certreq -file cert-file -``` - -2. Sign it with the CA: - -```shell -openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days {validity} -CAcreateserial -passin pass:{ca-password} -``` - -3. 
Import both the certificate of the CA and the signed certificate into the keystore: - -```shell -keytool -keystore broker.keystore.jks -alias CARoot -import -file ca-cert -keytool -keystore broker.keystore.jks -alias localhost -import -file cert-signed - -keytool -keystore client.keystore.jks -alias CARoot -import -file ca-cert -keytool -keystore client.keystore.jks -alias localhost -import -file cert-signed -``` - -The definitions of the parameters are the following: - -1. `keystore`: the location of the keystore -2. `ca-cert`: the certificate of the CA -3. `ca-key`: the private key of the CA -4. `ca-password`: the passphrase of the CA -5. `cert-file`: the exported, unsigned certificate of the broker -6. `cert-signed`: the signed certificate of the broker ### Configure brokers -Brokers enable TLS by providing valid `brokerServicePortTls` and `webServicePortTls`, and also set `tlsEnabledWithKeyStore` to `true` for using KeyStore type configuration. Besides this, KeyStore path, KeyStore password, TrustStore path, and TrustStore password need to be provided. And since brokers create internal client/admin client to communicate with other brokers, users also need to provide config for them, this is similar to how users configure the outside client/admin-client. If `tlsRequireTrustedClientCertOnConnect` is `true`, brokers reject the Connection if the Client Certificate is not trusted. - -The following TLS configs are needed on the broker side: +Configure the following parameters in the `conf/broker.conf` file and restrict access to the store files via filesystem permissions. ```properties brokerServicePortTls=6651 webServicePortTls=8081 +# Trusted client certificates are required to connect TLS +# Reject the Connection if the Client Certificate is not trusted. +# In effect, this requires that all connecting clients perform TLS client +# authentication. 
tlsRequireTrustedClientCertOnConnect=true tlsEnabledWithKeyStore=true + # key store tlsKeyStoreType=JKS tlsKeyStore=/var/private/tls/broker.keystore.jks @@ -475,46 +415,98 @@ brokerClientTlsKeyStore=/var/private/tls/client.keystore.jks brokerClientTlsKeyStorePassword=clientpw ``` +To disable non-TLS ports, you need to set the values of `brokerServicePort` and `webServicePort` to empty. + :::note -It is important to restrict access to the store files via filesystem permissions. +The default value of `tlsRequireTrustedClientCertOnConnect` is `false`. When it's enabled for mutual TLS, brokers/proxies require trusted client certificates; otherwise, brokers/proxies reject connection requests from clients. ::: -If you have configured TLS on the broker, to disable non-TLS ports, you can set the values of the following configurations to empty as below. +### Configure proxies -```conf -brokerServicePort= -webServicePort= +Configuring TLS on proxies includes two directions of connections, from clients to proxies, and from proxies to brokers. + +```properties +servicePortTls=6651 +webServicePortTls=8081 + +tlsRequireTrustedClientCertOnConnect=true + +# keystore +tlsKeyStoreType=JKS +tlsKeyStore=/var/private/tls/proxy.keystore.jks +tlsKeyStorePassword=brokerpw + +# truststore +tlsTrustStoreType=JKS +tlsTrustStore=/var/private/tls/proxy.truststore.jks +tlsTrustStorePassword=brokerpw + +# internal client/admin-client config +tlsEnabledWithKeyStore=true +brokerClientTlsEnabled=true +brokerClientTlsEnabledWithKeyStore=true +brokerClientTlsTrustStoreType=JKS +brokerClientTlsTrustStore=/var/private/tls/client.truststore.jks +brokerClientTlsTrustStorePassword=clientpw +brokerClientTlsKeyStoreType=JKS +brokerClientTlsKeyStore=/var/private/tls/client.keystore.jks +brokerClientTlsKeyStorePassword=clientpw ``` -In this case, you need to set the following configurations. 
+### Configure clients -```properties -brokerClientTlsEnabled=true // Set this to true -brokerClientTlsEnabledWithKeyStore=true // Set this to true -brokerClientTlsTrustStore= // Set this to your desired value -brokerClientTlsTrustStorePassword= // Set this to your desired value +Similar to [Configure TLS encryption with PEM](security-tls-transport.md#configure-clients), you need to provide the TrustStore information for a minimal configuration. + +The following is an example. + +````mdx-code-block + + + +```java + import org.apache.pulsar.client.api.PulsarClient; + + PulsarClient client = PulsarClient.builder() + .serviceUrl("pulsar+ssl://broker.example.com:6651/") + .useKeyStoreTls(true) + .tlsTrustStoreType("JKS") + .tlsTrustStorePath("/var/private/tls/client.truststore.jks") + .tlsTrustStorePassword("clientpw") + .tlsKeyStoreType("JKS") + .tlsKeyStorePath("/var/private/tls/client.keystore.jks") + .tlsKeyStorePassword("clientpw") + .enableTlsHostnameVerification(false) // false by default, in any case + .allowTlsInsecureConnection(false) // false by default, in any case + .build(); ``` -Optional settings that may worth considering: + + -1. tlsClientAuthentication=false: Enable/Disable using TLS for authentication. This config when enabled will authenticate the other end - of the communication channel. It should be enabled on both brokers and clients for mutual TLS. -2. tlsCiphers=[TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256], A cipher suite is a named combination of authentication, encryption, MAC and key exchange - algorithm used to negotiate the security settings for a network connection using TLS network protocol. By default, - it is null. [OpenSSL Ciphers](https://www.openssl.org/docs/man1.0.2/apps/ciphers.html) - [JDK Ciphers](http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites) -3. tlsProtocols=[TLSv1.3,TLSv1.2] (list out the TLS protocols that you are going to accept from clients). - By default, it is not set. 
+```java
+  PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl("https://broker.example.com:8443")
+      .tlsTrustStoreType("JKS")
+      .tlsTrustStorePath("/var/private/tls/client.truststore.jks")
+      .tlsTrustStorePassword("clientpw")
+      .tlsKeyStoreType("JKS")
+      .tlsKeyStorePath("/var/private/tls/client.keystore.jks")
+      .tlsKeyStorePassword("clientpw")
+      .enableTlsHostnameVerification(false) // false by default, in any case
+      .allowTlsInsecureConnection(false) // false by default, in any case
+      .build();
+```
-
+
+</TabItem>
+
+</Tabs>
+````
-### Configure Clients
+### Configure CLI tools
-This is similar to [TLS encryption configurations for clients with PEM type](security-tls-transport.md#configure-clients).
-For a minimal configuration, you need to provide the TrustStore information.
-For example:
-1. for [Command-line tools](reference-cli-tools.md) like [`pulsar-admin`](reference-cli-tools#pulsar-admin), [`pulsar-perf`](reference-cli-tools#pulsar-perf), and [`pulsar-client`](reference-cli-tools#pulsar-client) use the `conf/client.conf` config file in a Pulsar installation.
+For [Command-line tools](reference-cli-tools.md) like [`pulsar-admin`](reference-cli-tools#pulsar-admin), [`pulsar-perf`](reference-cli-tools#pulsar-perf), and [`pulsar-client`](reference-cli-tools#pulsar-client) use the `conf/client.conf` config file in a Pulsar installation.

 ```properties
 webServiceUrl=https://broker.example.com:8443/
@@ -528,43 +520,9 @@ For example:
   keyStorePassword=clientpw
 ```

-2. 
for Java client - - ```java - import org.apache.pulsar.client.api.PulsarClient; - - PulsarClient client = PulsarClient.builder() - .serviceUrl("pulsar+ssl://broker.example.com:6651/") - .useKeyStoreTls(true) - .tlsTrustStoreType("JKS") - .tlsTrustStorePath("/var/private/tls/client.truststore.jks") - .tlsTrustStorePassword("clientpw") - .tlsKeyStoreType("JKS") - .tlsKeyStorePath("/var/private/tls/client.keystore.jks") - .tlsKeyStorePassword("clientpw") - .enableTlsHostnameVerification(false) // false by default, in any case - .allowTlsInsecureConnection(false) // false by default, in any case - .build(); - ``` - -3. for Java admin client - - ```java - PulsarAdmin amdin = PulsarAdmin.builder().serviceHttpUrl("https://broker.example.com:8443") - .tlsTrustStoreType("JKS") - .tlsTrustStorePath("/var/private/tls/client.truststore.jks") - .tlsTrustStorePassword("clientpw") - .tlsKeyStoreType("JKS") - .tlsKeyStorePath("/var/private/tls/client.keystore.jks") - .tlsKeyStorePassword("clientpw") - .enableTlsHostnameVerification(false) // false by default, in any case - .allowTlsInsecureConnection(false) // false by default, in any case - .build(); - ``` - :::note -Configure `tlsTrustStorePath` when you set `useKeyStoreTls` to `true`. +If you set `useKeyStoreTls` to `true`, be sure to configure `tlsTrustStorePath`. ::: @@ -576,4 +534,4 @@ You can enable TLS debug logging at the JVM level by starting the brokers and/or -Djavax.net.debug=all ``` -You can find more details on this in [Oracle documentation](http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/ReadDebug.html) on [debugging SSL/TLS connections](http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/ReadDebug.html). +For more details, see [Oracle documentation](http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/ReadDebug.html). 
diff --git a/site2/website/sidebars.json b/site2/website/sidebars.json index 1284959aa3153..fa21f2fbceac2 100644 --- a/site2/website/sidebars.json +++ b/site2/website/sidebars.json @@ -266,11 +266,8 @@ { "type": "category", "label": "Encryption", - "link": { - "type": "doc", - "id": "security-encryption" - }, "items": [ + "security-encryption", "security-tls-transport", "security-bouncy-castle" ] @@ -347,7 +344,6 @@ "cookbooks-deduplication", "cookbooks-non-persistent", "cookbooks-retention-expiry", - "cookbooks-encryption", "cookbooks-message-queue", "cookbooks-bookkeepermetadata" ] From 74d6305b748568ac2fa389e4639be0586ef3c1ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Boschi?= Date: Fri, 30 Sep 2022 13:46:28 +0200 Subject: [PATCH 42/59] [fix] Remove pulsar-broker-common dependency from pulsar-client (#17855) * [fix] Remove pulsar-broker-common dependency from pulsar-client * fix newline * add enforcer rule * Move packages-core to jdk8 bytecode * checkstyle * use variables * style * Fix annotation discovery * Fix kafka module compile --- pom.xml | 3 +- .../utils/CmdGenerateDocumentation.java | 2 +- pulsar-client-all/pom.xml | 27 ++++ pulsar-client/pom.xml | 7 - .../impl/conf/CmdGenerateDocumentation.java | 2 +- .../util}/BaseGenerateDocumentation.java | 129 ++++++++++++------ pulsar-io/kafka/pom.xml | 10 ++ pulsar-package-management/core/pom.xml | 8 ++ .../proxy/util/CmdGenerateDocumentation.java | 2 +- 9 files changed, 139 insertions(+), 51 deletions(-) rename {pulsar-broker-common/src/main/java/org/apache/pulsar/broker => pulsar-common/src/main/java/org/apache/pulsar/common/util}/BaseGenerateDocumentation.java (63%) diff --git a/pom.xml b/pom.xml index 8bb53b47943b3..a32dd8f377aac 100644 --- a/pom.xml +++ b/pom.xml @@ -254,7 +254,7 @@ flexible messaging model and an intuitive client API. 3.0.0 4.0.rc2 1.0 - 3.0.0 + 3.1.0 @@ -281,6 +281,7 @@ flexible messaging model and an intuitive client API. 
0.4 7.1.0 0.9.15 + 1.6.1 rename-netty-native-libs.sh diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/utils/CmdGenerateDocumentation.java b/pulsar-broker/src/main/java/org/apache/pulsar/utils/CmdGenerateDocumentation.java index 3f5df60faea69..c784ff84408d1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/utils/CmdGenerateDocumentation.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/utils/CmdGenerateDocumentation.java @@ -21,9 +21,9 @@ import com.beust.jcommander.Parameters; import lombok.Data; import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.broker.BaseGenerateDocumentation; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.common.util.BaseGenerateDocumentation; import org.apache.pulsar.websocket.service.WebSocketProxyConfiguration; @Data diff --git a/pulsar-client-all/pom.xml b/pulsar-client-all/pom.xml index 6645488d70471..1adaafbb90dff 100644 --- a/pulsar-client-all/pom.xml +++ b/pulsar-client-all/pom.xml @@ -397,6 +397,33 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + ${maven-enforcer-plugin.version} + + + enforce-bytecode-version + + enforce + + + + + ${pulsar.client.compiler.release} + + + + + + + + org.codehaus.mojo + extra-enforcer-rules + ${extra-enforcer-rules.version} + + + diff --git a/pulsar-client/pom.xml b/pulsar-client/pom.xml index c0b21c64e6d87..8cf75e89f52ea 100644 --- a/pulsar-client/pom.xml +++ b/pulsar-client/pom.xml @@ -45,13 +45,6 @@ ${project.parent.version} - - org.apache.pulsar - pulsar-broker-common - ${project.parent.version} - compile - - ${project.groupId} bouncy-castle-bc diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/CmdGenerateDocumentation.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/CmdGenerateDocumentation.java index 14059c0db6453..28ad0263cf6c4 100644 --- 
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/CmdGenerateDocumentation.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/CmdGenerateDocumentation.java @@ -21,7 +21,7 @@ import com.beust.jcommander.Parameters; import lombok.Data; import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.broker.BaseGenerateDocumentation; +import org.apache.pulsar.common.util.BaseGenerateDocumentation; @Data @Parameters(commandDescription = "Generate documentation automatically.") diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/BaseGenerateDocumentation.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/BaseGenerateDocumentation.java similarity index 63% rename from pulsar-broker-common/src/main/java/org/apache/pulsar/broker/BaseGenerateDocumentation.java rename to pulsar-common/src/main/java/org/apache/pulsar/common/util/BaseGenerateDocumentation.java index 99b2e7971855f..c253474755a69 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/BaseGenerateDocumentation.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/BaseGenerateDocumentation.java @@ -16,23 +16,27 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.pulsar.broker; +package org.apache.pulsar.common.util; import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; import com.beust.jcommander.Parameters; import io.swagger.annotations.ApiModelProperty; +import java.io.Serializable; +import java.lang.annotation.Annotation; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.function.Predicate; +import java.util.stream.Collectors; import lombok.Data; +import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.pulsar.common.configuration.FieldContext; +import org.apache.commons.lang3.reflect.MethodUtils; +import org.apache.commons.lang3.tuple.Pair; @Data @Parameters(commandDescription = "Generate documentation automatically.") @@ -72,7 +76,7 @@ public boolean run(String[] args) throws Exception { jcommander.usage(); return false; } - if (!CollectionUtils.isEmpty(classNames)) { + if (classNames != null) { for (String className : classNames) { System.out.println(generateDocumentByClassName(className)); } @@ -82,21 +86,6 @@ public boolean run(String[] args) throws Exception { protected abstract String generateDocumentByClassName(String className) throws Exception; - protected Predicate isRequired = field -> { - FieldContext fieldContext = field.getAnnotation(FieldContext.class); - return fieldContext.required(); - }; - - protected Predicate isOptional = field -> { - FieldContext fieldContext = field.getAnnotation(FieldContext.class); - return !fieldContext.deprecated() && !fieldContext.required(); - }; - - protected Predicate isDeprecated = field -> { - FieldContext fieldContext = field.getAnnotation(FieldContext.class); - return fieldContext.deprecated(); - }; - protected Predicate isRequiredApiModel = field -> { ApiModelProperty modelProperty = 
field.getAnnotation(ApiModelProperty.class); return modelProperty.required(); @@ -107,11 +96,62 @@ public boolean run(String[] args) throws Exception { return !modelProperty.required(); }; - protected void writeDocListByFieldContext(List fieldList, StringBuilder sb, Object obj) throws Exception { - for (Field field : fieldList) { - FieldContext fieldContext = field.getAnnotation(FieldContext.class); + private Annotation getFieldContextAnnotation(Field field) { + for (Annotation annotation : field.getAnnotations()) { + if (annotation.annotationType().getCanonicalName() + .equals("org.apache.pulsar.common.configuration.FieldContext")) { + return annotation; + } + } + return null; + } + + private static class FieldContextWrapper { + private final Object fieldContext; + + public FieldContextWrapper(Object fieldContext) { + this.fieldContext = fieldContext; + } + + @SneakyThrows + String doc() { + return (String) MethodUtils.invokeMethod(fieldContext, "doc"); + } + + @SneakyThrows + Class type() { + return (Class) MethodUtils.invokeMethod(fieldContext, "type"); + } + + @SneakyThrows + boolean required() { + return (boolean) MethodUtils.invokeMethod(fieldContext, "required"); + } + + @SneakyThrows + boolean deprecated() { + return (boolean) MethodUtils.invokeMethod(fieldContext, "deprecated"); + } + + @SneakyThrows + boolean dynamic() { + return (boolean) MethodUtils.invokeMethod(fieldContext, "dynamic"); + } + + @SneakyThrows + String category() { + return (String) MethodUtils.invokeMethod(fieldContext, "category"); + } + } + + protected void writeDocListByFieldContext(List> fieldList, + StringBuilder sb, Object obj) throws Exception { + for (Pair fieldPair : fieldList) { + FieldContextWrapper fieldContext = fieldPair.getValue(); + final Field field = fieldPair.getKey(); field.setAccessible(true); + sb.append("### ").append(field.getName()).append("\n"); sb.append(fieldContext.doc().replace(">", "\\>")).append("\n\n"); sb.append("**Type**: 
`").append(field.getType().getCanonicalName()).append("`\n\n"); @@ -134,37 +174,46 @@ protected void writeDocListByApiModel(List fieldList, StringBuilder sb, O } } - protected static class CategoryComparator implements Comparator { + protected static class CategoryComparator implements Comparator>, Serializable { @Override - public int compare(Field o1, Field o2) { - FieldContext o1Context = o1.getAnnotation(FieldContext.class); - FieldContext o2Context = o2.getAnnotation(FieldContext.class); + public int compare(Pair o1, Pair o2) { + FieldContextWrapper o1Context = o1.getValue(); + FieldContextWrapper o2Context = o2.getValue(); if (o1Context.category().equals(o2Context.category())) { - return o1.getName().compareTo(o2.getName()); + return o1.getKey().getName().compareTo(o2.getKey().getName()); } return o1Context.category().compareTo(o2Context.category()); } } - protected String prefix = """ - !> This page is automatically generated from code files. - If you find something inaccurate, feel free to update `"""; - protected String suffix = """ - `. 
- """; + protected String prefix = "\n" + + "!> This page is automatically generated from code files.\n" + + "If you find something inaccurate, feel free to update `"; + protected String suffix = "\n`.\n"; + protected String generateDocByFieldContext(String className, String type, StringBuilder sb) throws Exception { Class clazz = Class.forName(className); Object obj = clazz.getDeclaredConstructor().newInstance(); Field[] fields = clazz.getDeclaredFields(); - ArrayList fieldList = new ArrayList<>(Arrays.asList(fields)); + List> fieldList = new ArrayList<>(fields.length); + for (Field field : fields) { + final Annotation fieldContextAnnotation = getFieldContextAnnotation(field); - fieldList.removeIf(f -> f.getAnnotation(FieldContext.class) == null); + if (fieldContextAnnotation != null) { + fieldList.add(Pair.of(field, new FieldContextWrapper(fieldContextAnnotation))); + } + } fieldList.sort(new CategoryComparator()); - List requiredFields = fieldList.stream().filter(isRequired).toList(); - List optionalFields = fieldList.stream().filter(isOptional).toList(); - List deprecatedFields = fieldList.stream().filter(isDeprecated).toList(); + List> requiredFields = + fieldList.stream().filter(p -> p.getValue().required()).collect(Collectors.toList()); + List> optionalFields = + fieldList.stream().filter(p -> !p.getValue().required() && !p.getValue().deprecated()) + .collect(Collectors.toList()); + List> deprecatedFields = + fieldList.stream().filter(p -> p.getValue().deprecated()).collect(Collectors.toList()); + sb.append("# ").append(type).append("\n"); @@ -187,8 +236,8 @@ protected String generateDocByApiModelProperty(String className, String type, St fieldList.removeIf(f -> f.getAnnotation(ApiModelProperty.class) == null); fieldList.sort(Comparator.comparing(Field::getName)); - List requiredFields = fieldList.stream().filter(isRequiredApiModel).toList(); - List optionalFields = fieldList.stream().filter(isOptionalApiModel).toList(); + List requiredFields = 
fieldList.stream().filter(isRequiredApiModel).collect(Collectors.toList()); + List optionalFields = fieldList.stream().filter(isOptionalApiModel).collect(Collectors.toList()); sb.append("# ").append(type).append("\n"); sb.append(prefix).append(className).append(suffix); diff --git a/pulsar-io/kafka/pom.xml b/pulsar-io/kafka/pom.xml index f285f684c9ecf..ca7485510c864 100644 --- a/pulsar-io/kafka/pom.xml +++ b/pulsar-io/kafka/pom.xml @@ -103,6 +103,16 @@ ${kafka.confluent.avroserializer.version} + + io.jsonwebtoken + jjwt-impl + + + + io.jsonwebtoken + jjwt-jackson + + diff --git a/pulsar-package-management/core/pom.xml b/pulsar-package-management/core/pom.xml index fb5b5671bc6c0..b4834c72b43a7 100644 --- a/pulsar-package-management/core/pom.xml +++ b/pulsar-package-management/core/pom.xml @@ -52,6 +52,14 @@ + + org.apache.maven.plugins + maven-compiler-plugin + + + ${pulsar.client.compiler.release} + + org.gaul modernizer-maven-plugin diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/util/CmdGenerateDocumentation.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/util/CmdGenerateDocumentation.java index 8b77b3857af4a..fd9da218d9c45 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/util/CmdGenerateDocumentation.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/util/CmdGenerateDocumentation.java @@ -21,7 +21,7 @@ import com.beust.jcommander.Parameters; import lombok.Data; import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.broker.BaseGenerateDocumentation; +import org.apache.pulsar.common.util.BaseGenerateDocumentation; import org.apache.pulsar.proxy.server.ProxyConfiguration; @Data From 2891d5c58afaef31714f6266f560e8f1d23da06f Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: Sat, 1 Oct 2022 00:37:58 +0800 Subject: [PATCH 43/59] [fix][managed-ledger]fix typo error in ManagedCursorContainer #17509 --- .../bookkeeper/mledger/impl/ManagedCursorContainer.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainer.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainer.java index b5a5be733a136..293ce8d2fcbc2 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainer.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainer.java @@ -286,7 +286,7 @@ public void remove() { // ////////////////////// /** - * Push the item up towards the the root of the tree (lowest reading position). + * Push the item up towards the root of the tree (the lowest reading position). */ private void siftUp(Item item) { Item parent = getParent(item); @@ -297,7 +297,7 @@ private void siftUp(Item item) { } /** - * Push the item down towards the bottom of the tree (highest reading position). + * Push the item down towards the bottom of the tree (the highest reading position). */ private void siftDown(final Item item) { while (true) { From c0b3039e3b27d4c707cf6036699cd01758656bcb Mon Sep 17 00:00:00 2001 From: Lari Hotari Date: Fri, 30 Sep 2022 19:59:27 +0300 Subject: [PATCH 44/59] [fix][CI] Don't run pulsar-ci-flaky workflow for PRs which contain only document changes (#17906) - fix typo --- .github/workflows/pulsar-ci-flaky.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pulsar-ci-flaky.yaml b/.github/workflows/pulsar-ci-flaky.yaml index 040bc4328b3bc..3e8a3eb46ba0c 100644 --- a/.github/workflows/pulsar-ci-flaky.yaml +++ b/.github/workflows/pulsar-ci-flaky.yaml @@ -71,7 +71,7 @@ jobs: name: Flaky tests suite runs-on: ubuntu-20.04 timeout-minutes: 100 - if: ${{ needs.changed_files_job.outputs.docs_only != 'true' != 'true' }} + if: ${{ needs.changed_files_job.outputs.docs_only != 'true' }} steps: - name: checkout uses: actions/checkout@v2 From 6651bbbab5b33f09cdde83de048d8116b2835de6 Mon Sep 17 00:00:00 2001 From: Christophe Bornet Date: Fri, 30 Sep 
2022 22:05:35 +0200 Subject: [PATCH 45/59] [fix][functions] Fix the download of builtin Functions (#17877) --- .../worker/rest/api/ComponentImpl.java | 46 +++-- .../api/v3/FunctionApiV3ResourceTest.java | 171 +++++++++++------- 2 files changed, 136 insertions(+), 81 deletions(-) diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java index 132641c8f01c7..7cd35352bd21f 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java @@ -46,7 +46,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Objects; -import java.util.TreeMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -102,7 +101,7 @@ import org.apache.pulsar.functions.utils.FunctionConfigUtils; import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; import org.apache.pulsar.functions.utils.functions.FunctionArchive; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.FunctionRuntimeInfo; import org.apache.pulsar.functions.worker.FunctionRuntimeManager; @@ -1473,10 +1472,18 @@ public StreamingOutput downloadFunction(String tenant, String namespace, String ? functionMetaData.getTransformFunctionPackageLocation().getPackagePath() : functionMetaData.getPackageLocation().getPackagePath(); - return getStreamingOutput(pkgPath); + FunctionDetails.ComponentType componentType = transformFunction + ? 
FunctionDetails.ComponentType.FUNCTION + : InstanceUtils.calculateSubjectType(functionMetaData.getFunctionDetails()); + + return getStreamingOutput(pkgPath, componentType); } private StreamingOutput getStreamingOutput(String pkgPath) { + return getStreamingOutput(pkgPath, null); + } + + private StreamingOutput getStreamingOutput(String pkgPath, FunctionDetails.ComponentType componentType) { return output -> { if (pkgPath.startsWith(Utils.HTTP)) { URL url = URI.create(pkgPath).toURL(); @@ -1489,15 +1496,7 @@ private StreamingOutput getStreamingOutput(String pkgPath) { Files.copy(file.toPath(), output); } else if (pkgPath.startsWith(Utils.BUILTIN) && !worker().getWorkerConfig().getUploadBuiltinSinksSources()) { - String sType = pkgPath.replaceFirst("^builtin://", ""); - final String connectorsDir = worker().getWorkerConfig().getConnectorsDirectory(); - log.warn("Processing package {} ; looking at the dir {}", pkgPath, connectorsDir); - TreeMap sinksOrSources = - FunctionUtils.searchForFunctions(connectorsDir, true); - Path narPath = sinksOrSources.get(sType).getArchivePath(); - if (narPath == null) { - throw new IllegalStateException("Didn't find " + pkgPath + " in " + connectorsDir); - } + Path narPath = getBuiltinArchivePath(pkgPath, componentType); log.info("Loading {} from {}", pkgPath, narPath); try (InputStream in = new FileInputStream(narPath.toString())) { IOUtils.copy(in, output, 1024); @@ -1511,7 +1510,7 @@ private StreamingOutput getStreamingOutput(String pkgPath) { output.flush(); } } catch (Exception e) { - log.error("Failed download package {} from packageMangment Service", pkgPath, e); + log.error("Failed download package {} from packageManagement Service", pkgPath, e); } } else { @@ -1520,6 +1519,27 @@ private StreamingOutput getStreamingOutput(String pkgPath) { }; } + private Path getBuiltinArchivePath(String pkgPath, FunctionDetails.ComponentType componentType) { + String type = pkgPath.replaceFirst("^builtin://", ""); + if 
(!FunctionDetails.ComponentType.FUNCTION.equals(componentType)) { + Connector connector = worker().getConnectorsManager().getConnector(type); + if (connector != null) { + return connector.getArchivePath(); + } + if (componentType != null) { + throw new IllegalStateException("Didn't find " + type + " in built-in connectors"); + } + } + FunctionArchive function = worker().getFunctionsManager().getFunction(type); + if (function != null) { + return function.getArchivePath(); + } + if (componentType != null) { + throw new IllegalStateException("Didn't find " + type + " in built-in functions"); + } + throw new IllegalStateException("Didn't find " + type + " in built-in connectors or functions"); + } + @Override public StreamingOutput downloadFunction( final String path, String clientRole, AuthenticationDataSource clientAuthenticationDataHttps) { diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java index 8d19869b4738a..fb09a4026a59c 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java @@ -19,7 +19,6 @@ package org.apache.pulsar.functions.worker.rest.api.v3; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doNothing; @@ -29,7 +28,6 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; import com.google.common.collect.Lists; import java.io.File; import java.io.FileInputStream; @@ -44,7 +42,6 
@@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.TreeMap; import java.util.UUID; import java.util.function.Consumer; import javax.ws.rs.core.Response; @@ -78,7 +75,8 @@ import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.FunctionConfigUtils; import org.apache.pulsar.functions.utils.functions.FunctionArchive; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.apache.pulsar.functions.utils.io.Connector; +import org.apache.pulsar.functions.worker.ConnectorsManager; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.FunctionRuntimeManager; import org.apache.pulsar.functions.worker.FunctionsManager; @@ -1604,20 +1602,13 @@ public void testDownloadFunctionHttpUrl() throws Exception { String jarHttpUrl = "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - PulsarWorkerService worker = mock(PulsarWorkerService.class); - doReturn(true).when(worker).isInitialized(); - WorkerConfig config = mock(WorkerConfig.class); - when(config.isAuthorizationEnabled()).thenReturn(false); - when(worker.getWorkerConfig()).thenReturn(config); - FunctionsImpl function = new FunctionsImpl(() -> worker); - StreamingOutput streamOutput = function.downloadFunction(jarHttpUrl, null, null); + + StreamingOutput streamOutput = resource.downloadFunction(jarHttpUrl, null, null); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } + pkgFile.delete(); } @Test @@ -1626,53 +1617,61 @@ public void testDownloadFunctionFile() throws Exception { File file = Paths.get(fileUrl.toURI()).toFile(); String 
fileLocation = file.getAbsolutePath().replace('\\', '/'); String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - PulsarWorkerService worker = mock(PulsarWorkerService.class); - doReturn(true).when(worker).isInitialized(); - WorkerConfig config = mock(WorkerConfig.class); - when(config.isAuthorizationEnabled()).thenReturn(false); - when(worker.getWorkerConfig()).thenReturn(config); - FunctionsImpl function = new FunctionsImpl(() -> worker); - StreamingOutput streamOutput = function.downloadFunction("file:///" + fileLocation, null, null); + + StreamingOutput streamOutput = resource.downloadFunction("file:///" + fileLocation, null, null); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); } @Test - public void testDownloadFunctionBuiltin() throws Exception { - mockStatic(WorkerUtils.class, ctx -> { - }); - + public void testDownloadFunctionBuiltinConnector() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - PulsarWorkerService worker = mock(PulsarWorkerService.class); - doReturn(true).when(worker).isInitialized(); + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(config); - WorkerConfig config = mock(WorkerConfig.class); - when(config.isAuthorizationEnabled()).thenReturn(false); - when(config.getUploadBuiltinSinksSources()).thenReturn(false); - when(config.getConnectorsDirectory()).thenReturn("/connectors"); + Connector connector = 
Connector.builder().archivePath(file.toPath()).build(); + ConnectorsManager connectorsManager = mock(ConnectorsManager.class); + when(connectorsManager.getConnector("cassandra")).thenReturn(connector); + when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); - when(worker.getDlogNamespace()).thenReturn(mock(Namespace.class)); - when(worker.getWorkerConfig()).thenReturn(config); - FunctionsImpl function = new FunctionsImpl(() -> worker); + StreamingOutput streamOutput = resource.downloadFunction("builtin://cassandra", null, null); - TreeMap functions = new TreeMap<>(); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - functions.put("cassandra", functionArchive); + File pkgFile = new File(testDir, UUID.randomUUID().toString()); + OutputStream output = new FileOutputStream(pkgFile); + streamOutput.write(output); + output.flush(); + output.close(); + Assert.assertTrue(pkgFile.exists()); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } - mockStatic(FunctionUtils.class, ctx -> { - ctx.when(() -> FunctionUtils.searchForFunctions(anyString(), anyBoolean())).thenReturn(functions); + @Test + public void testDownloadFunctionBuiltinFunction() throws Exception { + URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); + File file = Paths.get(fileUrl.toURI()).toFile(); + String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - }); + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(config); + + FunctionsManager functionsManager = mock(FunctionsManager.class); + FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); + when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); + 
when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); + when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); - StreamingOutput streamOutput = function.downloadFunction("builtin://cassandra", null, null); + StreamingOutput streamOutput = resource.downloadFunction("builtin://exclamation", null, null); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); @@ -1680,71 +1679,107 @@ public void testDownloadFunctionBuiltin() throws Exception { output.flush(); output.close(); Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); - } else { - fail("expected file"); - } + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); } @Test - public void testDownloadFunctionByName() throws Exception { + public void testDownloadFunctionBuiltinConnectorByName() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - doReturn(true).when(mockedWorkerService).isInitialized(); - WorkerConfig config = mock(WorkerConfig.class); - when(config.isAuthorizationEnabled()).thenReturn(false); + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(config); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("file:///" + fileLocation)) + .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("builtin://cassandra")) 
.setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setFunctionDetails(FunctionDetails.newBuilder().setComponentType(FunctionDetails.ComponentType.SINK)) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); + Connector connector = Connector.builder().archivePath(file.toPath()).build(); + ConnectorsManager connectorsManager = mock(ConnectorsManager.class); + when(connectorsManager.getConnector("cassandra")).thenReturn(connector); + when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); + StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, null, null, false); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); } @Test - public void testDownloadTransformFunctionByName() throws Exception { + public void testDownloadFunctionBuiltinFunctionByName() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - doReturn(true).when(mockedWorkerService).isInitialized(); - WorkerConfig config = mock(WorkerConfig.class); - when(config.isAuthorizationEnabled()).thenReturn(false); + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(config); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + FunctionMetaData metaData = FunctionMetaData.newBuilder() + 
.setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("builtin://exclamation")) + .setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setFunctionDetails(FunctionDetails.newBuilder().setComponentType(FunctionDetails.ComponentType.FUNCTION)) + .build(); + when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); + + FunctionsManager functionsManager = mock(FunctionsManager.class); + FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); + when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); + when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); + when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + + StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, null, null, false); + File pkgFile = new File(testDir, UUID.randomUUID().toString()); + OutputStream output = new FileOutputStream(pkgFile); + streamOutput.write(output); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testDownloadTransformFunctionByName() throws Exception { + URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); + File file = Paths.get(fileUrl.toURI()).toFile(); + String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + + WorkerConfig workerConfig = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + FunctionMetaData metaData = FunctionMetaData.newBuilder() .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) 
.setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder() - .setPackagePath("file:///" + fileLocation)) + .setPackagePath("builtin://exclamation")) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); + FunctionsManager functionsManager = mock(FunctionsManager.class); + FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); + when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); + when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); + when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, null, null, true); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); } From 8c1152ceb44b3ae65a8411e00d5abb63ce079969 Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: Sat, 1 Oct 2022 17:07:00 +0800 Subject: [PATCH 46/59] [improve][broker] Add active status into cursor stats (#17884) --- .../org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java | 1 + .../apache/pulsar/broker/service/persistent/PersistentTopic.java | 1 + .../apache/pulsar/stats/client/PulsarBrokerStatsClientTest.java | 1 + .../pulsar/common/policies/data/ManagedLedgerInternalStats.java | 1 + 4 files changed, 4 insertions(+) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index 254ee767bc7fc..53bccfadbab22 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -4116,6 +4116,7 @@ public CompletableFuture getManagedLedgerInternalSta cs.individuallyDeletedMessages = cursor.getIndividuallyDeletedMessages(); cs.lastLedgerSwitchTimestamp = DateFormatter.format(cursor.getLastLedgerSwitchTimestamp()); cs.state = cursor.getState(); + cs.active = cursor.isActive(); cs.numberOfEntriesSinceFirstNotAckedMessage = cursor.getNumberOfEntriesSinceFirstNotAckedMessage(); cs.totalNonContiguousDeletedMessagesRange = cursor.getTotalNonContiguousDeletedMessagesRange(); cs.properties = cursor.getProperties(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index fdcaaf2ffbdb3..fb1c521bb00b0 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -2210,6 +2210,7 @@ public CompletableFuture getInternalStats(boolean cs.individuallyDeletedMessages = cursor.getIndividuallyDeletedMessages(); cs.lastLedgerSwitchTimestamp = DateFormatter.format(cursor.getLastLedgerSwitchTimestamp()); cs.state = cursor.getState(); + cs.active = cursor.isActive(); cs.numberOfEntriesSinceFirstNotAckedMessage = cursor.getNumberOfEntriesSinceFirstNotAckedMessage(); cs.totalNonContiguousDeletedMessagesRange = cursor.getTotalNonContiguousDeletedMessagesRange(); cs.properties = cursor.getProperties(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/stats/client/PulsarBrokerStatsClientTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/stats/client/PulsarBrokerStatsClientTest.java index 93d1132d812d0..af72a2564bb0c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/stats/client/PulsarBrokerStatsClientTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/stats/client/PulsarBrokerStatsClientTest.java @@ -135,6 +135,7 @@ public void testTopicInternalStats() throws Exception { && (cursor.totalNonContiguousDeletedMessagesRange) < numberOfMsgs / 2); assertFalse(cursor.subscriptionHavePendingRead); assertFalse(cursor.subscriptionHavePendingReplayRead); + assertTrue(cursor.active); producer.close(); consumer.close(); log.info("-- Exiting {} test --", methodName); diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ManagedLedgerInternalStats.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ManagedLedgerInternalStats.java index 35bef4cda33c6..ee67f826ee678 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ManagedLedgerInternalStats.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ManagedLedgerInternalStats.java @@ -94,6 +94,7 @@ public static class CursorStats { public String individuallyDeletedMessages; public String lastLedgerSwitchTimestamp; public String state; + public boolean active; public long numberOfEntriesSinceFirstNotAckedMessage; public int totalNonContiguousDeletedMessagesRange; public boolean subscriptionHavePendingRead; From 55826742d1c589d106d7cbe97f12ec2e8bcca35f Mon Sep 17 00:00:00 2001 From: Matteo Merli Date: Sat, 1 Oct 2022 08:06:13 -0700 Subject: [PATCH 47/59] Allow to configure and disable the size of lookahead for detecting fixed delays in messages (#17907) --- conf/broker.conf | 6 ++++ .../pulsar/broker/ServiceConfiguration.java | 6 ++++ .../InMemoryDelayedDeliveryTracker.java | 18 ++++++---- ...InMemoryDelayedDeliveryTrackerFactory.java | 5 ++- .../delayed/InMemoryDeliveryTrackerTest.java | 34 +++++++++++-------- 5 files changed, 48 insertions(+), 21 deletions(-) diff --git a/conf/broker.conf b/conf/broker.conf index d117d679c8532..e6b3aef8811b5 100644 --- a/conf/broker.conf +++ b/conf/broker.conf @@ 
-576,6 +576,12 @@ delayedDeliveryMaxNumBuckets=50 # Enable share the delayed message index across subscriptions delayedDeliverySharedIndexEnabled=false +# Size of the lookahead window to use when detecting if all the messages in the topic +# have a fixed delay. +# Default is 50,000. Setting the lookahead window to 0 will disable the logic to handle +# fixed delays in messages in a different way. +delayedDeliveryFixedDelayDetectionLookahead=50000 + # Whether to enable acknowledge of batch local index. acknowledgmentAtBatchIndexLevelEnabled=false diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java index 8c883045e66c5..6683d36c36e06 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java @@ -372,6 +372,12 @@ The delayed message index bucket time step(in seconds) in per bucket snapshot se @FieldContext(category = CATEGORY_SERVER, doc = "Enable share the delayed message index across subscriptions") private boolean delayedDeliverySharedIndexEnabled = false; + @FieldContext(category = CATEGORY_SERVER, doc = "Size of the lookahead window to use " + + "when detecting if all the messages in the topic have a fixed delay. " + + "Default is 50,000. 
Setting the lookahead window to 0 will disable the " + + "logic to handle fixed delays in messages in a different way.") + private long delayedDeliveryFixedDelayDetectionLookahead = 50_000; + @FieldContext(category = CATEGORY_SERVER, doc = "Whether to enable the acknowledge of batch local index") private boolean acknowledgmentAtBatchIndexLevelEnabled = false; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java index 83b113df36b6e..11d663322be52 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java @@ -59,7 +59,7 @@ public class InMemoryDelayedDeliveryTracker implements DelayedDeliveryTracker, T // always going to be in FIFO order, then we can avoid pulling all the messages in // tracker. Instead, we use the lookahead for detection and pause the read from // the cursor if the delays are fixed. 
- public static final long DETECT_FIXED_DELAY_LOOKAHEAD_MESSAGES = 50_000; + private final long fixedDelayDetectionLookahead; // This is the timestamp of the message with the highest delivery time // If new added messages are lower than this, it means the delivery is requested @@ -70,17 +70,22 @@ public class InMemoryDelayedDeliveryTracker implements DelayedDeliveryTracker, T private boolean messagesHaveFixedDelay = true; InMemoryDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispatcher, Timer timer, long tickTimeMillis, - boolean isDelayedDeliveryDeliverAtTimeStrict) { - this(dispatcher, timer, tickTimeMillis, Clock.systemUTC(), isDelayedDeliveryDeliverAtTimeStrict); + boolean isDelayedDeliveryDeliverAtTimeStrict, + long fixedDelayDetectionLookahead) { + this(dispatcher, timer, tickTimeMillis, Clock.systemUTC(), isDelayedDeliveryDeliverAtTimeStrict, + fixedDelayDetectionLookahead); } InMemoryDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispatcher, Timer timer, - long tickTimeMillis, Clock clock, boolean isDelayedDeliveryDeliverAtTimeStrict) { + long tickTimeMillis, Clock clock, + boolean isDelayedDeliveryDeliverAtTimeStrict, + long fixedDelayDetectionLookahead) { this.dispatcher = dispatcher; this.timer = timer; this.tickTimeMillis = tickTimeMillis; this.clock = clock; this.isDelayedDeliveryDeliverAtTimeStrict = isDelayedDeliveryDeliverAtTimeStrict; + this.fixedDelayDetectionLookahead = fixedDelayDetectionLookahead; } /** @@ -283,8 +288,9 @@ public void close() { @Override public boolean shouldPauseAllDeliveries() { // Pause deliveries if we know all delays are fixed within the lookahead window - return messagesHaveFixedDelay - && priorityQueue.size() >= DETECT_FIXED_DELAY_LOOKAHEAD_MESSAGES + return fixedDelayDetectionLookahead > 0 + && messagesHaveFixedDelay + && priorityQueue.size() >= fixedDelayDetectionLookahead && !hasMessageAvailable(); } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java index 5c04a6d53b257..7bf0ca87c40c7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java @@ -33,18 +33,21 @@ public class InMemoryDelayedDeliveryTrackerFactory implements DelayedDeliveryTra private boolean isDelayedDeliveryDeliverAtTimeStrict; + private long fixedDelayDetectionLookahead; + @Override public void initialize(ServiceConfiguration config) { this.timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-delayed-delivery"), config.getDelayedDeliveryTickTimeMillis(), TimeUnit.MILLISECONDS); this.tickTimeMillis = config.getDelayedDeliveryTickTimeMillis(); this.isDelayedDeliveryDeliverAtTimeStrict = config.isDelayedDeliveryDeliverAtTimeStrict(); + this.fixedDelayDetectionLookahead = config.getDelayedDeliveryFixedDelayDetectionLookahead(); } @Override public DelayedDeliveryTracker newTracker(PersistentDispatcherMultipleConsumers dispatcher) { return new InMemoryDelayedDeliveryTracker(dispatcher, timer, tickTimeMillis, - isDelayedDeliveryDeliverAtTimeStrict); + isDelayedDeliveryDeliverAtTimeStrict, fixedDelayDetectionLookahead); } @Override diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java index db2db6cc1dbb0..1ff47a4ca5065 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java @@ -74,7 +74,7 @@ public void test() throws Exception { @Cleanup InMemoryDelayedDeliveryTracker tracker 
= new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, - false); + false, 0); assertFalse(tracker.hasMessageAvailable()); @@ -146,7 +146,7 @@ public void testWithTimer() throws Exception { @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, - false); + false, 0); assertTrue(tasks.isEmpty()); assertTrue(tracker.addMessage(2, 2, 20)); @@ -187,7 +187,7 @@ public void testAddWithinTickTime() { @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 100, clock, - false); + false, 0); clockTime.set(0); @@ -209,7 +209,7 @@ public void testAddMessageWithStrictDelay() { @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 100, clock, - true); + true, 0); clockTime.set(10); @@ -236,7 +236,7 @@ public void testAddMessageWithDeliverAtTimeAfterNowBeforeTickTimeFrequencyWithSt // Use a short tick time to show that the timer task is run based on the deliverAt time in this scenario. @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, - 1000, clock, true); + 1000, clock, true, 0); // Set clock time, then run tracker to inherit clock time as the last tick time. clockTime.set(10000); @@ -274,7 +274,7 @@ public void testAddMessageWithDeliverAtTimeAfterNowAfterTickTimeFrequencyWithStr // a previous tick run. @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, - 100000, clock, true); + 100000, clock, true, 0); clockTime.set(500000); @@ -299,7 +299,7 @@ public void testAddMessageWithDeliverAtTimeAfterFullTickTimeWithStrict() throws // Use a short tick time to show that the timer task is run based on the deliverAt time in this scenario. 
@Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, - 500, clock, true); + 500, clock, true, 0); clockTime.set(0); @@ -323,9 +323,11 @@ public void testWithFixedDelays() throws Exception { Clock clock = mock(Clock.class); when(clock.millis()).then(x -> clockTime.get()); + final long fixedDelayLookahead = 100; + @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, - true); + true, fixedDelayLookahead); assertFalse(tracker.hasMessageAvailable()); @@ -339,13 +341,13 @@ public void testWithFixedDelays() throws Exception { assertEquals(tracker.getNumberOfDelayedMessages(), 5); assertFalse(tracker.shouldPauseAllDeliveries()); - for (int i = 6; i <= InMemoryDelayedDeliveryTracker.DETECT_FIXED_DELAY_LOOKAHEAD_MESSAGES; i++) { + for (int i = 6; i <= fixedDelayLookahead; i++) { assertTrue(tracker.addMessage(i, i, i * 10)); } assertTrue(tracker.shouldPauseAllDeliveries()); - clockTime.set(InMemoryDelayedDeliveryTracker.DETECT_FIXED_DELAY_LOOKAHEAD_MESSAGES * 10); + clockTime.set(fixedDelayLookahead * 10); tracker.getScheduledMessages(100); assertFalse(tracker.shouldPauseAllDeliveries()); @@ -367,9 +369,11 @@ public void testWithMixedDelays() throws Exception { Clock clock = mock(Clock.class); when(clock.millis()).then(x -> clockTime.get()); + long fixedDelayLookahead = 100; + @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, - true); + true, fixedDelayLookahead); assertFalse(tracker.hasMessageAvailable()); @@ -381,7 +385,7 @@ public void testWithMixedDelays() throws Exception { assertFalse(tracker.shouldPauseAllDeliveries()); - for (int i = 6; i <= InMemoryDelayedDeliveryTracker.DETECT_FIXED_DELAY_LOOKAHEAD_MESSAGES; i++) { + for (int i = 6; i <= fixedDelayLookahead; i++) { assertTrue(tracker.addMessage(i, i, i * 10)); } @@ -401,9 +405,11 @@ public void testWithNoDelays() throws Exception { Clock clock 
= mock(Clock.class); when(clock.millis()).then(x -> clockTime.get()); + long fixedDelayLookahead = 100; + @Cleanup InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, - true); + true, fixedDelayLookahead); assertFalse(tracker.hasMessageAvailable()); @@ -415,7 +421,7 @@ public void testWithNoDelays() throws Exception { assertFalse(tracker.shouldPauseAllDeliveries()); - for (int i = 6; i <= InMemoryDelayedDeliveryTracker.DETECT_FIXED_DELAY_LOOKAHEAD_MESSAGES; i++) { + for (int i = 6; i <= fixedDelayLookahead; i++) { assertTrue(tracker.addMessage(i, i, i * 10)); } From e26060a1e15a3488fc93cdff6bb0e95e7ec52fed Mon Sep 17 00:00:00 2001 From: Penghui Li Date: Sun, 2 Oct 2022 08:46:09 +0800 Subject: [PATCH 48/59] [fix][broker] Fix the broker shutdown issue after Zookeeper node crashed (#17909) --- .../java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java index ad23faea25e27..4e488df478f76 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java @@ -98,7 +98,7 @@ public ZKMetadataStore(String metadataURL, MetadataStoreConfig metadataStoreConf .sessionTimeoutMs(metadataStoreConfig.getSessionTimeoutMillis()) .watchers(Collections.singleton(event -> { if (sessionWatcher != null) { - sessionWatcher.ifPresent(sw -> sw.process(event)); + sessionWatcher.ifPresent(sw -> executor.execute(() -> sw.process(event))); } })) .build(); From 9ff9703d740eb8151b8cf2eb1e7faf074e9cf3c7 Mon Sep 17 00:00:00 2001 From: Xiaoyu Hou Date: Sun, 2 Oct 2022 10:37:38 +0800 Subject: [PATCH 49/59] [improve][java-client]Shrink BatchMessageContainer maxBatchSize (#17854) --- 
.../impl/AbstractBatchMessageContainer.java | 4 ++ .../impl/BatchMessageContainerImpl.java | 26 +++++++- .../impl/BatchMessageContainerImplTest.java | 60 ++++++++++++++++++- 3 files changed, 87 insertions(+), 3 deletions(-) diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java index 9b4d1b7d683dd..784d1e05ac6e2 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java @@ -84,6 +84,10 @@ public long getCurrentBatchSize() { return currentBatchSizeBytes; } + int getMaxBatchSize() { + return maxBatchSize; + } + @Override public List createOpSendMsgs() throws IOException { throw new UnsupportedOperationException(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java index 49cbc56d2a647..99e82a8c765d8 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java @@ -64,6 +64,8 @@ class BatchMessageContainerImpl extends AbstractBatchMessageContainer { protected SendCallback firstCallback; private final ByteBufAllocator allocator; + private static final int SHRINK_COOLING_OFF_PERIOD = 10; + private int consecutiveShrinkTime = 0; public BatchMessageContainerImpl() { this(PulsarByteBufAllocator.DEFAULT); @@ -98,7 +100,8 @@ public boolean add(MessageImpl msg, SendCallback callback) { messageMetadata.setSequenceId(msg.getSequenceId()); lowestSequenceId = Commands.initBatchMessageMetadata(messageMetadata, msg.getMessageBuilder()); this.firstCallback = callback; - batchedMessageMetadataAndPayload = 
allocator.compositeBuffer(); + batchedMessageMetadataAndPayload = allocator.buffer( + Math.min(maxBatchSize, ClientCnx.getMaxMessageSize())); if (msg.getMessageBuilder().hasTxnidMostBits() && currentTxnidMostBits == -1) { currentTxnidMostBits = msg.getMessageBuilder().getTxnidMostBits(); } @@ -167,11 +170,30 @@ private ByteBuf getCompressedBatchMetadataAndPayload() { // Update the current max batch size using the uncompressed size, which is what we need in any case to // accumulate the batch content - maxBatchSize = Math.max(maxBatchSize, uncompressedSize); + updateMaxBatchSize(uncompressedSize); maxMessagesNum = Math.max(maxMessagesNum, numMessagesInBatch); return compressedPayload; } + void updateMaxBatchSize(int uncompressedSize) { + if (uncompressedSize > maxBatchSize) { + maxBatchSize = uncompressedSize; + consecutiveShrinkTime = 0; + } else { + int shrank = maxBatchSize - (maxBatchSize >> 2); + if (uncompressedSize <= shrank) { + if (consecutiveShrinkTime <= SHRINK_COOLING_OFF_PERIOD) { + consecutiveShrinkTime++; + } else { + maxBatchSize = shrank; + consecutiveShrinkTime = 0; + } + } else { + consecutiveShrinkTime = 0; + } + } + } + @Override public void clear() { messages = new ArrayList<>(maxMessagesNum); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java index a4498b952cbf7..29d423388357e 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -40,6 +41,63 @@ public class 
BatchMessageContainerImplTest { + @Test + public void testUpdateMaxBatchSize() { + int SHRINK_COOLING_OFF_PERIOD = 10; + BatchMessageContainerImpl messageContainer = new BatchMessageContainerImpl(); + // check init state + assertEquals(messageContainer.getMaxBatchSize(), 1024); + + // test expand + messageContainer.updateMaxBatchSize(2048); + assertEquals(messageContainer.getMaxBatchSize(), 2048); + + // test cooling-off period + messageContainer.updateMaxBatchSize(2); + assertEquals(messageContainer.getMaxBatchSize(), 2048); + + // test shrink + for (int i = 0; i < 15; ++i) { + messageContainer.updateMaxBatchSize(2); + if (i < SHRINK_COOLING_OFF_PERIOD) { + assertEquals(messageContainer.getMaxBatchSize(), 2048); + } else { + assertEquals(messageContainer.getMaxBatchSize(), 2048 * 0.75); + } + } + + messageContainer.updateMaxBatchSize(2048); + // test big message sudden appearance + for (int i = 0; i < 15; ++i) { + if (i == SHRINK_COOLING_OFF_PERIOD - 2) { + messageContainer.updateMaxBatchSize(2000); + } else { + messageContainer.updateMaxBatchSize(2); + } + assertEquals(messageContainer.getMaxBatchSize(), 2048); + } + + // test big and small message alternating occurrence + for (int i = 0; i < SHRINK_COOLING_OFF_PERIOD * 3; ++i) { + if (i % 2 ==0) { + messageContainer.updateMaxBatchSize(2); + } else { + messageContainer.updateMaxBatchSize(2000); + } + assertEquals(messageContainer.getMaxBatchSize(), 2048); + } + + // test consecutive big message + for (int i = 0; i < 15; ++i) { + messageContainer.updateMaxBatchSize(2000); + assertEquals(messageContainer.getMaxBatchSize(), 2048); + } + + // test expand after shrink + messageContainer.updateMaxBatchSize(4096); + assertEquals(messageContainer.getMaxBatchSize(), 4096); + } + @Test public void recoveryAfterOom() { final AtomicBoolean called = new AtomicBoolean(); @@ -62,7 +120,7 @@ public void recoveryAfterOom() { doAnswer((ignore) -> { called.set(true); throw new OutOfMemoryError("test"); - 
}).when(mockAllocator).compositeBuffer(); + }).when(mockAllocator).buffer(anyInt()); final BatchMessageContainerImpl batchMessageContainer = new BatchMessageContainerImpl(mockAllocator); batchMessageContainer.setProducer(producer); MessageMetadata messageMetadata1 = new MessageMetadata(); From 11482048d357ccb4e4f1802304a7dd0bfd7b9c26 Mon Sep 17 00:00:00 2001 From: JiangHaiting Date: Mon, 3 Oct 2022 14:42:09 +0800 Subject: [PATCH 50/59] [feat][broker]PIP-180 Shadow Topic - Part V - Support shadow topic creation. (#17711) --- .../mledger/ManagedLedgerConfig.java | 7 ++ .../mledger/ManagedLedgerFactory.java | 5 + .../impl/ManagedLedgerFactoryImpl.java | 17 +++- .../bookkeeper/mledger/impl/MetaStore.java | 9 ++ .../mledger/impl/MetaStoreImpl.java | 29 ++++++ .../mledger/impl/ShadowManagedLedgerImpl.java | 58 +++++++++++ .../pulsar/broker/service/BrokerService.java | 32 +++++- .../service/persistent/PersistentTopic.java | 12 +++ .../broker/service/PersistentTopicTest.java | 2 + .../service/persistent/ShadowTopicTest.java | 97 +++++++++++++++++++ .../apache/pulsar/client/admin/Topics.java | 54 +++++++++++ .../client/admin/internal/TopicsImpl.java | 40 +++++++- .../pulsar/admin/cli/PulsarAdminToolTest.java | 19 +++- .../apache/pulsar/admin/cli/CmdTopics.java | 34 +++++++ 14 files changed, 405 insertions(+), 10 deletions(-) create mode 100644 managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java create mode 100644 pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/ShadowTopicTest.java diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java index 92c9c91198134..0efd1ca2a823c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java @@ -32,6 +32,7 @@ import 
org.apache.bookkeeper.common.annotation.InterfaceStability; import org.apache.bookkeeper.mledger.impl.NullLedgerOffloader; import org.apache.bookkeeper.mledger.intercept.ManagedLedgerInterceptor; +import org.apache.commons.collections4.MapUtils; import org.apache.pulsar.common.util.collections.ConcurrentOpenLongPairRangeSet; /** @@ -742,4 +743,10 @@ public int getMaxBacklogBetweenCursorsForCaching() { public void setMaxBacklogBetweenCursorsForCaching(int maxBacklogBetweenCursorsForCaching) { this.maxBacklogBetweenCursorsForCaching = maxBacklogBetweenCursorsForCaching; } + + public String getShadowSource() { + return MapUtils.getString(properties, PROPERTY_SOURCE_TOPIC_KEY); + } + + public static final String PROPERTY_SOURCE_TOPIC_KEY = "PULSAR.SHADOW_SOURCE"; } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java index e42c2581ba101..667d641ac9ae0 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import org.apache.bookkeeper.common.annotation.InterfaceAudience; @@ -197,4 +198,8 @@ void asyncOpenReadOnlyCursor(String managedLedgerName, Position startPosition, M * */ long getCacheEvictionTimeThreshold(); + /** + * @return properties of this managedLedger. 
+ */ + CompletableFuture> getManagedLedgerPropertiesAsync(String name); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java index d7596a7468a40..e0af1cc632612 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java @@ -367,11 +367,13 @@ public void asyncOpen(final String name, final ManagedLedgerConfig config, final ledgers.computeIfAbsent(name, (mlName) -> { // Create the managed ledger CompletableFuture future = new CompletableFuture<>(); - final ManagedLedgerImpl newledger = new ManagedLedgerImpl(this, - bookkeeperFactory.get( - new EnsemblePlacementPolicyConfig(config.getBookKeeperEnsemblePlacementPolicyClassName(), - config.getBookKeeperEnsemblePlacementPolicyProperties())), - store, config, scheduledExecutor, name, mlOwnershipChecker); + BookKeeper bk = bookkeeperFactory.get( + new EnsemblePlacementPolicyConfig(config.getBookKeeperEnsemblePlacementPolicyClassName(), + config.getBookKeeperEnsemblePlacementPolicyProperties())); + final ManagedLedgerImpl newledger = config.getShadowSource() == null + ? 
new ManagedLedgerImpl(this, bk, store, config, scheduledExecutor, name, mlOwnershipChecker) + : new ShadowManagedLedgerImpl(this, bk, store, config, scheduledExecutor, name, + mlOwnershipChecker); PendingInitializeManagedLedger pendingLedger = new PendingInitializeManagedLedger(newledger); pendingInitializeLedgers.put(name, pendingLedger); newledger.initialize(new ManagedLedgerInitializeLedgerCallback() { @@ -954,6 +956,11 @@ public void operationFailed(MetaStoreException e) { return future; } + @Override + public CompletableFuture> getManagedLedgerPropertiesAsync(String name) { + return store.getManagedLedgerPropertiesAsync(name); + } + public MetaStore getMetaStore() { return store; } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStore.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStore.java index 35f109b21dc5c..3d60066782af2 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStore.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStore.java @@ -158,4 +158,13 @@ void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedCursorIn * if the operation succeeds. */ CompletableFuture asyncExists(String ledgerName); + + + /** + * Get managed ledger properties from meta store. + * + * @param name ledgerName + * @return a future represents the result of the operation. 
+ */ + CompletableFuture> getManagedLedgerPropertiesAsync(String name); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java index a501b9e43dc0f..2949902ac353f 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java @@ -23,6 +23,8 @@ import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -148,6 +150,33 @@ public void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, Map }); } + public CompletableFuture> getManagedLedgerPropertiesAsync(String name) { + CompletableFuture> result = new CompletableFuture<>(); + getManagedLedgerInfo(name, false, new MetaStoreCallback<>() { + @Override + public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat) { + HashMap propertiesMap = new HashMap<>(mlInfo.getPropertiesCount()); + if (mlInfo.getPropertiesCount() > 0) { + for (int i = 0; i < mlInfo.getPropertiesCount(); i++) { + MLDataFormats.KeyValue property = mlInfo.getProperties(i); + propertiesMap.put(property.getKey(), property.getValue()); + } + } + result.complete(propertiesMap); + } + + @Override + public void operationFailed(MetaStoreException e) { + if (e instanceof MetadataNotFoundException) { + result.complete(Collections.emptyMap()); + } else { + result.completeExceptionally(e); + } + } + }); + return result; + } + @Override public void asyncUpdateLedgerIds(String ledgerName, ManagedLedgerInfo mlInfo, Stat stat, MetaStoreCallback callback) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java new file mode 100644 index 0000000000000..346780a228349 --- /dev/null +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.bookkeeper.mledger.impl; + +import java.util.function.Supplier; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.pulsar.common.naming.TopicName; + +/** + * Working in progress until PIP-180 is finished. + * Currently, it works nothing different with ManagedLedgerImpl. 
+ */ +@Slf4j +public class ShadowManagedLedgerImpl extends ManagedLedgerImpl { + + private final TopicName shadowSource; + private final String sourceMLName; + + public ShadowManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, + MetaStore store, ManagedLedgerConfig config, + OrderedScheduler scheduledExecutor, + String name, final Supplier mlOwnershipChecker) { + super(factory, bookKeeper, store, config, scheduledExecutor, name, mlOwnershipChecker); + this.shadowSource = TopicName.get(config.getShadowSource()); + this.sourceMLName = shadowSource.getPersistenceNamingEncoding(); + } + + @Override + synchronized void initialize(ManagedLedgerInitializeLedgerCallback callback, Object ctx) { + // TODO: ShadowManagedLedger has different initialize process from normal ManagedLedger, + // which is complicated and will be implemented in the next PRs. + super.initialize(callback, ctx); + } + + public TopicName getShadowSource() { + return shadowSource; + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index 8491615448aae..2f6cb020b7085 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -1405,6 +1405,22 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S return topicFuture; } + CompletableFuture> fetchTopicPropertiesAsync(TopicName topicName) { + if (!topicName.isPartitioned()) { + return managedLedgerFactory.getManagedLedgerPropertiesAsync(topicName.getPersistenceNamingEncoding()); + } else { + TopicName partitionedTopicName = TopicName.get(topicName.getPartitionedTopicName()); + return fetchPartitionedTopicMetadataAsync(partitionedTopicName) + .thenCompose(metadata -> { + if (metadata.partitions == PartitionedTopicMetadata.NON_PARTITIONED) { + return 
managedLedgerFactory.getManagedLedgerPropertiesAsync( + topicName.getPersistenceNamingEncoding()); + } + return CompletableFuture.completedFuture(metadata.properties); + }); + } + } + private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, Map properties) { @@ -1412,7 +1428,21 @@ private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean pulsar.getNamespaceService().isServiceUnitActiveAsync(topicName) .thenAccept(isActive -> { if (isActive) { - createPersistentTopic(topic, createIfMissing, topicFuture, properties); + CompletableFuture> propertiesFuture; + if (properties == null) { + //Read properties from storage when loading topic. + propertiesFuture = fetchTopicPropertiesAsync(topicName); + } else { + propertiesFuture = CompletableFuture.completedFuture(properties); + } + propertiesFuture.thenAccept(finalProperties -> + createPersistentTopic(topic, createIfMissing, topicFuture, finalProperties) + ).exceptionally(throwable -> { + log.warn("[{}] Read topic property failed", topic, throwable); + pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); + topicFuture.completeExceptionally(throwable); + return null; + }); } else { // namespace is being unloaded String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index fb1c521bb00b0..a3193935e5325 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -72,6 +72,7 @@ import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import 
org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.bookkeeper.mledger.impl.ShadowManagedLedgerImpl; import org.apache.bookkeeper.net.BookieId; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; @@ -176,6 +177,7 @@ public class PersistentTopic extends AbstractTopic implements Topic, AddEntryCal private final ConcurrentOpenHashMap shadowReplicators; @Getter private volatile List shadowTopics; + private final TopicName shadowSourceTopic; static final String DEDUPLICATION_CURSOR_NAME = "pulsar.dedup"; private static final String TOPIC_EPOCH_PROPERTY_NAME = "pulsar.topic.epoch"; @@ -302,6 +304,11 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS this.transactionBuffer = new TransactionBufferDisable(); } transactionBuffer.syncMaxReadPositionForNormalPublish((PositionImpl) ledger.getLastConfirmedEntry()); + if (ledger instanceof ShadowManagedLedgerImpl) { + shadowSourceTopic = ((ShadowManagedLedgerImpl) ledger).getShadowSource(); + } else { + shadowSourceTopic = null; + } } @Override @@ -381,6 +388,7 @@ public CompletableFuture initialize() { } else { this.transactionBuffer = new TransactionBufferDisable(); } + shadowSourceTopic = null; } private void initializeDispatchRateLimiterIfNeeded() { @@ -3330,4 +3338,8 @@ private CompletableFuture transactionBufferCleanupAndClose() { public long getLastDataMessagePublishedTimestamp() { return lastDataMessagePublishedTimestamp; } + + public Optional getShadowSourceTopic() { + return Optional.ofNullable(shadowSourceTopic); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 970bfd763a4e5..2771808a9fa0b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -201,6 +201,8 @@ public void setup() throws Exception { doReturn(mlFactoryMock).when(pulsar).getManagedLedgerFactory(); doReturn(mock(PulsarClientImpl.class)).when(pulsar).getClient(); + doAnswer(invocationOnMock -> CompletableFuture.completedFuture(null)) + .when(mlFactoryMock).getManagedLedgerPropertiesAsync(any()); doAnswer(invocation -> { DeleteLedgerCallback deleteLedgerCallback = invocation.getArgument(1); deleteLedgerCallback.deleteLedgerComplete(null); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/ShadowTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/ShadowTopicTest.java new file mode 100644 index 0000000000000..22bfd70cf8883 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/ShadowTopicTest.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.pulsar.broker.service.persistent; + + +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.impl.ShadowManagedLedgerImpl; +import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.common.naming.TopicName; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +public class ShadowTopicTest extends BrokerTestBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + baseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + internalCleanup(); + } + + @Test() + public void testNonPartitionedShadowTopicSetup() throws Exception { + String sourceTopic = "persistent://prop/ns-abc/source"; + String shadowTopic = "persistent://prop/ns-abc/shadow"; + //1. test shadow topic setting in topic creation. + admin.topics().createNonPartitionedTopic(sourceTopic); + admin.topics().createShadowTopic(shadowTopic, sourceTopic); + PersistentTopic brokerShadowTopic = + (PersistentTopic) pulsar.getBrokerService().getTopicIfExists(shadowTopic).get().get(); + Assert.assertTrue(brokerShadowTopic.getManagedLedger() instanceof ShadowManagedLedgerImpl); + Assert.assertEquals(brokerShadowTopic.getShadowSourceTopic().get().toString(), sourceTopic); + Assert.assertEquals(admin.topics().getShadowSource(shadowTopic), sourceTopic); + + //2. test shadow topic could be properly loaded after unload. 
+ admin.namespaces().unload("prop/ns-abc"); + Assert.assertTrue(pulsar.getBrokerService().getTopicReference(shadowTopic).isEmpty()); + Assert.assertEquals(admin.topics().getShadowSource(shadowTopic), sourceTopic); + brokerShadowTopic = (PersistentTopic) pulsar.getBrokerService().getTopicIfExists(shadowTopic).get().get(); + Assert.assertTrue(brokerShadowTopic.getManagedLedger() instanceof ShadowManagedLedgerImpl); + Assert.assertEquals(brokerShadowTopic.getShadowSourceTopic().get().toString(), sourceTopic); + } + + @Test() + public void testPartitionedShadowTopicSetup() throws Exception { + String sourceTopic = "persistent://prop/ns-abc/source-p"; + String shadowTopic = "persistent://prop/ns-abc/shadow-p"; + String shadowTopicPartition = TopicName.get(shadowTopic).getPartition(0).toString(); + + //1. test shadow topic setting in topic creation. + admin.topics().createPartitionedTopic(sourceTopic, 2); + admin.topics().createShadowTopic(shadowTopic, sourceTopic); + pulsarClient.newProducer().topic(shadowTopic).create().close();//trigger loading partitions. + PersistentTopic brokerShadowTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(shadowTopicPartition).get().get(); + Assert.assertTrue(brokerShadowTopic.getManagedLedger() instanceof ShadowManagedLedgerImpl); + Assert.assertEquals(brokerShadowTopic.getShadowSourceTopic().get().toString(), sourceTopic); + Assert.assertEquals(admin.topics().getShadowSource(shadowTopic), sourceTopic); + + //2. test shadow topic could be properly loaded after unload. 
+ admin.namespaces().unload("prop/ns-abc"); + Assert.assertTrue(pulsar.getBrokerService().getTopicReference(shadowTopic).isEmpty()); + + Assert.assertEquals(admin.topics().getShadowSource(shadowTopic), sourceTopic); + brokerShadowTopic = + (PersistentTopic) pulsar.getBrokerService().getTopicIfExists(shadowTopicPartition).get().get(); + Assert.assertTrue(brokerShadowTopic.getManagedLedger() instanceof ShadowManagedLedgerImpl); + Assert.assertEquals(brokerShadowTopic.getShadowSourceTopic().get().toString(), sourceTopic); + } + + +} diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java index 177cae9a9a438..d2d0754f1d112 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java @@ -4397,4 +4397,58 @@ CompletableFuture> examineMessageAsync(String topic, String init * @param sourceTopic source topic name */ CompletableFuture> getShadowTopicsAsync(String sourceTopic); + + /** + * Get the shadow source topic name of the given shadow topic. + * @param shadowTopic shadow topic name. + * @return The topic name of the source of the shadow topic. + */ + String getShadowSource(String shadowTopic) throws PulsarAdminException; + + /** + * Get the shadow source topic name of the given shadow topic asynchronously. + * @param shadowTopic shadow topic name. + * @return The topic name of the source of the shadow topic. + */ + CompletableFuture getShadowSourceAsync(String shadowTopic); + + /** + * Create a new shadow topic as the shadow of the source topic. + * The source topic must exist before call this method. + *

+ * For partitioned source topic, the partition number of shadow topic follows the source topic at creation. If + * the partition number of the source topic changes, the shadow topic needs to update its partition number + * manually. + * For non-partitioned source topic, the shadow topic will be created as non-partitioned topic. + *

+ * + * NOTE: This is still WIP until PIP-180 is finished. + * + * @param shadowTopic shadow topic name, and it must be a persistent topic name. + * @param sourceTopic source topic name, and it must be a persistent topic name. + * @param properties properties to be created with in the shadow topic. + * @throws PulsarAdminException + */ + void createShadowTopic(String shadowTopic, String sourceTopic, Map properties) + throws PulsarAdminException; + + /** + * Create a new shadow topic, see #{@link #createShadowTopic(String, String, Map)} for details. + */ + CompletableFuture createShadowTopicAsync(String shadowTopic, String sourceTopic, + Map properties); + + /** + * Create a new shadow topic, see #{@link #createShadowTopic(String, String, Map)} for details. + */ + default void createShadowTopic(String shadowTopic, String sourceTopic) throws PulsarAdminException { + createShadowTopic(shadowTopic, sourceTopic, null); + } + + /** + * Create a new shadow topic, see #{@link #createShadowTopic(String, String, Map)} for details. 
+ */ + default CompletableFuture createShadowTopicAsync(String shadowTopic, String sourceTopic) { + return createShadowTopicAsync(shadowTopic, sourceTopic, null); + } } diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java index e3b51accdfd7c..4312080ac2233 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java @@ -131,6 +131,8 @@ public class TopicsImpl extends BaseResource implements Topics { private static final String ENCRYPTION_KEYS = "X-Pulsar-Base64-encryption-keys"; // CHECKSTYLE.ON: MemberName + public static final String PROPERTY_SHADOW_SOURCE_KEY = "PULSAR.SHADOW_SOURCE"; + public TopicsImpl(WebTarget web, Authentication auth, long readTimeoutMs) { super(auth, readTimeoutMs); adminTopics = web.path("/admin"); @@ -2705,7 +2707,43 @@ public CompletableFuture removeShadowTopicsAsync(String sourceTopic) { public CompletableFuture> getShadowTopicsAsync(String sourceTopic) { TopicName tn = validateTopic(sourceTopic); WebTarget path = topicPath(tn, "shadowTopics"); - return asyncGetRequest(path, new FutureCallback>(){}); + return asyncGetRequest(path, new FutureCallback>() {}); + } + + @Override + public String getShadowSource(String shadowTopic) throws PulsarAdminException { + return sync(() -> getShadowSourceAsync(shadowTopic)); + } + + @Override + public CompletableFuture getShadowSourceAsync(String shadowTopic) { + return getPropertiesAsync(shadowTopic).thenApply( + properties -> properties != null ? 
properties.get(PROPERTY_SHADOW_SOURCE_KEY) : null); + } + + @Override + public void createShadowTopic(String shadowTopic, String sourceTopic, Map properties) + throws PulsarAdminException { + sync(() -> createShadowTopicAsync(shadowTopic, sourceTopic, properties)); + } + + @Override + public CompletableFuture createShadowTopicAsync(String shadowTopic, String sourceTopic, + Map properties) { + checkArgument(TopicName.get(shadowTopic).isPersistent(), "Shadow topic must be persistent"); + checkArgument(TopicName.get(sourceTopic).isPersistent(), "Source topic must be persistent"); + return getPartitionedTopicMetadataAsync(sourceTopic).thenCompose(sourceTopicMeta -> { + HashMap shadowProperties = new HashMap<>(); + if (properties != null) { + shadowProperties.putAll(properties); + } + shadowProperties.put(PROPERTY_SHADOW_SOURCE_KEY, sourceTopic); + if (sourceTopicMeta.partitions == PartitionedTopicMetadata.NON_PARTITIONED) { + return createNonPartitionedTopicAsync(shadowTopic, shadowProperties); + } else { + return createPartitionedTopicAsync(shadowTopic, sourceTopicMeta.partitions, shadowProperties); + } + }); } private static final Logger log = LoggerFactory.getLogger(TopicsImpl.class); diff --git a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java index b9cfbe8c97eab..bcb1fca1705e5 100644 --- a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java +++ b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java @@ -34,7 +34,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; import com.google.common.collect.Sets; - import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; @@ -51,7 +50,6 @@ import java.util.Properties; import java.util.concurrent.CompletableFuture; import 
java.util.concurrent.TimeUnit; - import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.admin.cli.utils.SchemaExtractor; import org.apache.pulsar.client.admin.Bookies; @@ -74,7 +72,6 @@ import org.apache.pulsar.client.admin.Topics; import org.apache.pulsar.client.admin.Transactions; import org.apache.pulsar.client.admin.internal.OffloadProcessStatusImpl; -import org.apache.pulsar.client.admin.internal.PulsarAdminBuilderImpl; import org.apache.pulsar.client.admin.internal.PulsarAdminImpl; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.SubscriptionType; @@ -1946,6 +1943,22 @@ public boolean matches(Long timestamp) { cmdTopics.run(split("remove-shadow-topics persistent://myprop/clust/ns1/ds1")); verify(mockTopics).removeShadowTopics("persistent://myprop/clust/ns1/ds1"); + cmdTopics.run(split("create-shadow-topic -s persistent://myprop/clust/ns1/source persistent://myprop/clust/ns1/ds1")); + verify(mockTopics).createShadowTopic("persistent://myprop/clust/ns1/ds1", "persistent://myprop/clust/ns1/source", null); + + cmdTopics = new CmdTopics(() -> admin); + cmdTopics.run(split("create-shadow-topic -p a=aa,b=bb,c=cc -s persistent://myprop/clust/ns1/source persistent://myprop/clust/ns1/ds1")); + HashMap p = new HashMap<>(); + p.put("a","aa"); + p.put("b","bb"); + p.put("c","cc"); + verify(mockTopics).createShadowTopic("persistent://myprop/clust/ns1/ds1", "persistent://myprop/clust/ns1/source", p); + + cmdTopics.run(split("get-shadow-source persistent://myprop/clust/ns1/ds1")); + verify(mockTopics).getShadowSource("persistent://myprop/clust/ns1/ds1"); + + + } private static LedgerInfo newLedger(long id, long entries, long size) { diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java index ae37a591bae3a..245cfc1b85506 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java +++ 
b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java @@ -255,6 +255,8 @@ public CmdTopics(Supplier admin) { jcommander.addCommand("get-shadow-topics", new GetShadowTopics()); jcommander.addCommand("set-shadow-topics", new SetShadowTopics()); jcommander.addCommand("remove-shadow-topics", new RemoveShadowTopics()); + jcommander.addCommand("create-shadow-topic", new CreateShadowTopic()); + jcommander.addCommand("get-shadow-source", new GetShadowSource()); jcommander.addCommand("get-schema-validation-enforce", new GetSchemaValidationEnforced()); jcommander.addCommand("set-schema-validation-enforce", new SetSchemaValidationEnforced()); @@ -1714,6 +1716,38 @@ void run() throws PulsarAdminException { } } + @Parameters(commandDescription = "Create a shadow topic for an existing source topic.") + private class CreateShadowTopic extends CliCommand { + + @Parameter(description = "persistent://tenant/namespace/topic", required = true) + private java.util.List params; + + @Parameter(names = {"--source", "-s"}, description = "source topic name", required = true) + private String sourceTopic; + + @Parameter(names = {"--properties", "-p"}, description = "key value pair properties(eg: a=a b=b c=c)") + private java.util.List propertyList; + + @Override + void run() throws Exception { + String topic = validateTopicName(params); + Map properties = parseListKeyValueMap(propertyList); + getTopics().createShadowTopic(topic, TopicName.get(sourceTopic).toString(), properties); + } + } + + @Parameters(commandDescription = "Get the source topic for a shadow topic") + private class GetShadowSource extends CliCommand { + @Parameter(description = "persistent://tenant/namespace/topic", required = true) + private java.util.List params; + + @Override + void run() throws PulsarAdminException { + String shadowTopic = validatePersistentTopic(params); + print(getTopics().getShadowSource(shadowTopic)); + } + } + @Parameters(commandDescription = "Get the delayed delivery policy 
for a topic") private class GetDelayedDelivery extends CliCommand { @Parameter(description = "tenant/namespace/topic", required = true) From af11c32611e41d708e76264d47ef9501534c6c5c Mon Sep 17 00:00:00 2001 From: Xiaoyu Hou Date: Mon, 3 Oct 2022 23:13:03 +0800 Subject: [PATCH 51/59] [improve][broker]Improve PersistentMessageExpiryMonitor expire speed when ledger not existed (#17842) --- .../bookkeeper/mledger/ManagedLedger.java | 7 ++++++ .../mledger/ManagedLedgerException.java | 6 +++++ .../mledger/impl/ManagedLedgerImpl.java | 25 +++++++++++++++++-- .../PersistentMessageExpiryMonitor.java | 25 ++++++++++++++++++- .../jcloud/impl/MockManagedLedger.java | 7 ++++++ 5 files changed, 67 insertions(+), 3 deletions(-) diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java index a71cf38afb855..c5de804b1379d 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java @@ -20,6 +20,7 @@ import io.netty.buffer.ByteBuf; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.function.Predicate; import org.apache.bookkeeper.common.annotation.InterfaceAudience; @@ -649,6 +650,12 @@ void asyncSetProperties(Map properties, AsyncCallbacks.UpdatePro */ CompletableFuture getLedgerInfo(long ledgerId); + /** + * Get basic ledger summary. + * will get {@link Optional#empty()} if corresponding ledger not exists. + */ + Optional getOptionalLedgerInfo(long ledgerId); + /** * Truncate ledgers * The truncate operation will move all cursors to the end of the topic and delete all inactive ledgers. 
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java index 0dc820ec46d72..fa6dc59d14753 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java @@ -147,6 +147,12 @@ public NonRecoverableLedgerException(String msg) { } } + public static class LedgerNotExistException extends NonRecoverableLedgerException { + public LedgerNotExistException(String msg) { + super(msg); + } + } + public static class InvalidReplayPositionException extends ManagedLedgerException { public InvalidReplayPositionException(String msg) { super(msg); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index 53bccfadbab22..10de447d0f49e 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -100,6 +100,7 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException.BadVersionException; import org.apache.bookkeeper.mledger.ManagedLedgerException.CursorNotFoundException; import org.apache.bookkeeper.mledger.ManagedLedgerException.InvalidCursorPositionException; +import org.apache.bookkeeper.mledger.ManagedLedgerException.LedgerNotExistException; import org.apache.bookkeeper.mledger.ManagedLedgerException.ManagedLedgerAlreadyClosedException; import org.apache.bookkeeper.mledger.ManagedLedgerException.ManagedLedgerFencedException; import org.apache.bookkeeper.mledger.ManagedLedgerException.ManagedLedgerInterceptException; @@ -1835,6 +1836,11 @@ public CompletableFuture getLedgerInfo(long ledgerId) { return result; } + @Override + public Optional 
getOptionalLedgerInfo(long ledgerId) { + return Optional.ofNullable(ledgers.get(ledgerId)); + } + CompletableFuture getLedgerHandle(long ledgerId) { CompletableFuture ledgerHandle = ledgerCache.get(ledgerId); if (ledgerHandle != null) { @@ -1941,7 +1947,7 @@ public void asyncReadEntry(PositionImpl position, ReadEntryCallback callback, Ob } else { log.error("[{}] Failed to get message with ledger {}:{} the ledgerId does not belong to this topic " + "or has been deleted.", name, position.getLedgerId(), position.getEntryId()); - callback.readEntryFailed(new ManagedLedgerException.NonRecoverableLedgerException("Message not found, " + callback.readEntryFailed(new LedgerNotExistException("Message not found, " + "the ledgerId does not belong to this topic or has been deleted"), ctx); } @@ -3754,11 +3760,26 @@ private static boolean isBkErrorNotRecoverable(int rc) { } } + private static boolean isLedgerNotExistException(int rc) { + switch (rc) { + case Code.NoSuchLedgerExistsException: + case Code.NoSuchLedgerExistsOnMetadataServerException: + return true; + + default: + return false; + } + } + public static ManagedLedgerException createManagedLedgerException(int bkErrorCode) { if (bkErrorCode == BKException.Code.TooManyRequestsException) { return new TooManyRequestsException("Too many request error from bookies"); } else if (isBkErrorNotRecoverable(bkErrorCode)) { - return new NonRecoverableLedgerException(BKException.getMessage(bkErrorCode)); + if (isLedgerNotExistException(bkErrorCode)) { + return new LedgerNotExistException(BKException.getMessage(bkErrorCode)); + } else { + return new NonRecoverableLedgerException(BKException.getMessage(bkErrorCode)); + } } else { return new ManagedLedgerException(BKException.getMessage(bkErrorCode)); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java index 
8aabc7bf3d8af..9b1cd75282a4d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java @@ -26,8 +26,10 @@ import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.ManagedLedgerException.LedgerNotExistException; import org.apache.bookkeeper.mledger.ManagedLedgerException.NonRecoverableLedgerException; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; @@ -191,7 +193,28 @@ public void findEntryFailed(ManagedLedgerException exception, Optional && (exception instanceof NonRecoverableLedgerException)) { log.warn("[{}][{}] read failed from ledger at position:{} : {}", topicName, subName, failedReadPosition, exception.getMessage()); - findEntryComplete(failedReadPosition.get(), ctx); + if (exception instanceof LedgerNotExistException) { + long failedLedgerId = failedReadPosition.get().getLedgerId(); + ManagedLedgerImpl ledger = ((ManagedLedgerImpl) cursor.getManagedLedger()); + Position lastPositionInLedger = ledger.getOptionalLedgerInfo(failedLedgerId) + .map(ledgerInfo -> PositionImpl.get(failedLedgerId, ledgerInfo.getEntries() - 1)) + .orElseGet(() -> { + Long nextExistingLedger = ledger.getNextValidLedger(failedReadPosition.get().getLedgerId()); + if (nextExistingLedger == null) { + log.info("[{}] [{}] Couldn't find next next valid ledger for expiry monitor when find " + + "entry failed {}", ledger.getName(), ledger.getName(), + failedReadPosition); + return (PositionImpl) failedReadPosition.get(); + } 
else { + return PositionImpl.get(nextExistingLedger, -1); + } + }); + log.info("[{}][{}] ledger not existed, will complete the last position of the non-existed" + + " ledger:{}", topicName, subName, lastPositionInLedger); + findEntryComplete(lastPositionInLedger, ctx); + } else { + findEntryComplete(failedReadPosition.get(), ctx); + } } expirationCheckInProgress = FALSE; updateRates(); diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java index 32a4bf330a540..730fbc90d2c8e 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java @@ -20,6 +20,7 @@ import io.netty.buffer.ByteBuf; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.function.Predicate; import lombok.extern.slf4j.Slf4j; @@ -354,6 +355,12 @@ public CompletableFuture getLedgerInfo(long ledgerId) { return CompletableFuture.completedFuture(build); } + @Override + public Optional getOptionalLedgerInfo(long ledgerId) { + final LedgerInfo build = LedgerInfo.newBuilder().setLedgerId(ledgerId).setSize(100).setEntries(20).build(); + return Optional.of(build); + } + @Override public CompletableFuture asyncTruncate() { return CompletableFuture.completedFuture(null); From dd0c53ed9684edd7a00413a0c0ad9b9a07030099 Mon Sep 17 00:00:00 2001 From: Vineeth Date: Mon, 26 Sep 2022 13:22:08 -0700 Subject: [PATCH 52/59] Add proxyServiceUrl and proxyProtocol as options for PerfTool CLI --- .../pulsar/testclient/PerfClientUtils.java | 3 ++- .../testclient/PerformanceBaseArguments.java | 16 ++++++++++++++++ .../pulsar/testclient/PerfClientUtilsTest.java | 5 +++++ .../testclient/PerformanceBaseArgumentsTest.java | 4
++++ .../src/test/resources/perf_client1.conf | 2 ++ 5 files changed, 29 insertions(+), 1 deletion(-) diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java index 1ce5777fd3219..a3552d314309c 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java @@ -75,7 +75,8 @@ public static ClientBuilder createClientBuilderFromArguments(PerformanceBaseArgu .enableBusyWait(arguments.enableBusyWait) .listenerThreads(arguments.listenerThreads) .tlsTrustCertsFilePath(arguments.tlsTrustCertsFilePath) - .maxLookupRequests(arguments.maxLookupRequest); + .maxLookupRequests(arguments.maxLookupRequest) + .proxyServiceUrl(arguments.proxyServiceURL, arguments.proxyProtocol); if (isNotBlank(arguments.authPluginClassName)) { clientBuilder.authentication(arguments.authPluginClassName, arguments.authParams); diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java index cff7e16e9caa4..2f3459f030224 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java @@ -23,6 +23,7 @@ import java.io.FileInputStream; import java.util.Properties; import lombok.SneakyThrows; +import org.apache.pulsar.client.api.ProxyProtocol; public abstract class PerformanceBaseArguments { @@ -85,6 +86,12 @@ public abstract class PerformanceBaseArguments { + "on each broker connection to prevent overloading a broker") public int maxLookupRequest = 50000; + @Parameter(names = { "--proxy-url" }, description = "Proxy-server URL to which to connect.") + String proxyServiceURL = null; + + 
@Parameter(names = { "--proxy-protocol" }, description = "Proxy protocol to select type of routing at proxy.") + ProxyProtocol proxyProtocol = null; + public abstract void fillArgumentsFromProperties(Properties prop); @SneakyThrows @@ -133,6 +140,15 @@ public void fillArgumentsFromProperties() { .getProperty("tlsEnableHostnameVerification", "")); } + + if (proxyServiceURL == null) { + proxyServiceURL = prop.getProperty("proxyServiceURL"); + } + + if (proxyProtocol == null) { + proxyProtocol = ProxyProtocol.valueOf(prop.getProperty("proxyProtocol")); + } + fillArgumentsFromProperties(prop); } diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerfClientUtilsTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerfClientUtilsTest.java index ea21112635a87..e1f1e7d358162 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerfClientUtilsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerfClientUtilsTest.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.Properties; import org.apache.pulsar.client.api.Authentication; +import org.apache.pulsar.client.api.ProxyProtocol; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.impl.ClientBuilderImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; @@ -71,6 +72,8 @@ public void fillArgumentsFromProperties(Properties prop) { args.tlsTrustCertsFilePath = "path"; args.tlsAllowInsecureConnection = true; args.maxLookupRequest = 100000; + args.proxyServiceURL = "pulsar+ssl://my-proxy-pulsar:4443"; + args.proxyProtocol = ProxyProtocol.SNI; final ClientBuilderImpl builder = (ClientBuilderImpl)PerfClientUtils.createClientBuilderFromArguments(args); final ClientConfigurationData conf = builder.getClientConfigurationData(); @@ -88,6 +91,8 @@ public void fillArgumentsFromProperties(Properties prop) { Assert.assertEquals(conf.getTlsTrustCertsFilePath(), "path"); 
Assert.assertTrue(conf.isTlsAllowInsecureConnection()); Assert.assertEquals(conf.getMaxLookupRequest(), 100000); + Assert.assertEquals(conf.getProxyServiceUrl(), "pulsar+ssl://my-proxy-pulsar:4443"); + Assert.assertEquals(conf.getProxyProtocol(), ProxyProtocol.SNI); } } \ No newline at end of file diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java index 0b244a5a4e1ba..e3926c54e9e0d 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java @@ -23,6 +23,8 @@ import org.testng.Assert; import org.testng.annotations.Test; +import static org.apache.pulsar.client.api.ProxyProtocol.SNI; + public class PerformanceBaseArgumentsTest { @@ -47,6 +49,8 @@ public void fillArgumentsFromProperties(Properties prop) { Assert.assertEquals(args.tlsTrustCertsFilePath, "./path"); Assert.assertTrue(args.tlsAllowInsecureConnection); Assert.assertTrue(args.tlsHostnameVerificationEnable); + Assert.assertEquals(args.proxyServiceURL, "https://my-proxy-pulsar:4443/"); + Assert.assertEquals(args.proxyProtocol, SNI); } } \ No newline at end of file diff --git a/pulsar-testclient/src/test/resources/perf_client1.conf b/pulsar-testclient/src/test/resources/perf_client1.conf index 127960618bf5d..dde29de5d7da3 100644 --- a/pulsar-testclient/src/test/resources/perf_client1.conf +++ b/pulsar-testclient/src/test/resources/perf_client1.conf @@ -23,3 +23,5 @@ authParams=myparams tlsTrustCertsFilePath=./path tlsAllowInsecureConnection=true tlsEnableHostnameVerification=true +proxyServiceURL=https://my-proxy-pulsar:4443/ +proxyProtocol=SNI From d1868e247d3bcd3c073f29916e44030f1b46a4bc Mon Sep 17 00:00:00 2001 From: Vineeth Date: Tue, 27 Sep 2022 10:32:51 -0700 Subject: [PATCH 53/59] Add proxyServiceUrl and 
proxyProtocol as options for PerfTool CLI --- conf/client.conf | 4 ++ .../testclient/PerformanceBaseArguments.java | 14 +++++- .../PerformanceBaseArgumentsTest.java | 50 +++++++++++++++++++ .../src/test/resources/perf_client1.conf | 2 +- .../src/test/resources/perf_client2.conf | 25 ++++++++++ .../src/test/resources/perf_client3.conf | 27 ++++++++++ 6 files changed, 120 insertions(+), 2 deletions(-) create mode 100644 pulsar-testclient/src/test/resources/perf_client2.conf create mode 100644 pulsar-testclient/src/test/resources/perf_client3.conf diff --git a/conf/client.conf b/conf/client.conf index ea1d339a09c5b..8a485e5676c7b 100644 --- a/conf/client.conf +++ b/conf/client.conf @@ -88,7 +88,11 @@ tlsKeyStorePassword= # When TLS authentication with KeyStore is used, available options can be SunJSSE, Conscrypt and so on. webserviceTlsProvider= +#Proxy-server URL to which to connect +proxyServiceUrl= +#Proxy protocol to select type of routing at proxy +proxyProtocol= # Pulsar Admin Custom Commands #customCommandFactoriesDirectory=commandFactories diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java index 2f3459f030224..ee1fd1b6908cb 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java @@ -19,6 +19,8 @@ package org.apache.pulsar.testclient; import static org.apache.commons.lang3.StringUtils.isBlank; +import static org.apache.pulsar.testclient.PerfClientUtils.exit; + import com.beust.jcommander.Parameter; import java.io.FileInputStream; import java.util.Properties; @@ -146,7 +148,17 @@ public void fillArgumentsFromProperties() { } if (proxyProtocol == null) { - proxyProtocol = ProxyProtocol.valueOf(prop.getProperty("proxyProtocol")); + try { + String proxyProtocolString = 
prop.getProperty("proxyProtocol"); + if (proxyProtocolString != null) { + proxyProtocol = ProxyProtocol.valueOf(prop.getProperty("proxyProtocol")); + } + } catch (IllegalArgumentException e) { + System.out.println("Incorrect proxyProtocol name"); + e.printStackTrace(); + exit(-1); + } + } fillArgumentsFromProperties(prop); diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java index e3926c54e9e0d..f8bb0bc2bb01f 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java @@ -53,4 +53,54 @@ public void fillArgumentsFromProperties(Properties prop) { Assert.assertEquals(args.proxyProtocol, SNI); } + @Test + public void testReadFromConfigFileWithoutProxyUrl() { + + AtomicBoolean called = new AtomicBoolean(); + + final PerformanceBaseArguments args = new PerformanceBaseArguments() { + @Override + public void fillArgumentsFromProperties(Properties prop) { + called.set(true); + } + }; + args.confFile = "./src/test/resources/perf_client2.conf"; + args.fillArgumentsFromProperties(); + Assert.assertTrue(called.get()); + Assert.assertEquals(args.serviceURL, "https://my-pulsar:8443/"); + Assert.assertEquals(args.authPluginClassName, + "org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth"); + Assert.assertEquals(args.authParams, "myparams"); + Assert.assertEquals(args.tlsTrustCertsFilePath, "./path"); + Assert.assertTrue(args.tlsAllowInsecureConnection); + Assert.assertTrue(args.tlsHostnameVerificationEnable); + } + + @Test + public void testReadFromConfigFileProxyProtocolException() { + + AtomicBoolean calledVar1 = new AtomicBoolean(); + AtomicBoolean calledVar2 = new AtomicBoolean(); + + final PerformanceBaseArguments args = new PerformanceBaseArguments() { + @Override + public 
void fillArgumentsFromProperties(Properties prop) { + calledVar1.set(true); + } + }; + + PerfClientUtils.setExitProcedure(code -> { + calledVar2.set(true); + Assert.assertNotNull(code); + if (code != -1) { + Assert.fail("Incorrect exit code"); + } + }); + + args.confFile = "./src/test/resources/perf_client3.conf"; + args.fillArgumentsFromProperties(); + Assert.assertTrue(calledVar1.get()); + Assert.assertTrue(calledVar2.get()); + } + } \ No newline at end of file diff --git a/pulsar-testclient/src/test/resources/perf_client1.conf b/pulsar-testclient/src/test/resources/perf_client1.conf index dde29de5d7da3..1e96c7b35c7ca 100644 --- a/pulsar-testclient/src/test/resources/perf_client1.conf +++ b/pulsar-testclient/src/test/resources/perf_client1.conf @@ -24,4 +24,4 @@ tlsTrustCertsFilePath=./path tlsAllowInsecureConnection=true tlsEnableHostnameVerification=true proxyServiceURL=https://my-proxy-pulsar:4443/ -proxyProtocol=SNI +proxyProtocol=SNI \ No newline at end of file diff --git a/pulsar-testclient/src/test/resources/perf_client2.conf b/pulsar-testclient/src/test/resources/perf_client2.conf new file mode 100644 index 0000000000000..127960618bf5d --- /dev/null +++ b/pulsar-testclient/src/test/resources/perf_client2.conf @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# + +brokerServiceUrl=https://my-pulsar:8443/ +authPlugin=org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth +authParams=myparams +tlsTrustCertsFilePath=./path +tlsAllowInsecureConnection=true +tlsEnableHostnameVerification=true diff --git a/pulsar-testclient/src/test/resources/perf_client3.conf b/pulsar-testclient/src/test/resources/perf_client3.conf new file mode 100644 index 0000000000000..ff524e03660cb --- /dev/null +++ b/pulsar-testclient/src/test/resources/perf_client3.conf @@ -0,0 +1,27 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +brokerServiceUrl=https://my-pulsar:8443/ +authPlugin=org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth +authParams=myparams +tlsTrustCertsFilePath=./path +tlsAllowInsecureConnection=true +tlsEnableHostnameVerification=true +proxyServiceURL=https://my-proxy-pulsar:4443/ +proxyProtocol=TEST \ No newline at end of file From 48b2a2d48f19dff17343718ed2b02778c8611938 Mon Sep 17 00:00:00 2001 From: Vineeth Date: Tue, 27 Sep 2022 11:29:42 -0700 Subject: [PATCH 54/59] Add proxyServiceUrl and proxyProtocol as oprtions for PerfTool CLI --- .../org/apache/pulsar/testclient/PerformanceBaseArguments.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java index ee1fd1b6908cb..307af7cbb152b 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceBaseArguments.java @@ -20,7 +20,6 @@ import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.pulsar.testclient.PerfClientUtils.exit; - import com.beust.jcommander.Parameter; import java.io.FileInputStream; import java.util.Properties; @@ -160,7 +159,7 @@ public void fillArgumentsFromProperties() { } } - + fillArgumentsFromProperties(prop); } From 603ff9cb394385075349d8985a11e2d5fec35a32 Mon Sep 17 00:00:00 2001 From: Vineeth Date: Mon, 3 Oct 2022 08:47:02 -0700 Subject: [PATCH 55/59] Add proxyServiceUrl and proxyProtocol as oprtions for PerfTool CLI --- .../PerformanceBaseArgumentsTest.java | 100 ++++++++++++++---- 1 file changed, 78 insertions(+), 22 deletions(-) diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java index 
f8bb0bc2bb01f..bc335a83c71af 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java @@ -18,12 +18,21 @@ */ package org.apache.pulsar.testclient; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Properties; import java.util.concurrent.atomic.AtomicBoolean; + +import junit.framework.AssertionFailedError; import org.testng.Assert; import org.testng.annotations.Test; import static org.apache.pulsar.client.api.ProxyProtocol.SNI; +import static org.testng.Assert.fail; public class PerformanceBaseArgumentsTest { @@ -64,16 +73,40 @@ public void fillArgumentsFromProperties(Properties prop) { called.set(true); } }; - args.confFile = "./src/test/resources/perf_client2.conf"; - args.fillArgumentsFromProperties(); - Assert.assertTrue(called.get()); - Assert.assertEquals(args.serviceURL, "https://my-pulsar:8443/"); - Assert.assertEquals(args.authPluginClassName, - "org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth"); - Assert.assertEquals(args.authParams, "myparams"); - Assert.assertEquals(args.tlsTrustCertsFilePath, "./path"); - Assert.assertTrue(args.tlsAllowInsecureConnection); - Assert.assertTrue(args.tlsHostnameVerificationEnable); + + File file = new File("./src/test/resources/performance_client2.conf"); + try { + Properties props = new Properties(); + + Map configs = Map.of("brokerServiceUrl","https://my-pulsar:8443/", + "authPlugin","org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth", + "authParams", "myparams", + "tlsTrustCertsFilePath", "./path", + "tlsAllowInsecureConnection","true", + "tlsEnableHostnameVerification", "true" + ); + props.putAll(configs); + FileOutputStream out = new FileOutputStream(file); + props.store(out, "properties file"); + out.close(); + 
args.confFile = "./src/test/resources/performance_client2.conf"; + + args.fillArgumentsFromProperties(); + Assert.assertTrue(called.get()); + Assert.assertEquals(args.serviceURL, "https://my-pulsar:8443/"); + Assert.assertEquals(args.authPluginClassName, + "org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth"); + Assert.assertEquals(args.authParams, "myparams"); + Assert.assertEquals(args.tlsTrustCertsFilePath, "./path"); + Assert.assertTrue(args.tlsAllowInsecureConnection); + Assert.assertTrue(args.tlsHostnameVerificationEnable); + + } catch (IOException e) { + e.printStackTrace(); + fail("Error while updating/reading config file"); + } finally { + file.delete(); + } } @Test @@ -88,19 +121,42 @@ public void fillArgumentsFromProperties(Properties prop) { calledVar1.set(true); } }; + File file = new File("./src/test/resources/performance_client3.conf");; + try { + Properties props = new Properties(); - PerfClientUtils.setExitProcedure(code -> { - calledVar2.set(true); - Assert.assertNotNull(code); - if (code != -1) { - Assert.fail("Incorrect exit code"); - } - }); - - args.confFile = "./src/test/resources/perf_client3.conf"; - args.fillArgumentsFromProperties(); - Assert.assertTrue(calledVar1.get()); - Assert.assertTrue(calledVar2.get()); + Map configs = Map.of("brokerServiceUrl","https://my-pulsar:8443/", + "authPlugin","org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth", + "authParams", "myparams", + "tlsTrustCertsFilePath", "./path", + "tlsAllowInsecureConnection","true", + "tlsEnableHostnameVerification", "true", + "proxyServiceURL", "https://my-proxy-pulsar:4443/", + "proxyProtocol", "TEST" + ); + props.putAll(configs); + FileOutputStream out = new FileOutputStream(file); + props.store(out, "properties file"); + out.close(); + args.confFile = "./src/test/resources/performance_client2.conf"; + PerfClientUtils.setExitProcedure(code -> { + calledVar2.set(true); + Assert.assertNotNull(code); + if (code != -1) { + fail("Incorrect exit code"); + } + }); + + 
args.confFile = "./src/test/resources/perf_client3.conf"; + args.fillArgumentsFromProperties(); + Assert.assertTrue(calledVar1.get()); + Assert.assertTrue(calledVar2.get()); + } catch (IOException e) { + e.printStackTrace(); + fail("Error while updating/reading config file"); + } finally { + file.delete(); + } } } \ No newline at end of file From 6c975f4faa0276265132983acc92d490c6ecfc8e Mon Sep 17 00:00:00 2001 From: Vineeth Date: Mon, 3 Oct 2022 08:58:46 -0700 Subject: [PATCH 56/59] Add proxyServiceUrl and proxyProtocol as oprtions for PerfTool CLI --- .../PerformanceBaseArgumentsTest.java | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java index bc335a83c71af..77fd9989b7c84 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java @@ -74,7 +74,10 @@ public void fillArgumentsFromProperties(Properties prop) { } }; - File file = new File("./src/test/resources/performance_client2.conf"); + File tempConfigFile = new File("./src/test/resources/performance_client2.conf"); + if (tempConfigFile.exists()) { + tempConfigFile.delete(); + } try { Properties props = new Properties(); @@ -86,7 +89,7 @@ public void fillArgumentsFromProperties(Properties prop) { "tlsEnableHostnameVerification", "true" ); props.putAll(configs); - FileOutputStream out = new FileOutputStream(file); + FileOutputStream out = new FileOutputStream(tempConfigFile); props.store(out, "properties file"); out.close(); args.confFile = "./src/test/resources/performance_client2.conf"; @@ -105,7 +108,7 @@ public void fillArgumentsFromProperties(Properties prop) { e.printStackTrace(); fail("Error while updating/reading config file"); } finally 
{ - file.delete(); + tempConfigFile.delete(); } } @@ -121,7 +124,10 @@ public void fillArgumentsFromProperties(Properties prop) { calledVar1.set(true); } }; - File file = new File("./src/test/resources/performance_client3.conf");; + File tempConfigFile = new File("./src/test/resources/performance_client3.conf"); + if (tempConfigFile.exists()) { + tempConfigFile.delete(); + } try { Properties props = new Properties(); @@ -135,7 +141,7 @@ public void fillArgumentsFromProperties(Properties prop) { "proxyProtocol", "TEST" ); props.putAll(configs); - FileOutputStream out = new FileOutputStream(file); + FileOutputStream out = new FileOutputStream(tempConfigFile); props.store(out, "properties file"); out.close(); args.confFile = "./src/test/resources/performance_client2.conf"; @@ -155,7 +161,7 @@ public void fillArgumentsFromProperties(Properties prop) { e.printStackTrace(); fail("Error while updating/reading config file"); } finally { - file.delete(); + tempConfigFile.delete(); } } From bf84ca2f80b2be45d7bbc67b8def3674a463fa1f Mon Sep 17 00:00:00 2001 From: Vineeth Date: Mon, 3 Oct 2022 09:00:34 -0700 Subject: [PATCH 57/59] Add proxyServiceUrl and proxyProtocol as oprtions for PerfTool CLI --- .../src/test/resources/perf_client2.conf | 25 ----------------- .../src/test/resources/perf_client3.conf | 27 ------------------- 2 files changed, 52 deletions(-) delete mode 100644 pulsar-testclient/src/test/resources/perf_client2.conf delete mode 100644 pulsar-testclient/src/test/resources/perf_client3.conf diff --git a/pulsar-testclient/src/test/resources/perf_client2.conf b/pulsar-testclient/src/test/resources/perf_client2.conf deleted file mode 100644 index 127960618bf5d..0000000000000 --- a/pulsar-testclient/src/test/resources/perf_client2.conf +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -brokerServiceUrl=https://my-pulsar:8443/ -authPlugin=org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth -authParams=myparams -tlsTrustCertsFilePath=./path -tlsAllowInsecureConnection=true -tlsEnableHostnameVerification=true diff --git a/pulsar-testclient/src/test/resources/perf_client3.conf b/pulsar-testclient/src/test/resources/perf_client3.conf deleted file mode 100644 index ff524e03660cb..0000000000000 --- a/pulsar-testclient/src/test/resources/perf_client3.conf +++ /dev/null @@ -1,27 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -brokerServiceUrl=https://my-pulsar:8443/ -authPlugin=org.apache.pulsar.testclient.PerfClientUtilsTest.MyAuth -authParams=myparams -tlsTrustCertsFilePath=./path -tlsAllowInsecureConnection=true -tlsEnableHostnameVerification=true -proxyServiceURL=https://my-proxy-pulsar:4443/ -proxyProtocol=TEST \ No newline at end of file From 257bbabefacdf390874747b1babad783ecfa35dd Mon Sep 17 00:00:00 2001 From: Vineeth Date: Mon, 3 Oct 2022 10:20:11 -0700 Subject: [PATCH 58/59] Add proxyServiceUrl and proxyProtocol as oprtions for PerfTool CLI --- .../apache/pulsar/testclient/PerformanceBaseArgumentsTest.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java index 77fd9989b7c84..efde7c31b0014 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java @@ -19,15 +19,12 @@ package org.apache.pulsar.testclient; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.concurrent.atomic.AtomicBoolean; -import junit.framework.AssertionFailedError; import org.testng.Assert; import org.testng.annotations.Test; From 875674a15814cd74544c142c7f389ee2ad5bb668 Mon Sep 17 00:00:00 2001 From: Vineeth Date: Mon, 3 Oct 2022 12:07:10 -0700 Subject: [PATCH 59/59] Add proxyServiceUrl and proxyProtocol as oprtions for PerfTool CLI --- .../pulsar/testclient/PerformanceBaseArgumentsTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java 
b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java index efde7c31b0014..3071f92f9ccce 100644 --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceBaseArgumentsTest.java @@ -141,7 +141,7 @@ public void fillArgumentsFromProperties(Properties prop) { FileOutputStream out = new FileOutputStream(tempConfigFile); props.store(out, "properties file"); out.close(); - args.confFile = "./src/test/resources/performance_client2.conf"; + args.confFile = "./src/test/resources/performance_client3.conf"; PerfClientUtils.setExitProcedure(code -> { calledVar2.set(true); Assert.assertNotNull(code); @@ -150,7 +150,7 @@ public void fillArgumentsFromProperties(Properties prop) { } }); - args.confFile = "./src/test/resources/perf_client3.conf"; + args.confFile = "./src/test/resources/performance_client3.conf"; args.fillArgumentsFromProperties(); Assert.assertTrue(calledVar1.get()); Assert.assertTrue(calledVar2.get());