From 20e27fa57f210d46efccea0b6df229790c90eb3a Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:15:27 -0800 Subject: [PATCH 1/6] local preview playbook --- local-preview-playbook.yml | 183 ++++++++++++++++++++++++++++++++++--- 1 file changed, 168 insertions(+), 15 deletions(-) diff --git a/local-preview-playbook.yml b/local-preview-playbook.yml index 1911874..eb97263 100644 --- a/local-preview-playbook.yml +++ b/local-preview-playbook.yml @@ -54,23 +54,176 @@ asciidoc: xrefstyle: short # CUSTOM ATTRIBUTES company: 'DataStax' + trust-center: 'IBM Trust Center' + trust-center-url: 'https://www.ibm.com/trust' + trust-center-link: '{trust-center-url}[{trust-center}]' + support-url: 'https://www.ibm.com/mysupport/s/' + dsbulk: 'DataStax Bulk Loader (DSBulk)' + dsbulk-short: 'DSBulk' + dsbulk-repo: 'https://github.com/datastax/dsbulk' + astra: 'Astra' + astra-db: 'Astra DB' + astra-ui: 'Astra Portal' + astra-url: 'https://astra.datastax.com' + astra-ui-link: '{astra-url}[{astra-ui}^]' + db-classic: 'Managed Cluster' + db-serverless: 'Serverless (non-vector)' + db-serverless-vector: 'Serverless (vector)' + scb: 'Secure Connect Bundle (SCB)' + scb-short: 'SCB' + scb-brief: 'Secure Connect Bundle' + devops-api: 'DevOps API' + devops-api-ref-url: 'xref:astra-api-docs:ROOT:attachment$devops-api/index.html' + astra-cli: 'Astra CLI' + astra-stream: 'Astra Streaming' + starlight-kafka: 'Starlight for Kafka' + starlight-rabbitmq: 'Starlight for RabbitMQ' + astra-streaming-examples-repo: 'https://github.com/datastax/astra-streaming-examples' + sstable-sideloader: '{astra-db} Sideloader' + zdm: 'Zero Downtime Migration' + zdm-short: 'ZDM' + zdm-proxy: 'ZDM Proxy' + cass-migrator: 'Cassandra Data Migrator (CDM)' + cass-migrator-short: 'CDM' + hcd: 'Hyper-Converged Database (HCD)' + hcd-short: 'HCD' + dse: 'DataStax Enterprise (DSE)' + dse-short: 'DSE' + metrics-collector: 'DSE Metrics Collector' + mc: 'Mission Control' + 
opscenter: 'DSE OpsCenter' + studio: 'DataStax Studio' + cass-reg: 'Apache Cassandra(R)' + cass: 'Apache Cassandra' + cass-short: 'Cassandra' + cql: 'Cassandra Query Language (CQL)' + cql-shell: 'CQL shell' + cql-console: 'CQL console' + cql-service: 'CQL Service' + pulsar-reg: 'Apache Pulsar(TM)' + pulsar: 'Apache Pulsar' + pulsar-short: 'Pulsar' + spark-reg: 'Apache Spark(TM)' + spark: 'Apache Spark' + spark-short: 'Spark' + spark-connect: 'Spark Connect' + spark-connector: 'Apache Cassandra Spark Connector' + spark-connector-short: 'Spark Connector' + kafka-reg: 'Apache Kafka(R)' + kafka: 'Apache Kafka' + kafka-short: 'Kafka' + kafka-connect: 'Kafka Connect' + kafka-connector: 'DataStax Apache Kafka Connector' + kafka-connector-short: 'Kafka Connector' + solr-reg: 'Apache Solr(TM)' + solr: 'Apache Solr' + solr-short: 'Solr' + lucene-reg: 'Apache Lucene(TM)' + lucene: 'Apache Lucene' + lucene-short: 'Lucene' + hadoop-reg: 'Apache Hadoop(R)' + hadoop: 'Apache Hadoop' + hadoop-short: 'Hadoop' + airflow-reg: 'Apache Airflow(R)' + airflow: 'Apache Airflow' + airflow-short: 'Airflow' + maven-reg: 'Apache Maven(TM)' + maven: 'Apache Maven' + maven-short: 'Maven' + flink-reg: 'Apache Flink(R)' + flink: 'Apache Flink' + flink-short: 'Flink' + beam-reg: 'Apache Beam(R)' + beam: 'Apache Beam' + beam-short: 'Beam' + geode-reg: 'Apache Geode(TM)' + geode: 'Apache Geode' + geode-short: 'Geode' + hbase-reg: 'Apache HBase(R)' + hbase: 'Apache HBase' + hbase-short: 'HBase' + kudu-reg: 'Apache Kudu(TM)' + kudu: 'Apache Kudu' + kudu-short: 'Kudu' + phoenix-reg: 'Apache Phoenix(TM)' + phoenix: 'Apache Phoenix' + phoenix-short: 'Phoenix' + zookeeper-reg: 'Apache ZooKeeper(TM)' + zookeeper: 'Apache ZooKeeper' + zookeeper-short: 'ZooKeeper' + asf: 'Apache Software Foundation (ASF)' + asf-short: 'ASF' + tinkerpop-reg: 'Apache TinkerPop(TM)' + tinkerpop: 'Apache TinkerPop' + tinkerpop-short: 'TinkerPop' + cloudstack-reg: 'Apache CloudStack(R)' + cloudstack: 'Apache CloudStack' + 
cloudstack-short: 'CloudStack' + tomcat-reg: 'Apache Tomcat(R)' + tomcat: 'Apache Tomcat' + tomcat-short: 'Tomcat' + ajp: 'Apache JServ Protocol (AJP)' + ajp-short: 'AJP' + activemq-reg: 'Apache ActiveMQ(R)' + activemq: 'Apache ActiveMQ' + activemq-short: 'ActiveMQ' + tomee-reg: 'Apache TomEE(TM)' + tomee: 'Apache TomEE' + tomee-short: 'TomEE' + bookkeeper-reg: 'Apache BookKeeper(TM)' + bookkeeper: 'Apache BookKeeper' + bookkeeper-short: 'BookKeeper' + groovy-reg: 'Apache Groovy(TM)' + groovy: 'Apache Groovy' + groovy-short: 'Groovy' + cpp-driver-url: 'https://github.com/datastax/cpp-driver' + csharp-driver-url: 'https://github.com/datastax/csharp-driver' + gocql-astra-url: 'https://github.com/datastax/gocql-astra' + go-driver-url: 'https://github.com/apache/cassandra-gocql-driver' + cql-proxy-url: 'https://github.com/datastax/cql-proxy' + java-driver-url: 'https://github.com/apache/cassandra-java-driver' + nodejs-driver-url: 'https://github.com/datastax/nodejs-driver' + python-driver-url: 'https://github.com/datastax/python-driver' + scala-driver-url: 'https://github.com/apache/cassandra-spark-connector' + cass-driver-cpp-shield: 'image:https://img.shields.io/github/v/tag/datastax/cpp-driver?label=latest[alt="Latest cpp-driver release on GitHub",link="{cpp-driver-url}/tags"]' + cass-driver-csharp-shield: 'image:https://img.shields.io/nuget/v/CassandraCSharpDriver?label=latest[alt="Latest CassandraCSharpDriver release on NuGet",link="https://www.nuget.org/packages/CassandraCSharpDriver"]' + cass-driver-go-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-gocql-driver?label=latest%20gocql[alt="Latest gocql release on GitHub",link="{go-driver-url}/tags"]' + cass-driver-java-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-java-driver?label=latest[alt="Latest cassandra-java-driver release on GitHub",link="{java-driver-url}/tags"]' + cass-driver-nodejs-shield: 
'image:https://img.shields.io/github/v/tag/datastax/nodejs-driver?label=latest[alt="Latest nodejs-driver release on GitHub",link="{nodejs-driver-url}/tags"]' + cass-driver-python-shield: 'image:https://img.shields.io/github/v/tag/datastax/python-driver?label=latest[alt="Latest python-driver release on GitHub",link="{python-driver-url}/tags"]' + cass-driver-scala-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-spark-connector?label=latest[alt="Latest cassandra-spark-connector release on GitHub",link="{scala-driver-url}/releases"]' + data-api: 'Data API' + csharp-client-api-ref-url: 'xref:astra-api-docs:ROOT:attachment$csharp-client' + py-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$python-client/astrapy' + ts-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$typescript-client' + java-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$java-client' + python-client-repo-url: 'https://github.com/datastax/astrapy' + typescript-client-repo-url: 'https://github.com/datastax/astra-db-ts' + typescript-client-examples-url: '{typescript-client-repo-url}/blob/v2.x/examples' + java-client-repo-url: 'https://github.com/datastax/astra-db-java' + csharp-client-repo-url: 'https://github.com/datastax/astra-db-csharp' + python-client-python-version: '3.8' + dataapi-java-client-shield: 'image:https://img.shields.io/maven-central/v/com.datastax.astra/astra-db-java.svg?label=latest[alt="Latest astra-db-java release on Maven Central",link="https://search.maven.org/artifact/com.datastax.astra/astra-db-java"]' + dataapi-python-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astrapy?label=latest[alt="Latest astrapy release on GitHub",link="{python-client-repo-url}/releases"]' + dataapi-typescript-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astra-db-ts?label=latest[alt="Latest astra-db-ts release on GitHub",link="{typescript-client-repo-url}/releases"]' + dataapi-csharp-client-shield: 
'image:https://img.shields.io/github/v/tag/datastax/astra-db-csharp?label=latest[alt="Latest astra-db-csharp release on GitHub",link="{csharp-client-repo-url}/releases"]' + agent: 'DataStax Agent' + repair-service: 'Repair Service' + backup-service: 'Backup Service' + performance-service: 'Performance Service' + monitoring-service: 'OpsCenter Monitoring' + nodesync-service: 'NodeSync Service' + bestpractice-service: 'Best Practice Service' + capacity-service: 'Capacity Service' + lcm: 'Lifecycle Manager (LCM)' + lcm-short: 'LCM' + cr: 'custom resource (CR)' + cr-short: 'CR' + crd: 'custom resource definition (CRD)' + crd-short: 'CRD' + # Custom attributes only used in ragstack-ai astra_db: 'Astra DB' - astra_stream: 'Astra Streaming' astra_ui: 'Astra Portal' - astra_cli: 'Astra CLI' - astra-streaming-examples-repo: 'https://raw.githubusercontent.com/datastax/astra-streaming-examples/master' - luna-streaming-examples-repo: 'https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main' - support_url: 'https://www.ibm.com/mysupport/s/' - glossary-url: 'https://docs.datastax.com/en/glossary/docs/index.html#' - emoji-tada: "🎉" - emoji-rocket: "🚀" - emoji-smile: "😀" - dse: 'DataStax Enterprise (DSE)' - cassandra: 'Apache Cassandra(R)' - classic: 'classic' - classic_cap: 'Classic' - serverless: 'serverless' - serverless_cap: 'Serverless' # Antora Atlas primary-site-url: https://docs.datastax.com/en primary-site-manifest-url: https://docs.datastax.com/en/site-manifest.json From d8eb45174b591c31ab4603bd462c1beed6828d7a Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:28:15 -0800 Subject: [PATCH 2/6] kafka --- antora.yml | 4 ++++ modules/ROOT/pages/faqs.adoc | 6 +++--- modules/ROOT/pages/index.adoc | 4 ++-- modules/components/pages/starlight.adoc | 2 +- modules/connectors/pages/index.adoc | 4 ++-- modules/connectors/pages/sinks/google-bigquery.adoc | 4 ++-- modules/connectors/pages/sinks/kafka.adoc | 
10 +++++----- modules/connectors/pages/sources/kafka.adoc | 8 ++++---- 8 files changed, 23 insertions(+), 19 deletions(-) diff --git a/antora.yml b/antora.yml index a5c80dc..d1eb646 100644 --- a/antora.yml +++ b/antora.yml @@ -19,6 +19,10 @@ asciidoc: pulsar: 'Apache Pulsar' pulsar-short: 'Pulsar' astra-stream: 'Astra Streaming' + kafka-reg: 'Apache Kafka(R)' + kafka: 'Apache Kafka' + kafka-short: 'Kafka' + kafka-connect: 'Kafka Connect' # Required for include::common partials that are shared with Astra Streaming web-ui: 'Admin Console' diff --git a/modules/ROOT/pages/faqs.adoc b/modules/ROOT/pages/faqs.adoc index b1d01eb..24f612b 100644 --- a/modules/ROOT/pages/faqs.adoc +++ b/modules/ROOT/pages/faqs.adoc @@ -13,7 +13,7 @@ In addition to {pulsar} itself, {company} {product} provides: * An installer that can stand up a dev or production cluster on bare metal or VMs without a pre-existing Kubernetes environment * A Helm chart that can deploy and manage {pulsar-short} on your current Kubernetes infrastructure -* {cass-short}, Elastic, Kinesis, Kafka, and JDBC connectors +* {cass-short}, Elastic, Kinesis, {kafka-reg}, and JDBC connectors * A management dashboard * A monitoring and alerting system @@ -42,7 +42,7 @@ There are several public repos, each with a different purpose. See: * https://github.com/datastax/pulsar-admin-console[https://github.com/datastax/pulsar-admin-console] : This is the repo for the {pulsar-short} admin console, which allows for the configuration and monitoring of {pulsar-short}. * https://github.com/datastax/pulsar-heartbeat[https://github.com/datastax/pulsar-heartbeat] : This is a monitoring/observability tool for {pulsar-short} that tracks the health of the cluster and can generate alerts in Slack and OpsGenie. * https://github.com/datastax/pulsar-helm-chart[https://github.com/datastax/pulsar-helm-chart] : This is the Helm chart for deploying the {company} {pulsar-short} Distro in an existing Kubernetes cluster. 
-* https://github.com/datastax/pulsar-sink[https://github.com/datastax/pulsar-sink] : This is the {company} {pulsar} Connector (`pulsar-sink` for {cass-short}) repo. +* https://github.com/datastax/pulsar-sink[https://github.com/datastax/pulsar-sink] : This is the {company} {pulsar} Connector (`pulsar-sink`) repo. * https://github.com/datastax/burnell[https://github.com/datastax/burnell] : This is a utility for {pulsar-short} that provides various functions, such as key initialization for authentication, and JWT token creation API. == Is there a prerequisite version of Java needed for the {company} {product} installation? @@ -81,7 +81,7 @@ https://github.com/datastax/pulsar-heartbeat[{pulsar-short} Heartbeat] monitors == What are the features provided by {company} {pulsar} Connector (`pulsar-sink`) that are not supported in `kafka-sink`? -The https://pulsar.apache.org/docs/en/io-overview/[{pulsar-short} IO framework] provides many features that are not possible in Kafka, and has different compression formats and auth/security features. The features are handled by {pulsar-short}. For more, see xref:connectors:index.adoc[{product} IO Connectors]. +The https://pulsar.apache.org/docs/en/io-overview/[{pulsar-short} IO framework] provides many features that are not possible in {kafka-short}, and has different compression formats and auth/security features. The features are handled by {pulsar-short}. For more, see xref:connectors:index.adoc[{product} IO Connectors]. The {company} {pulsar} Connector allows single-record acknowledgement and negative acknowledgements. 
diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc index cebc98f..1d840df 100644 --- a/modules/ROOT/pages/index.adoc +++ b/modules/ROOT/pages/index.adoc @@ -17,9 +17,9 @@ In addition to the distribution of https://pulsar.apache.org/en/versions/[{pulsa * A xref:install-upgrade:quickstart-helm-installs.adoc[Helm chart] that deploys and manages {pulsar-short} on your current CNCF-conformant Kubernetes infrastructure -* {cass-short}, Elastic, Kinesis, Kafka, and JDBC xref:connectors:index.adoc[connectors] +* {cass-short}, Elastic, Kinesis, {kafka-reg}, and JDBC xref:connectors:index.adoc[connectors] -* The xref:components:starlight.adoc[Starlight suite of {pulsar-short} protocol handlers for Kafka, RabbitMQ, and JMS] +* The xref:components:starlight.adoc[Starlight suite of {pulsar-short} protocol handlers for {kafka-short}, RabbitMQ, and JMS] * xref:components:admin-console-vm.adoc[{pulsar-short} Admin Console] for simplified administration of your {pulsar-short} environment diff --git a/modules/components/pages/starlight.adoc b/modules/components/pages/starlight.adoc index 2d16f8c..2f85434 100644 --- a/modules/components/pages/starlight.adoc +++ b/modules/components/pages/starlight.adoc @@ -11,7 +11,7 @@ The Starlight extensions are open source and included in https://www.ibm.com/doc == {starlight-kafka} -The https://github.com/datastax/starlight-for-kafka[{starlight-kafka} extension] brings native Apache Kafka(R) protocol support to {pulsar} by introducing a Kafka protocol handler on {pulsar-short} brokers. +The https://github.com/datastax/starlight-for-kafka[{starlight-kafka} extension] brings native {kafka-reg} protocol support to {pulsar} by introducing a {kafka-short} protocol handler on {pulsar-short} brokers. For more information, see the xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka} documentation]. 
diff --git a/modules/connectors/pages/index.adoc b/modules/connectors/pages/index.adoc index 454921f..ab02aa0 100644 --- a/modules/connectors/pages/index.adoc +++ b/modules/connectors/pages/index.adoc @@ -19,7 +19,7 @@ Supported sink connectors:: The following sink connectors are included in {product}: + * xref:connectors:sinks/astra-db.adoc[{astra-db} and {cass} sink] -* xref:connectors:sinks/kafka.adoc[Apache Kafka sink] +* xref:connectors:sinks/kafka.adoc[{kafka-reg} sink] * xref:connectors:sinks/kinesis.adoc[AWS Kinesis sink] * xref:connectors:sinks/elastic-search.adoc[Elasticsearch sink] * xref:connectors:sinks/jdbc-clickhouse.adoc[JDBC ClickHouse sink] @@ -79,7 +79,7 @@ Source connectors ingest messages from external services into {pulsar-short} top Supported source connectors:: The following source connectors are included in {product}: + -* xref:connectors:sources/kafka.adoc[Apache Kafka source] +* xref:connectors:sources/kafka.adoc[{kafka-reg} source] * xref:connectors:sources/kinesis.adoc[AWS Kinesis source] * xref:connectors:sources/data-generator.adoc[Data Generator source] * xref:connectors:sources/debezium-mongodb.adoc[Debezium MongoDB source] diff --git a/modules/connectors/pages/sinks/google-bigquery.adoc b/modules/connectors/pages/sinks/google-bigquery.adoc index df3db75..f831f10 100644 --- a/modules/connectors/pages/sinks/google-bigquery.adoc +++ b/modules/connectors/pages/sinks/google-bigquery.adoc @@ -25,11 +25,11 @@ include::common:streaming:partial$connectors/connector-params-intro.adoc[] include::common:streaming:partial$connectors/sinks/pulsar-config-params.adoc[] [#configs] -=== Kafka Connect adaptor properties (`configs`) +=== {kafka-connect} adaptor properties (`configs`) include::common:streaming:partial$connectors/sinks/google-bigquery-properties.adoc[] [#kafkaConnectorConfigProperties] -=== Kafka Connect BigQuery Sink properties (`kafkaConnectorConfigProperties`) +=== {kafka-connect} BigQuery Sink properties 
(`kafkaConnectorConfigProperties`) include::common:streaming:partial$connectors/sinks/google-bigquery-sink-properties.adoc[] \ No newline at end of file diff --git a/modules/connectors/pages/sinks/kafka.adoc b/modules/connectors/pages/sinks/kafka.adoc index 1586b92..e24db2f 100644 --- a/modules/connectors/pages/sinks/kafka.adoc +++ b/modules/connectors/pages/sinks/kafka.adoc @@ -1,12 +1,12 @@ -= Kafka += {kafka-short} :connectorType: kafka -The Kafka sink connector reads messages from {pulsar-short} topics and writes them to https://kafka.apache.org/[Kafka] topics. +The {kafka-short} sink connector reads messages from {pulsar-short} topics and writes them to https://kafka.apache.org/[{kafka-reg}] topics. [#compatibility] == Compatibility -{product} supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[Kafka 2.7.2 library]. +{product} supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[{kafka} 2.7.2 library]. == Create the connector @@ -29,9 +29,9 @@ include::common:streaming:partial$connectors/connector-params-intro.adoc[] include::common:streaming:partial$connectors/sinks/pulsar-config-params.adoc[] [#configs] -=== Kafka sink connector properties (`configs`) +=== {kafka-short} sink connector properties (`configs`) Set these properties in the `configs` section of the connector configuration. -Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-sink[OSS {pulsar} Kafka sink connector] are supported. +Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-sink[OSS {pulsar} {kafka-short} sink connector] are supported. Exceptions include properties that aren't relevant to {product} and properties that aren't present in {pulsar} {pulsar-version}. 
\ No newline at end of file diff --git a/modules/connectors/pages/sources/kafka.adoc b/modules/connectors/pages/sources/kafka.adoc index 5ac9110..49598c4 100644 --- a/modules/connectors/pages/sources/kafka.adoc +++ b/modules/connectors/pages/sources/kafka.adoc @@ -1,7 +1,7 @@ -= Kafka += {kafka-short} :connectorType: kafka -The Kafka source connector pulls data from https://kafka.apache.org/[Kafka] topics and persists it to {pulsar-short} topics. +The {kafka-short} source connector pulls data from https://kafka.apache.org/[{kafka-reg}] topics and persists it to {pulsar-short} topics. == Create the connector @@ -24,9 +24,9 @@ include::common:streaming:partial$connectors/connector-params-intro.adoc[] include::common:streaming:partial$connectors/sources/pulsar-config-params.adoc[] [#configs] -=== Kafka source connector properties (`configs`) +=== {kafka-short} source connector properties (`configs`) Set these properties in the `configs` section of the connector configuration. -Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-source[OSS {pulsar} Kafka source connector] are supported. +Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-source[OSS {pulsar} {kafka-short} source connector] are supported. Exceptions include properties that aren't relevant to {product} and properties that aren't present in {pulsar} {pulsar-version}. 
\ No newline at end of file From dd4b92011e642ded8a17b329d88965cc5cdf01b5 Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:37:29 -0800 Subject: [PATCH 3/6] apache attributes --- antora.yml | 5 +++++ .../components/pages/admin-console-tutorial.adoc | 2 +- modules/connectors/pages/index.adoc | 16 ++++++++-------- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/antora.yml b/antora.yml index d1eb646..16e6e6d 100644 --- a/antora.yml +++ b/antora.yml @@ -23,6 +23,11 @@ asciidoc: kafka: 'Apache Kafka' kafka-short: 'Kafka' kafka-connect: 'Kafka Connect' + geode-reg: 'Apache Geode(TM)' + hbase-reg: 'Apache HBase(R)' + kudu-reg: 'Apache Kudu(TM)' + phoenix-reg: 'Apache Phoenix(TM)' + solr-reg: 'Apache Solr(TM)' # Required for include::common partials that are shared with Astra Streaming web-ui: 'Admin Console' diff --git a/modules/components/pages/admin-console-tutorial.adoc b/modules/components/pages/admin-console-tutorial.adoc index 3c6e3ef..c074c73 100644 --- a/modules/components/pages/admin-console-tutorial.adoc +++ b/modules/components/pages/admin-console-tutorial.adoc @@ -69,7 +69,7 @@ On the {pulsar-short} Admin Console's *Code Samples* page, there are examples fo Each example shows Producer, Consumer, and Reader code, plus language-specific examples of setting project properties and dependencies. -For example, selecting Java will show you how to connect your Java project to {pulsar-short} by modifying your Maven's `pom.xml` file. +For example, selecting Java will show you how to connect your Java project to {pulsar-short} by modifying your project's `pom.xml` file. 
[#connect-to-pulsar] == Connecting to {pulsar-short} diff --git a/modules/connectors/pages/index.adoc b/modules/connectors/pages/index.adoc index ab02aa0..edd71e1 100644 --- a/modules/connectors/pages/index.adoc +++ b/modules/connectors/pages/index.adoc @@ -34,11 +34,11 @@ They are available in the `luna-streaming-all` version of {product}, and you can They are considered experimental because they aren't fully supported or tested with {astra-stream}. + * Aerospike -* Apache Geode -* Apache HBase -* Apache Kudu -* Apache Phoenix -* Apache Solr +* {geode-reg} +* {hbase-reg} +* {kudu-reg} +* {phoenix-reg} +* {solr-reg} * Batch Data Generator * xref:connectors:sinks/cloud-storage.adoc[Cloud Storage] * CoAP @@ -94,9 +94,9 @@ They are available in the `luna-streaming-all` version of {product}, and you can They are considered experimental because they aren't fully supported or tested with {astra-stream}. + * {cass} -* Apache Geode -* Apache Kudu -* Apache Phoenix +* {geode-reg} +* {kudu-reg} +* {phoenix-reg} * Batch Data Generator * Big Query * Canal From e63b2a1cf1d549c32da310b91365ee37ef2803fc Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:57:34 -0800 Subject: [PATCH 4/6] github urls --- antora.yml | 11 ++++++ modules/ROOT/pages/faqs.adoc | 20 +++++------ .../pages/admin-console-tutorial.adoc | 2 +- .../components/pages/admin-console-vm.adoc | 4 +-- modules/components/pages/heartbeat-vm.adoc | 4 +-- modules/components/pages/pulsar-beam.adoc | 6 ++-- modules/components/pages/pulsar-monitor.adoc | 2 +- modules/connectors/pages/sinks/astra-db.adoc | 2 +- .../pages/quickstart-helm-installs.adoc | 14 ++++---- .../pages/quickstart-server-installs.adoc | 8 ++--- .../install-upgrade/pages/upgrade-guide.adoc | 36 +++++++++---------- modules/operations/pages/auth.adoc | 14 ++++---- modules/operations/pages/functions.adoc | 2 +- modules/operations/pages/scale-cluster.adoc | 4 +-- 14 files changed, 70 insertions(+), 59 
deletions(-) diff --git a/antora.yml b/antora.yml index 16e6e6d..8d9a7af 100644 --- a/antora.yml +++ b/antora.yml @@ -28,6 +28,17 @@ asciidoc: kudu-reg: 'Apache Kudu(TM)' phoenix-reg: 'Apache Phoenix(TM)' solr-reg: 'Apache Solr(TM)' + pulsar-admin-console-repo: 'https://github.com/datastax/pulsar-admin-console' + pulsar-heartbeat-repo: 'https://github.com/datastax/pulsar-heartbeat' + pulsar-sink-repo: 'https://github.com/datastax/pulsar-sink' + pulsar-ansible-repo: 'https://github.com/datastax/pulsar-ansible' + pulsar-helm-chart-repo: 'https://github.com/datastax/pulsar-helm-chart' + pulsar-openid-connect-repo: 'https://github.com/datastax/pulsar-openid-connect-plugin' + pulsar-repo: 'https://github.com/datastax/pulsar' + pulsar-beam-repo: 'https://github.com/kafkaesque-io/pulsar-beam' + apache-pulsar-repo: 'https://github.com/apache/pulsar' + kaap-operator-repo: 'https://github.com/datastax/kaap' + #Bookkeeper # Required for include::common partials that are shared with Astra Streaming web-ui: 'Admin Console' diff --git a/modules/ROOT/pages/faqs.adoc b/modules/ROOT/pages/faqs.adoc index 24f612b..c407282 100644 --- a/modules/ROOT/pages/faqs.adoc +++ b/modules/ROOT/pages/faqs.adoc @@ -38,11 +38,11 @@ They include Minikube, K8d, Kind, Google Kubernetes Engine (GKE), Microsoft Azur There are several public repos, each with a different purpose. See: -* https://github.com/datastax/pulsar[https://github.com/datastax/pulsar] : This is the distro repo (a fork of apache/pulsar). -* https://github.com/datastax/pulsar-admin-console[https://github.com/datastax/pulsar-admin-console] : This is the repo for the {pulsar-short} admin console, which allows for the configuration and monitoring of {pulsar-short}. -* https://github.com/datastax/pulsar-heartbeat[https://github.com/datastax/pulsar-heartbeat] : This is a monitoring/observability tool for {pulsar-short} that tracks the health of the cluster and can generate alerts in Slack and OpsGenie. 
-* https://github.com/datastax/pulsar-helm-chart[https://github.com/datastax/pulsar-helm-chart] : This is the Helm chart for deploying the {company} {pulsar-short} Distro in an existing Kubernetes cluster. -* https://github.com/datastax/pulsar-sink[https://github.com/datastax/pulsar-sink] : This is the {company} {pulsar} Connector (`pulsar-sink`) repo. +* {pulsar-repo}[{pulsar-repo}] : This is the distro repo (a fork of apache/pulsar). +* {pulsar-admin-console-repo}[{pulsar-admin-console-repo}] : This is the repo for the {pulsar-short} admin console, which allows for the configuration and monitoring of {pulsar-short}. +* {pulsar-heartbeat-repo}[{pulsar-heartbeat-repo}] : This is a monitoring/observability tool for {pulsar-short} that tracks the health of the cluster and can generate alerts in Slack and OpsGenie. +* {pulsar-helm-chart-repo}[{pulsar-helm-chart-repo}] : This is the Helm chart for deploying the {company} {pulsar-short} Distro in an existing Kubernetes cluster. +* {pulsar-sink-repo}[{pulsar-sink-repo}] : This is the {company} {pulsar} Connector (`pulsar-sink`) repo. * https://github.com/datastax/burnell[https://github.com/datastax/burnell] : This is a utility for {pulsar-short} that provides various functions, such as key initialization for authentication, and JWT token creation API. == Is there a prerequisite version of Java needed for the {company} {product} installation? @@ -51,10 +51,10 @@ The {company} {product} distribution is designed for Java 17. However, because t == What are the install options for {company} {product}? -* **KAAP Helm chart (Recommended)**: Use the {company} https://github.com/datastax/kaap/[KAAP operator Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] documentation. 
-* **{product} Helm chart**: Use the Helm chart provided at https://github.com/apache/pulsar-helm-chart[https://github.com/datastax/pulsar-helm-chart] to install {company} {product} in an existing Kubernetes cluster on your laptop or hosted by a cloud provider. -* **Tarball**: Use the tarball provided at https://github.com/datastax/pulsar/releases[https://github.com/datastax/pulsar/releases] to install {company} {product} on a server or VM. -* **Ansible**: Use the {company} Ansible scripts provided at https://github.com/datastax/pulsar-ansible[https://github.com/datastax/pulsar-ansible] to install {company} {product} on a server or VM with our provided playbooks. +* **KAAP Helm chart (Recommended)**: Use the {company} {kaap-operator-repo}[KAAP operator Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] documentation. +* **{product} Helm chart**: Use the Helm chart provided at {pulsar-helm-chart-repo}[{pulsar-helm-chart-repo}] to install {company} {product} in an existing Kubernetes cluster on your laptop or hosted by a cloud provider. +* **Tarball**: Use the tarball provided at {pulsar-repo}/releases[{pulsar-repo}/releases] to install {company} {product} on a server or VM. +* **Ansible**: Use the {company} Ansible scripts provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}] to install {company} {product} on a server or VM with our provided playbooks. == How do I install {company} {product} in my Kubernetes cluster? @@ -77,7 +77,7 @@ From the Admin Console, you can: == What is {pulsar-short} Heartbeat? -https://github.com/datastax/pulsar-heartbeat[{pulsar-short} Heartbeat] monitors the availability, tracks the performance, and reports failures of the {pulsar-short} cluster. It produces synthetic workloads to measure end-to-end message pubsub latency. 
{pulsar-short} Heartbeat is a cloud-native application that can be installed by Helm within the {pulsar-short} Kubernetes cluster. +{pulsar-heartbeat-repo}[{pulsar-short} Heartbeat] monitors the availability, tracks the performance, and reports failures of the {pulsar-short} cluster. It produces synthetic workloads to measure end-to-end message pubsub latency. {pulsar-short} Heartbeat is a cloud-native application that can be installed by Helm within the {pulsar-short} Kubernetes cluster. == What are the features provided by {company} {pulsar} Connector (`pulsar-sink`) that are not supported in `kafka-sink`? diff --git a/modules/components/pages/admin-console-tutorial.adoc b/modules/components/pages/admin-console-tutorial.adoc index c074c73..bf615d8 100644 --- a/modules/components/pages/admin-console-tutorial.adoc +++ b/modules/components/pages/admin-console-tutorial.adoc @@ -145,4 +145,4 @@ Alternatively, you can save the URL authentication parameters in your `client.co == Next steps -For more on building and running a standalone {pulsar-short} Admin console, see the xref:admin-console-vm.adoc[Admin Console on Server/VM] or the {pulsar-short} Admin console repo https://github.com/datastax/pulsar-admin-console#dev[readme]. \ No newline at end of file +For more on building and running a standalone {pulsar-short} Admin console, see the xref:admin-console-vm.adoc[Admin Console on Server/VM] or the {pulsar-short} Admin console repo {pulsar-admin-console-repo}#dev[readme]. \ No newline at end of file diff --git a/modules/components/pages/admin-console-vm.adoc b/modules/components/pages/admin-console-vm.adoc index 66e88e0..97fadd8 100644 --- a/modules/components/pages/admin-console-vm.adoc +++ b/modules/components/pages/admin-console-vm.adoc @@ -15,13 +15,13 @@ wget https://nodejs.org/dist/v14.18.3/node-v14.18.3-linux-x64.tar.xz / tar -xf node-v14.18.3-linux-x64.tar.xz ---- -. Download and install the {pulsar-short} Admin console tarball to the VM. 
You can find the most recent {pulsar-short} Admin Console release https://github.com/datastax/pulsar-admin-console/releases[here]. +. Download and install the {pulsar-short} Admin console tarball to the VM. You can find the most recent {pulsar-short} Admin Console release {pulsar-admin-console-repo}/releases[here]. + The tarball is also available with `wget`: + [source,bash,subs="attributes+"] ---- -wget https://github.com/datastax/pulsar-admin-console/releases/download/{admin-console-version}/pulsar-admin-console-2.2.0.tar.gz +wget {pulsar-admin-console-repo}/releases/download/{admin-console-version}/pulsar-admin-console-2.2.0.tar.gz ---- . Extract the tarball: diff --git a/modules/components/pages/heartbeat-vm.adoc b/modules/components/pages/heartbeat-vm.adoc index 039f878..ba1b12e 100644 --- a/modules/components/pages/heartbeat-vm.adoc +++ b/modules/components/pages/heartbeat-vm.adoc @@ -8,7 +8,7 @@ This document describes how to install {pulsar-short} Heartbeat on a virtual mac + For example, `uname -m` in Ubuntu might return `x86_64`, and `uname -o` returns `GNU/Linux`. -. Download the heartbeat binary `.gz` file matching your OS and process architecture from the https://github.com/datastax/pulsar-heartbeat/releases[releases page]. +. Download the heartbeat binary `.gz` file matching your OS and process architecture from the {pulsar-heartbeat-repo}/releases[releases page]. . Uncompress the file to be an executable binary. The filename structure is `pulsar-heartbeat---`. + @@ -23,7 +23,7 @@ ls ~/Downloads/pulsar-heartbeat-{heartbeat-version}-linux-amd64 == Execute Heartbeat binary -The {pulsar-short} Heartbeat configuration is defined by a `.yaml` file. A yaml template for Heartbeat is available at https://github.com/datastax/pulsar-heartbeat/blob/master/config/runtime-template.yml[]. In this file, the environmental variable `PULSAR_OPS_MONITOR_CFG` tells the application where to source the file. 
+The {pulsar-short} Heartbeat configuration is defined by a `.yaml` file. A yaml template for Heartbeat is available at {pulsar-heartbeat-repo}/blob/master/config/runtime-template.yml[]. In this file, the environment variable `PULSAR_OPS_MONITOR_CFG` tells the application where to source the file. Run the binary file `pulsar-heartbeat-<version>-<os>-<architecture>`. diff --git a/modules/components/pages/pulsar-beam.adoc b/modules/components/pages/pulsar-beam.adoc index ac6c9a3..9426268 100644 --- a/modules/components/pages/pulsar-beam.adoc +++ b/modules/components/pages/pulsar-beam.adoc @@ -3,7 +3,7 @@ :description: Install a minimal {product} Helm chart that includes {pulsar-short} Beam :helmValuesPath: https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main/beam/values.yaml -The https://github.com/kafkaesque-io/pulsar-beam[{pulsar-short} Beam] project is an HTTP-based streaming and queueing system for use with {pulsar}. +The {pulsar-beam-repo}[{pulsar-short} Beam] project is an HTTP-based streaming and queueing system for use with {pulsar}. With {pulsar-short} Beam, you can send messages over HTTP, push messages to a webhook or cloud function, chain webhooks and functions together, or stream messages through server-sent events (SSE). @@ -75,7 +75,7 @@ id: {9 0 0 0 0xc002287ad0} data: Hi there ---- -You have now completed the basics of using Beam in a {pulsar-short} Cluster. Refer to the project's https://github.com/kafkaesque-io/pulsar-beam/blob/master/README.md[readme] to see all the possibilities! +You have now completed the basics of using Beam in a {pulsar-short} Cluster. Refer to the project's {pulsar-beam-repo}/blob/master/README.md[readme] to see all the possibilities!
== A Python producer and consumer @@ -156,6 +156,6 @@ include::partial$cleanup-terminal-and-helm.adoc[] Here are links to resources and guides you might be interested in: -* https://github.com/kafkaesque-io/pulsar-beam[Learn more] about the {pulsar-short} Beam project +* {pulsar-beam-repo}[Learn more] about the {pulsar-short} Beam project * https://kafkaesque-io.github.io/pulsar-beam-swagger[{pulsar-short} Beam API] * xref:pulsar-sql.adoc[] \ No newline at end of file diff --git a/modules/components/pages/pulsar-monitor.adoc b/modules/components/pages/pulsar-monitor.adoc index 767d226..07b0abe 100644 --- a/modules/components/pages/pulsar-monitor.adoc +++ b/modules/components/pages/pulsar-monitor.adoc @@ -26,7 +26,7 @@ TIP: {pulsar-short} Heartbeat is installed automatically for server/VM installat * A command line argument `./pulsar-heartbeat -config /path/to/runtime.yml` * A default path to `../config/runtime.yml` -You can download a template https://github.com/datastax/pulsar-heartbeat/blob/master/config/runtime-template.yml[here]. +You can download a template {pulsar-heartbeat-repo}/blob/master/config/runtime-template.yml[here]. 
== Observability diff --git a/modules/connectors/pages/sinks/astra-db.adoc b/modules/connectors/pages/sinks/astra-db.adoc index b98105e..8388eaa 100644 --- a/modules/connectors/pages/sinks/astra-db.adoc +++ b/modules/connectors/pages/sinks/astra-db.adoc @@ -44,4 +44,4 @@ include::common:streaming:partial$connectors/sinks/astra-db-topic.adoc[] == See also -* https://github.com/datastax/pulsar-sink[{company} {pulsar} connector GitHub repository] \ No newline at end of file +* {pulsar-sink-repo}[{company} {pulsar} connector GitHub repository] \ No newline at end of file diff --git a/modules/install-upgrade/pages/quickstart-helm-installs.adoc b/modules/install-upgrade/pages/quickstart-helm-installs.adoc index 0a41fb4..0ac3a19 100644 --- a/modules/install-upgrade/pages/quickstart-helm-installs.adoc +++ b/modules/install-upgrade/pages/quickstart-helm-installs.adoc @@ -2,14 +2,14 @@ [IMPORTANT] ==== -The {company} production-ready Helm chart is now deprecated. For new deployments, we recommend using the {company} https://github.com/datastax/kaap/[KAAP operator Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] documentation. +The {company} production-ready Helm chart is now deprecated. For new deployments, we recommend using the {company} {kaap-operator-repo}[KAAP operator Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] documentation. ==== You have options for installing *{company} {product}*: * With the provided *{company} Helm chart* for an existing Kubernetes environment locally or with a cloud provider, as covered in this topic. * With the *{company} {product} tarball* for deployment to a single server/VM, or to multiple servers/VMs. 
See xref:install-upgrade:quickstart-server-installs.adoc[Quick Start for Server/VM installs]. -* With the *{company} Ansible scripts* provided at https://github.com/datastax/pulsar-ansible[https://github.com/datastax/pulsar-ansible]. +* With the *{company} Ansible scripts* provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}]. The Helm chart and options described below configure an {pulsar} cluster. It is designed for production use, but can also be used in local development environments with the proper settings. @@ -132,7 +132,7 @@ There are two ways to override `default_storage`: replication-type: none ---- -* Create a custom storage configuration as a `yaml` file (https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. +* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. + [source,yaml] ---- @@ -240,7 +240,7 @@ helm repo update curl -LOs https://datastax.github.io/pulsar-helm-chart/examples/dev-values.yaml ---- -The `dev-values.yaml` file can be viewed (https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values.yaml[here]). +The `dev-values.yaml` file can be viewed ({pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[here]). 
To list the version of the chart in the local Helm repository: @@ -320,9 +320,9 @@ kubectl port-forward -n pulsar $(kubectl get pods -n pulsar -l app.kubernetes.io == Example configurations -There are several example configurations in the https://github.com/datastax/pulsar-helm-chart/blob/master/examples[examples] directory: +There are several example configurations in the {pulsar-helm-chart-repo}/blob/master/examples[examples] directory: -* https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values.yaml[dev-values.yaml example file]. A configuration for setting up a development environment to run in a local Kubernetes environment (for example, https://minikube.sigs.k8s.io/docs/start/[minikube], or https://kind.sigs.k8s.io/[kind]). Message/state persistence, redundancy, authentication, and TLS are disabled. +* {pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[dev-values.yaml example file]. A configuration for setting up a development environment to run in a local Kubernetes environment (for example, https://minikube.sigs.k8s.io/docs/start/[minikube], or https://kind.sigs.k8s.io/[kind]). Message/state persistence, redundancy, authentication, and TLS are disabled. + TIP: With message/state persistence disabled, the cluster will not survive a restart of the ZooKeeper or BookKeeper. @@ -493,7 +493,7 @@ This is useful if you are using a self-signed certificate. For automated handling of publicly signed certificates, you can use a tool such as https://cert-manager.io/[cert-manager]. -For more information, see https://github.com/datastax/pulsar-helm-chart/blob/master/aws-customer-docs.md[Using Cert-Manager for {pulsar-short} Certificates in AWS]. +For more information, see {pulsar-helm-chart-repo}/blob/master/aws-customer-docs.md[Using Cert-Manager for {pulsar-short} Certificates in AWS].
Once you have created the secrets that store the certificate info (or specified it in the values), you can enable TLS in the values: diff --git a/modules/install-upgrade/pages/quickstart-server-installs.adoc b/modules/install-upgrade/pages/quickstart-server-installs.adoc index a1fa8dd..8734f89 100644 --- a/modules/install-upgrade/pages/quickstart-server-installs.adoc +++ b/modules/install-upgrade/pages/quickstart-server-installs.adoc @@ -41,7 +41,7 @@ Check this setting with `cat /sys/kernel/mm/transparent_hugepage/enabled` and `c [#install] == Installation -. Download the {company} {product} tarball from the https://github.com/datastax/pulsar/releases[{company} GitHub repo]. There are three versions of {product} currently available: +. Download the {company} {product} tarball from the {pulsar-repo}/releases[{company} GitHub repo]. There are three versions of {product} currently available: + [cols="1,1"] [%autowidth] @@ -93,12 +93,12 @@ You have successfully installed the {company} {product} tarball. Once the {company} {product} tarball is installed, you may want to add additional tooling to your server/VM deployment. * *{pulsar-short} Admin Console:* Web-based UI that administrates {pulsar-short}. -Download the latest version from the https://github.com/datastax/pulsar-admin-console[{company} GitHub repo] and follow the instructions xref:components:admin-console-vm.adoc[here]. +Download the latest version from the {pulsar-admin-console-repo}[{company} GitHub repo] and follow the instructions xref:components:admin-console-vm.adoc[here]. + Admin Console requires https://nodejs.org/download/release/latest-v14.x/[NodeJS 14 LTS] and Nginx version 1.17.9+. * *{pulsar-short} Heartbeat:* Monitors {pulsar-short} cluster availability. -Download the latest version from the https://github.com/datastax/pulsar-heartbeat/releases/[{company} GitHub repo] and follow the instructions xref:components:heartbeat-vm.adoc[here]. 
+Download the latest version from the {pulsar-heartbeat-repo}/releases/[{company} GitHub repo] and follow the instructions xref:components:heartbeat-vm.adoc[here]. == Next steps @@ -108,4 +108,4 @@ Download the latest version from the https://github.com/datastax/pulsar-heartbea * For installation to existing Kubernetes environments or with a cloud provider, see xref:install-upgrade:quickstart-helm-installs.adoc[Quick Start for Helm Chart installs]. -* For Ansible deployment, use the {company} Ansible scripts provided at https://github.com/datastax/pulsar-ansible[https://github.com/datastax/pulsar-ansible]. \ No newline at end of file +* For Ansible deployment, use the {company} Ansible scripts provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}]. \ No newline at end of file diff --git a/modules/install-upgrade/pages/upgrade-guide.adoc b/modules/install-upgrade/pages/upgrade-guide.adoc index 807f825..09ea450 100644 --- a/modules/install-upgrade/pages/upgrade-guide.adoc +++ b/modules/install-upgrade/pages/upgrade-guide.adoc @@ -115,12 +115,12 @@ Prometheus metrics have been updated in {product} 3.1. The following PRs were merged to update metrics: -* https://github.com/apache/pulsar/pull/13785[#13785 - Bump prometheus client version from 0.5.0 to 0.15.0] -* https://github.com/apache/pulsar/pull/16581[#16581 - Rename {pulsar-short} txn metrics to specify OpenMetrics] -* https://github.com/apache/pulsar/pull/16610[#16610 - Rename {pulsar-short} schema metrics to specify OpenMetrics] -* https://github.com/apache/pulsar/pull/16611[#16611 - Rename {pulsar-short} lb metrics to specify OpenMetrics] -* https://github.com/apache/pulsar/pull/16591[#16591 - Bump prometheus client version from 0.15.0 to 0.16.0] -* https://github.com/apache/pulsar/pull/17419[#17419 - Removed timestamp from all prometheus metrics.] 
+* {apache-pulsar-repo}/pull/13785[#13785 - Bump prometheus client version from 0.5.0 to 0.15.0] +* {apache-pulsar-repo}/pull/16581[#16581 - Rename {pulsar-short} txn metrics to specify OpenMetrics] +* {apache-pulsar-repo}/pull/16610[#16610 - Rename {pulsar-short} schema metrics to specify OpenMetrics] +* {apache-pulsar-repo}/pull/16611[#16611 - Rename {pulsar-short} lb metrics to specify OpenMetrics] +* {apache-pulsar-repo}/pull/16591[#16591 - Bump prometheus client version from 0.15.0 to 0.16.0] +* {apache-pulsar-repo}/pull/17419[#17419 - Removed timestamp from all prometheus metrics.] === Other functional impacts @@ -131,31 +131,31 @@ The following PRs were merged in {product} 3.1 that may impact your deployment's |=== |PR Link |Title |Functional Impact -|https://github.com/apache/pulsar/pull/19180[#19180] +|{apache-pulsar-repo}/pull/19180[#19180] |Deprecate blocking AuthorizationService, AuthorizationProvider methods |This will affect the public API for the AuthorizationService and the AuthorizationProvider, which only impacts users that are running custom code inside the {pulsar-short} Broker -|https://github.com/apache/pulsar/pull/19182[#19182] +|{apache-pulsar-repo}/pull/19182[#19182] |Remove AuthorizationProvider methods deprecated in 2.7 and 2.9 |Removing deprecated methods allowTenantOperationAsync, allowTenantOperation, allowNamespaceOperationAsync, allowNamespaceOperation, allowNamespacePolicyOperationAsync, allowNamespacePolicyOperation, allowTopicOperationAsync, allowTopicOperation. 
These methods could be used by third party extensions -|https://github.com/apache/pulsar/pull/19197[#19197] +|{apache-pulsar-repo}/pull/19197[#19197] |Update AuthenticationProvider to simplify HTTP Authn |This changes the public API within the broker as some methods are marked as @Deprecated -|https://github.com/apache/pulsar/pull/19295[#19295] +|{apache-pulsar-repo}/pull/19295[#19295] |OneStageAuth State: move authn out of constructor |This could break 3rd party plugins in the broker if they were relying on authentication to happen in the constructor. In order to make those implementations fail fast, this PR includes a change to throw an exception when the getAuthRole is called without first calling authenticateAsync or authenticate. That makes these changes semi-backwards compatible. -|https://github.com/apache/pulsar/pull/19314[#19314] +|{apache-pulsar-repo}/pull/19314[#19314] |TokenAuthenticationState: authenticate token only once |In a sense, this breaks an implicit contract that the class had. However, because the getAuthRole() method will throw an exception if called incorrectly, it is likely that misuse of this class will result in a fail fast behavior. -|https://github.com/apache/pulsar/pull/19455[#19455] +|{apache-pulsar-repo}/pull/19455[#19455] |Require authRole is proxyRole to set originalPrincipal |This change affects the binary protocol's usage without changing the binary protocol itself. Upgrading existing proxies will not work if the proxyRoles is not correctly configured in the broker.conf. -|https://github.com/apache/pulsar/pull/19486[#19486] +|{apache-pulsar-repo}/pull/19486[#19486] |Remove default 30s ackTimeout when setting DLQ policy on java consumer |Removed setting default ackTimeoutMillis in java ConsumerBuilder when a deadLetterPolicy is set. It has to be specified exclusively to use. 
|=== @@ -166,7 +166,7 @@ This section describes changes in {product} 3.1 that may impact your deployment' === Configuration values removed in 3.1 -* https://github.com/apache/pulsar/pull/14506[PR #14506] removes `managedLedgerNumWorkerThreads`. +* {apache-pulsar-repo}/pull/14506[PR #14506] removes `managedLedgerNumWorkerThreads`. The `MetadataStore` instance is now passed from the `PulsarService` directly to the `ManagedLedgerFactory`. * The {pulsar-short} SQL `conf/presto` directory has been removed. @@ -227,7 +227,7 @@ The {pulsar-short} server module's `javac` release version is `17`. Client and client-server shared modules remain at the target Java 8 release. -This modification is described in detail in https://github.com/apache/pulsar/pull/15207[PIP-156]. +This modification is described in detail in {apache-pulsar-repo}/pull/15207[PIP-156]. === Python 2 support removed @@ -239,7 +239,7 @@ The build image is updated to use `ubuntu:20.04`, as there is no Python 3.7 supp Executable scripts have been updated to invoke `python3` instead of `python`. -This modification is described in detail in https://github.com/apache/pulsar/pull/15376[PIP-155] +This modification is described in detail in {apache-pulsar-repo}/pull/15376[PIP-155] == Known issues @@ -255,7 +255,7 @@ This section describes known issues encountered when upgrading to {product} 3.1. All other components such as Broker, Proxy, and Functions Worker can be downgraded at any time. -For more information, see https://github.com/apache/pulsar/issues/22051[Issue 22051]. +For more information, see {apache-pulsar-repo}/issues/22051[Issue 22051]. == Upgrade procedure @@ -350,7 +350,7 @@ kubectl logs *POD-NAME* -n *NAMESPACE* === Upgrade Kubernetes deployment with Helm chart -The Helm chart for {product} is available in the https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/values.yaml[Helm chart sources] repository. 
+The Helm chart for {product} is available in the {pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/values.yaml[Helm chart sources] repository. . To prevent data loss, back up your existing {pulsar-short} data and configuration files. . To save your current Helm release configuration, run the following command: diff --git a/modules/operations/pages/auth.adoc b/modules/operations/pages/auth.adoc index 32f1ac8..f35ca2a 100644 --- a/modules/operations/pages/auth.adoc +++ b/modules/operations/pages/auth.adoc @@ -54,7 +54,7 @@ kubectl create secret tls <name> --key <key-file> --cert <cert-file> The resulting secret will be of type `kubernetes.io/tls`. The key should *not* be in `PKCS 8` format, even though that is the format used by {pulsar-short}. The `kubernetes.io/tls` format will be converted by the chart to `PKCS 8`. -If you have a self-signed certificate, manually specify the certificate information directly in https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[values]: +If you have a self-signed certificate, manually specify the certificate information directly in {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[values]:
dynamic authentication option for {pulsar-short}. This plugin integrates with any OpenID Connect-compliant identity provider to dynamically retrieve public keys for token validation. This dynamic public key retrieval enables support for key rotation and multiple authentication/identity providers by configuring multiple allowed token issuers. It also means that token secret keys will *not* be stored in Kubernetes secrets. +{company} created the {pulsar-openid-connect-repo}[{pulsar-short} OpenID Connect Authentication Plugin] to provide a more dynamic authentication option for {pulsar-short}. This plugin integrates with any OpenID Connect-compliant identity provider to dynamically retrieve public keys for token validation. This dynamic public key retrieval enables support for key rotation and multiple authentication/identity providers by configuring multiple allowed token issuers. It also means that token secret keys will *not* be stored in Kubernetes secrets. -In order to simplify deployment for {pulsar-short} cluster components, the plugin provides the option to use Keycloak in conjunction with {pulsar-short}'s basic token based authentication. For more, see https://github.com/datastax/pulsar-openid-connect-plugin[{pulsar-short} OpenID Connect Authentication Plugin]. +In order to simplify deployment for {pulsar-short} cluster components, the plugin provides the option to use Keycloak in conjunction with {pulsar-short}'s basic token based authentication. For more, see {pulsar-openid-connect-repo}[{pulsar-short} OpenID Connect Authentication Plugin]. -See the example https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[Keycloak Helm chart] for deploying a working cluster that integrates with Keycloak. By default, the Helm chart creates a {pulsar-short} realm within Keycloak and sets up the client used by the {pulsar-short} Admin Console as well as a sample client and some sample groups. 
The configuration for the broker side auth plugin should be placed in the `.Values.<component>.configData` maps. +See the example {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[Keycloak Helm chart] for deploying a working cluster that integrates with Keycloak. By default, the Helm chart creates a {pulsar-short} realm within Keycloak and sets up the client used by the {pulsar-short} Admin Console as well as a sample client and some sample groups. The configuration for the broker side auth plugin should be placed in the `.Values.<component>.configData` maps. === Configuring Keycloak for Token Generation @@ -85,7 +85,7 @@ See the example https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[Keycloak Helm chart] helm install test --values ../../examples/dev-values-keycloak-auth.yaml ---- -. Ensure your deployment name matches the working cluster's name. The name of the deployment is *very important* for a working cluster. The https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[values] file assumes that the cluster's name is `test`. +. Ensure your deployment name matches the working cluster's name. The name of the deployment is *very important* for a working cluster. The {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[values] file assumes that the cluster's name is `test`. . Once the cluster is operational, port forward to Keycloak: + @@ -106,7 +106,7 @@ keycloak: . Navigate to `localhost:8080` in a browser and view the {pulsar-short} realm in the Keycloak UI. Note that the realm name must match the configured realm name (`.Values.keycloak.realm`) for the OpenID Connect plugin to work properly. -The OpenID Connect plugin uses the `sub` (subject) claim from the JWT as the role used for authorization within {pulsar-short}.
To get Keycloak to generate the JWT for a client with the right `sub`, create a special "mapper" that is a "Hardcoded claim" mapping claim name sub to a claim value that is the desired role, like `superuser`. The default config installed by https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[this Helm chart] provides examples of how to add custom mapper protocols to clients. +The OpenID Connect plugin uses the `sub` (subject) claim from the JWT as the role used for authorization within {pulsar-short}. To get Keycloak to generate the JWT for a client with the right `sub`, create a special "mapper" that is a "Hardcoded claim" mapping claim name sub to a claim value that is the desired role, like `superuser`. The default config installed by {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[this Helm chart] provides examples of how to add custom mapper protocols to clients. === Retrieving and using a token from Keycloak with {pulsar-short} Admin CLI diff --git a/modules/operations/pages/functions.adoc b/modules/operations/pages/functions.adoc index 808e89a..6fc1c11 100644 --- a/modules/operations/pages/functions.adoc +++ b/modules/operations/pages/functions.adoc @@ -10,7 +10,7 @@ The function automatically runs for each message published to the specified inpu Functions are implemented using https://pulsar.apache.org/docs/en/functions-overview/[{pulsar-reg} functions]. -https://github.com/datastax/pulsar[{product} 2.10 or later] is required to deploy custom functions in {pulsar-short}. +{pulsar-repo}[{product} 2.10 or later] is required to deploy custom functions in {pulsar-short}. 
== Manage functions using {pulsar-short} Admin CLI diff --git a/modules/operations/pages/scale-cluster.adoc b/modules/operations/pages/scale-cluster.adoc index 25a27c4..45c8c6d 100644 --- a/modules/operations/pages/scale-cluster.adoc +++ b/modules/operations/pages/scale-cluster.adoc @@ -6,9 +6,9 @@ include::operations:partial$operator-scaling.adoc[] == Install {pulsar-short} cluster -For our {pulsar-short} cluster installation, use this https://github.com/datastax/pulsar-helm-chart[Helm chart]. +For our {pulsar-short} cluster installation, use this {pulsar-helm-chart-repo}[Helm chart]. -To start the cluster, use the values provided in this https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values.yaml[YAML file]: +To start the cluster, use the values provided in this {pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[YAML file]: [source,bash] ---- From 62c569599ffd2304e9e24eb76019788f262a58d8 Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Mon, 23 Feb 2026 13:38:52 -0800 Subject: [PATCH 5/6] bookkeeper, zookeeper, kaap --- antora.yml | 11 ++++- modules/ROOT/pages/faqs.adoc | 2 +- .../components/pages/admin-console-vm.adoc | 2 +- modules/components/pages/pulsar-beam.adoc | 28 ++++++------- modules/components/pages/pulsar-monitor.adoc | 6 +-- modules/components/pages/pulsar-sql.adoc | 2 +- .../pages/cluster-sizing-reference.adoc | 32 +++++++-------- .../pages/production-cluster-sizing.adoc | 41 +++++++++---------- .../pages/quickstart-helm-installs.adoc | 30 +++++++------- .../pages/quickstart-server-installs.adoc | 8 ++-- .../install-upgrade/pages/upgrade-guide.adoc | 19 +++++---- modules/operations/pages/scale-cluster.adoc | 2 +- .../operations/partials/operator-scaling.adoc | 2 +- 13 files changed, 98 insertions(+), 87 deletions(-) diff --git a/antora.yml b/antora.yml index 8d9a7af..77c4734 100644 --- a/antora.yml +++ b/antora.yml @@ -18,6 +18,9 @@ asciidoc: pulsar-reg: 'Apache Pulsar(TM)' 
pulsar: 'Apache Pulsar' pulsar-short: 'Pulsar' + pulsar-beam: 'Pulsar Beam' + kaap-short: 'KAAP Operator' + kaap: 'Kubernetes Autoscaling for Apache Pulsar (KAAP)' astra-stream: 'Astra Streaming' kafka-reg: 'Apache Kafka(R)' kafka: 'Apache Kafka' @@ -38,7 +41,13 @@ asciidoc: pulsar-beam-repo: 'https://github.com/kafkaesque-io/pulsar-beam' apache-pulsar-repo: 'https://github.com/apache/pulsar' kaap-operator-repo: 'https://github.com/datastax/kaap' - #Bookkeeper + bookkeeper-reg: 'Apache BookKeeper(TM)' + bookkeeper: 'Apache BookKeeper' + bookkeeper-short: 'BookKeeper' + zookeeper-reg: 'Apache ZooKeeper(TM)' + zookeeper: 'Apache ZooKeeper' + zookeeper-short: 'ZooKeeper' + crd: 'custom resource definition (CRD)' # Required for include::common partials that are shared with Astra Streaming web-ui: 'Admin Console' diff --git a/modules/ROOT/pages/faqs.adoc b/modules/ROOT/pages/faqs.adoc index c407282..a02beef 100644 --- a/modules/ROOT/pages/faqs.adoc +++ b/modules/ROOT/pages/faqs.adoc @@ -51,7 +51,7 @@ The {company} {product} distribution is designed for Java 17. However, because t == What are the install options for {company} {product}? -* **KAAP Helm chart (Recommended)**: Use the {company} {kaap-operator-repo}[KAAP operator Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] documentation. +* **{kaap-short} Helm chart (Recommended)**: Use the {company} {kaap-operator-repo}[{kaap-short} Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[{kaap}] documentation. * **{product} Helm chart**: Use the Helm chart provided at {pulsar-helm-chart-repo}[{pulsar-helm-chart-repo}] to install {company} {product} in an existing Kubernetes cluster on your laptop or hosted by a cloud provider. 
* **Tarball**: Use the tarball provided at {pulsar-repo}/releases[{pulsar-repo}/releases] to install {company} {product} on a server or VM. * **Ansible**: Use the {company} Ansible scripts provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}] to install {company} {product} on a server or VM with our provided playbooks. diff --git a/modules/components/pages/admin-console-vm.adoc b/modules/components/pages/admin-console-vm.adoc index 97fadd8..09ad2ba 100644 --- a/modules/components/pages/admin-console-vm.adoc +++ b/modules/components/pages/admin-console-vm.adoc @@ -78,7 +78,7 @@ These values can be modified in the JSON configuration file. | cluster_name | standalone | Name of {pulsar-short} cluster connecting to. The cluster name can be retrieved with the CLI command `pulsar-admin clusters list`. | functions_disabled | false | If functions are not enabled in the cluster, disable the function sections (Functions, Sinks, Sources). | grafana_url | | If `render_monitoring_tab` is enabled, URL for Grafana. -| host_overrides.http | \http://localhost:8964 | URL to display in console to connect to {pulsar-short} Beam HTTP proxy. +| host_overrides.http | \http://localhost:8964 | URL to display in console to connect to {pulsar-beam} HTTP proxy. | host_overrides.pulsar | \http://localhost:6650 | URL to display in console to connect to {pulsar-short}. | host_overrides.ws | //localhost:8080 | URL to display in console to connect to WebSocket proxy. | notice_text | | Custom notice to appear at top of console. 
diff --git a/modules/components/pages/pulsar-beam.adoc b/modules/components/pages/pulsar-beam.adoc index 9426268..fb1f509 100644 --- a/modules/components/pages/pulsar-beam.adoc +++ b/modules/components/pages/pulsar-beam.adoc @@ -1,13 +1,13 @@ -= {pulsar-short} Beam with {product} -:navtitle: {pulsar-short} Beam -:description: Install a minimal {product} Helm chart that includes {pulsar-short} Beam += {pulsar-beam} with {product} +:navtitle: {pulsar-beam} +:description: Install a minimal {product} Helm chart that includes {pulsar-beam} :helmValuesPath: https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main/beam/values.yaml -The {pulsar-beam-repo}[{pulsar-short} Beam] project is an HTTP-based streaming and queueing system for use with {pulsar}. +The {pulsar-beam-repo}[{pulsar-beam}] project is an HTTP-based streaming and queueing system for use with {pulsar}. -With {pulsar-short} Beam, you can send messages over HTTP, push messages to a webhook or cloud function, chain webhooks and functions together, or stream messages through server-sent events (SSE). +With {pulsar-beam}, you can send messages over HTTP, push messages to a webhook or cloud function, chain webhooks and functions together, or stream messages through server-sent events (SSE). -In this guide, you'll install a minimal {company} {pulsar-short} Helm chart that includes {pulsar-short} Beam. +In this guide, you'll install a minimal {company} {pulsar-short} Helm chart that includes {pulsar-beam}. == Prerequisites @@ -21,15 +21,15 @@ include::partial$install-helm.adoc[] == Forward service port -In a separate terminal window, port forward the Beam endpoint service: +In a separate terminal window, port forward the {pulsar-beam} endpoint service: [source,shell] ---- kubectl port-forward -n datastax-pulsar service/pulsar-proxy 8085:8085 ---- -The forwarding service will map the URL:PORT https://127.0.0.1:8085 to {pulsar-short} Proxy running in the new cluster. 
-Because Beam was enabled, the Proxy knows to forward on to the Beam service. +The forwarding service will map the URL:PORT https://127.0.0.1:8085 to the {pulsar-short} proxy running in the new cluster. +Because {pulsar-beam} was enabled, the proxy knows to forward on to the {pulsar-beam} service. [source,shell] ---- @@ -49,7 +49,7 @@ curl http://127.0.0.1:8085/v2/sse/persistent/public/default/$TOPIC?SubscriptionI ---- Note the use of `SubscriptionInitialPosition=earliest` in the message consumer. -This instructs Beam to create a subscription on the topic starting at the earliest message. +This instructs {pulsar-beam} to create a subscription on the topic starting at the earliest message. Try changing the value to `latest` to only receive new messages that arrive. == Produce a new message @@ -75,11 +75,11 @@ id: {9 0 0 0 0xc002287ad0} data: Hi there ---- -You have now completed the basics of using Beam in a {pulsar-short} Cluster. Refer to the project's {pulsar-beam-repo}/blob/master/README.md[readme] to see all the possibilities! +You have now completed the basics of using {pulsar-beam} in a {pulsar-short} cluster. Refer to the project's {pulsar-beam-repo}/blob/master/README.md[readme] to see all the possibilities! == A Python producer and consumer -This is another example of producing and consuming messages using Beam. +This is another example of producing and consuming messages using {pulsar-beam}. Instead of using curl, this example will use the "requests" Python library to issue HTTP requests. 
=== Create project @@ -156,6 +156,6 @@ include::partial$cleanup-terminal-and-helm.adoc[] Here are links to resources and guides you might be interested in: -* {pulsar-beam-repo}[Learn more] about the {pulsar-short} Beam project -* https://kafkaesque-io.github.io/pulsar-beam-swagger[{pulsar-short} Beam API] +* {pulsar-beam-repo}[Learn more] about the {pulsar-beam} project +* https://kafkaesque-io.github.io/pulsar-beam-swagger[{pulsar-beam} API] * xref:pulsar-sql.adoc[] \ No newline at end of file diff --git a/modules/components/pages/pulsar-monitor.adoc b/modules/components/pages/pulsar-monitor.adoc index 07b0abe..81db193 100644 --- a/modules/components/pages/pulsar-monitor.adoc +++ b/modules/components/pages/pulsar-monitor.adoc @@ -12,7 +12,7 @@ TIP: {pulsar-short} Heartbeat is installed automatically for server/VM installat * Monitor message pubsub and admin REST API endpoint * Measure end-to-end message latency from producing to consuming messages * Measure message latency over the websocket interface, and {pulsar-short} function -* Monitor instance availability of broker, proxy, bookkeeper, and zookeeper in a {pulsar-short} Kubernetes cluster +* Monitor instance availability of broker, proxy, {bookkeeper-short}, and {zookeeper-short} in a {pulsar-short} Kubernetes cluster * Monitor individual {pulsar-short} broker's health * Incident alert integration with OpsGenie * Customer configurable alert thresholds and probe test intervals @@ -50,7 +50,7 @@ You can download a template {pulsar-heartbeat-repo}/blob/master/config/runtime-t | pulsar_k8s_bookkeeper_offline_counter | gauge -| bookkeeper offline instances in Kubernetes cluster +| {bookkeeper-short} offline instances in Kubernetes cluster | pulsar_k8s_broker_offline_counter | gauge @@ -62,7 +62,7 @@ You can download a template {pulsar-heartbeat-repo}/blob/master/config/runtime-t | pulsar_k8s_bookkeeper_zookeeper_counter | gauge -| zookeeper offline instances in the Kubernetes cluster +| {zookeeper-short} offline 
instances in the Kubernetes cluster | pulsar_monitor_counter | counter diff --git a/modules/components/pages/pulsar-sql.adoc b/modules/components/pages/pulsar-sql.adoc index 37c5d22..167974d 100644 --- a/modules/components/pages/pulsar-sql.adoc +++ b/modules/components/pages/pulsar-sql.adoc @@ -197,7 +197,7 @@ select * from pulsar."public/default".mytopic limit 10 trino> exit ---- -You have successfully interacted with a {pulsar-short} Cluster via SQL. +You have successfully interacted with a {pulsar-short} cluster via SQL. Want to put your new learnings to the test? Try using the Trino plugin in https://redash.io/data-sources/presto[Redash] or https://superset.apache.org/docs/databases/trino/[Superset] to create useful dashboards. diff --git a/modules/install-upgrade/pages/cluster-sizing-reference.adoc b/modules/install-upgrade/pages/cluster-sizing-reference.adoc index 8c6ba6a..0d15991 100644 --- a/modules/install-upgrade/pages/cluster-sizing-reference.adoc +++ b/modules/install-upgrade/pages/cluster-sizing-reference.adoc @@ -22,12 +22,12 @@ For example, if there are 3 zones, then set a replication factor of 3. |Note |SANDBOX (STANDALONE) -|(combined) Zookeeper, Broker, and Bookie +|{zookeeper-short}, broker, and bookie (combined) |1 | .3+|DEVELOPMENT (1 region, 1 zone) -|Zookeeper +|{zookeeper-short} |1 | |Broker @@ -38,7 +38,7 @@ For example, if there are 3 zones, then set a replication factor of 3. | .5+|SINGLE REGION TESTING ENVIRONMENT (1 region, 1 zone) -|Zookeeper +|{zookeeper-short} |3 | |Broker @@ -47,28 +47,28 @@ For example, if there are 3 zones, then set a replication factor of 3. 
|Bookie |3 | -|{pulsar-short} Proxy +|{pulsar-short} proxy |3 | -|(Dedicated) Function Worker +|Function worker (dedicated) |3 | .6+|HIGH-AVAILABILITY PRODUCTION ENVIRONMENT (1 region, 3 zones) -|Zookeeper +|{zookeeper-short} |5 |2/2/1 nodes^*^ |Broker |3 |1 nodes per AZ^*^ -|(Dedicated) Function Worker +|Function worker (dedicated) |3 |1 node per AZ^*^ The number of function workers depends on the cluster's functions workload. |Bookie |6 |2 nodes per AZ^*^ -|{pulsar-short} Proxy +|{pulsar-short} proxy |3 |1 node per AZ^*^ |Autorecovery @@ -90,19 +90,19 @@ The following table lists the minimum hardware requirements for a {pulsar-short} |Note |DEVELOPMENT -|Zookeeper, Broker, and Bookkeeper +|{zookeeper-short}, broker, and {bookkeeper-short} a|* CPU: 8 vCPU * Memory: 16 GB * Data Disk: 64 GB SSD -a|Data disks are shared by Zookeeper data, Bookkeeper journal, and Bookkeeper ledger +a|Data disks are shared by {zookeeper-short} data, {bookkeeper-short} journal, and {bookkeeper-short} ledger .4+|TEST -|Zookeeper +|{zookeeper-short} a|* CPU: 2 vCPU * Memory: 4 GB * Data Disk: 32 GB SSD | -|Broker and Function Worker +|Broker and function worker a|* CPU: 8 vCPU * Memory: 16 GB |No data disk @@ -112,13 +112,13 @@ a|* CPU: 4 vCPU * Data Disk Journal: 32 GB SSD * Data Disk Ledger: 256 GB SSD | -|{pulsar-short} Proxy, Function Worker +|{pulsar-short} proxy, function worker a|* CPU: 4 vCPU * Memory: 8 GB | .6+|PRODUCTION -|Zookeeper +|{zookeeper-short} a|* CPU: 4 vCPU * Memory: 4 GB * Data Disk: 64 GB SSD @@ -134,11 +134,11 @@ a|* CPU: 8 vCPU * Data Disk Journal: 256 GB SSD * Data Disk Ledger: 1024 GB SSD |Ledger disk capacity can be beyond 1TB. 
-|{pulsar-short} Proxy, Autorecovery a|* CPU: 4 vCPU * Memory: 16 GB | -|Dedicated Function Worker +|Function worker (dedicated) a|* CPU: 4 vCPU * Memory: 8 GB | diff --git a/modules/install-upgrade/pages/production-cluster-sizing.adoc b/modules/install-upgrade/pages/production-cluster-sizing.adoc index 1447148..6bbb124 100644 --- a/modules/install-upgrade/pages/production-cluster-sizing.adoc +++ b/modules/install-upgrade/pages/production-cluster-sizing.adoc @@ -22,7 +22,7 @@ While Kubernetes is the more popular option, it is easier to express disk calcul === Required components -* https://pulsar.apache.org/docs/concepts-architecture-overview/#metadata-store[Zookeeper] - This is {pulsar-short}'s meta data store. It stores data about a cluster's configuration, helps the proxy direct messages to the correct broker, and holds Bookie configurations. Start with 1 instance of Zookeeper in each availability zone (AZ) to mitigate a single failure point, and scale Zookeeper as cluster traffic increases. You could scale Zookeeper as traffic within the cluster increases, but it shouldn't be very often as it can handle quite a bit of load. +* https://pulsar.apache.org/docs/concepts-architecture-overview/#metadata-store[{zookeeper-reg}] - This is {pulsar-short}'s metadata store. It stores data about a cluster's configuration, helps the proxy direct messages to the correct broker, and holds bookie configurations. Start with 1 instance of {zookeeper-short} in each availability zone (AZ) to mitigate a single failure point, and scale {zookeeper-short} as cluster traffic increases. You could scale {zookeeper-short} as traffic within the cluster increases, but it shouldn't be very often as it can handle quite a bit of load. * https://pulsar.apache.org/docs/concepts-architecture-overview/#brokers[Broker] - This is {pulsar-short}'s message router. Ideally, each broker should be fully utilized without becoming a performance bottleneck. 
@@ -30,9 +30,9 @@ The {pulsar-short} broker is stateless, so it requires considerable computing po Start with 1 broker instance in each zone, and set a scaling rule that watches CPU load. The best way to optimize this is through performance testing based on your cluster's workload characteristics. -* https://pulsar.apache.org/docs/concepts-architecture-overview/#apache-bookkeeper[Bookkeeper (bookie)] - This is {pulsar-short}'s data store. -Bookkeeper stores message data in a low-latency, resilient way. -{pulsar-short} uses Bookkeeper's quorum math to function, so a loss of 1 Bookkeeper instance won't bring your system down, but will cause some data loss. +* https://pulsar.apache.org/docs/concepts-architecture-overview/#apache-bookkeeper[{bookkeeper-reg} (bookie)] - This is {pulsar-short}'s data store. +{bookkeeper-short} stores message data in a low-latency, resilient way. +{pulsar-short} uses {bookkeeper-short}'s quorum math to function, so a loss of 1 {bookkeeper-short} instance won't bring your system down, but will cause some data loss. Start with at least 3 bookies, with 1 in each AZ. At least 2 bookies per AZ are required for high availability, so if one bookie goes down, the other bookie in the AZ can take over. Scale bookies up on disc usage percentage. Scale down manually by making a bookie read-only, offloading its data, then terminating the instance. @@ -42,7 +42,7 @@ Scale bookies up on disc usage percentage. Scale down manually by making a booki The {company} {product} Helm chart deployment includes optional but highly recommended server components for better {pulsar-short} cluster metrics monitoring and operation visibility. -* https://bookkeeper.apache.org/docs/admin/autorecovery[Bookkeeper AutoRecovery] - This is a {pulsar-short} component that recovers Bookkeeper data in the event of a bookie outage. While optional you will want the insurance of autorecovery working on your behalf. 
+* https://bookkeeper.apache.org/docs/admin/autorecovery[{bookkeeper-short} AutoRecovery] - This is a {pulsar-short} component that recovers {bookkeeper-short} data in the event of a bookie outage. While optional you will want the insurance of Autorecovery working on your behalf. A single instance of Autorecovery should be adequate - only in the most heavily-used clusters will you need more. * https://pulsar.apache.org/docs/concepts-architecture-overview/#pulsar-proxy[{pulsar-short} proxy] - The {pulsar-short} proxy is just that - a proxy. It runs at the edge of the cluster with public facing endpoints. @@ -66,8 +66,8 @@ image::pulsar-components.png[] [#message-retention] == Message retention -The broker ensures messages are received and delivered appropriately, but it is a stateless process so it doesn't use its memory to track this. Instead, the broker uses Bookkeepers (or "bookies") to store message data and the message's acknowledgement state. -A great benefit of Bookkeeper is its quorum policies. These policies make each bookie aware of the other bookies to form a bookkeeper cluster. With a cluster established, the cluster can have acknowledgement rules that form a data replication factor. For example, if you had 3 bookies in a Bookkeeper cluster with an acknowledgement rule that at least 2 of the 3 bookies must have a copy of the data, then the cluster has a replication factor of 2. A {pulsar-short} broker uses the `managedLedgerDefaultAckQuorum` and `managedLedgerDefaultWriteQuorum` configurations to set the bounds of this rule. For more about Bookkeeper persistence, see https://pulsar.apache.org/docs/administration-zk-bk/#bookkeeper-persistence-policies[here]. +The broker ensures messages are received and delivered appropriately, but it is a stateless process so it doesn't use its memory to track this. Instead, the broker uses {bookkeeper-short}s (bookies) to store message data and the message's acknowledgement state. 
+A great benefit of {bookkeeper-short} is its quorum policies. These policies make each bookie aware of the other bookies to form a {bookkeeper-short} cluster. With a cluster established, the cluster can have acknowledgement rules that form a data replication factor. For example, if you had 3 bookies in a {bookkeeper-short} cluster with an acknowledgement rule that at least 2 of the 3 bookies must have a copy of the data, then the cluster has a replication factor of 2. A {pulsar-short} broker uses the `managedLedgerDefaultAckQuorum` and `managedLedgerDefaultWriteQuorum` configurations to set the bounds of this rule. For more about {bookkeeper-short} persistence, see https://pulsar.apache.org/docs/administration-zk-bk/#bookkeeper-persistence-policies[here]. When a client produces a message, the broker will not acknowledge receipt until the replication factor has been achieved. Continuing from the above example, if the replication factor is 2, a broker's acknowledgment means a minimum of 2 bookies have confirmed storage of message data. If the broker times out waiting for at least 2 responses from the bookies, then the broker will not acknowledge receipt with the client. The client will need to handle the exception by attempting to resend or fail. This process forms one of {pulsar-short}'s core values - guaranteed message receipt. @@ -113,7 +113,7 @@ For more on message compression, see the https://pulsar.apache.org/docs/concepts * _Message retention and TTL period_ - the size or time acknowledged messages are kept on disk. See message retention above for more detail. -* _Tiered storage policies_ - Tiered storage offloads bookkeeper data to cheaper, long-term storage, and can impact cluster sizing if that storage service is included in the cluster. For our calculations we will not be including this feature. For more on tiered storage, see https://pulsar.apache.org/docs/tiered-storage-overview/[{pulsar-short} documentation]. 
+* _Tiered storage policies_ - Tiered storage offloads {bookkeeper-short} data to cheaper, long-term storage, and can impact cluster sizing if that storage service is included in the cluster. For our calculations we will not be including this feature. For more on tiered storage, see https://pulsar.apache.org/docs/tiered-storage-overview/[{pulsar-short} documentation]. There are other factors that could be a part of the aggregated cluster workload. As you gain familiarity with {pulsar-short} you can further customize this calculation. For now, we will estimate with the above numbers to size a cluster. @@ -148,11 +148,10 @@ Gather the following workload characteristics to determine your cluster's size r With the aggregated workload characteristics, we can now apply our methodology to these characteristics to size a production cluster. -First, we will size the bookkeeper's disk. -We size this first because it's the most important component (bookies store message data) and are also the hardest to scale. -By default, {pulsar-short} sets Bookkeeper https://pulsar.apache.org/docs/administration-zk-bk/#bookkeeper-persistence-policies[ack-quorum] size to 2. +First, size the {bookkeeper-short} disk because it's the most important component (bookies store message data) and the hardest to scale. +By default, {pulsar-short} sets {bookkeeper-short} https://pulsar.apache.org/docs/administration-zk-bk/#bookkeeper-persistence-policies[ack-quorum] size to 2. That means at least 2 bookies in the ensemble need to acknowledge receipt of message data before {pulsar-short} will acknowledge receipt of the message. -But (very important) we want the message replication factor to be an odd number, so we can tolerate 1 Bookie failure. +But (very important) we want the message replication factor to be an odd number, so we can tolerate 1 bookie failure. . 
Multiply replication factor (3) by average message payload size (1) by average message throughput (100000), then factor in TTL (3) and retention period (3600) (when applicable). + @@ -167,19 +166,19 @@ Total message size (raw) = ≅ 3 Tb ---- + -We now know our cluster needs 3 Tb of storage for Bookkeeper ledger data. +We now know our cluster needs 3 Tb of storage for {bookkeeper-short} ledger data. -. Calculate the number of Bookkeeper nodes with an individual ledger disk capacity. +. Calculate the number of {bookkeeper-short} nodes with an individual ledger disk capacity. + -[source,plain] +[source,plain,subs="+attributes"] ---- -Bookkeeper count(raw)=ceiling(3/(4 * 0.85)) = 1 +{bookkeeper-short} count(raw)=ceiling(3/(4 * 0.85)) = 1 ---- + -If our bookie has a 4Tb disk and we anticipate at least 3Tb of workload, only 1 Bookie is needed. +If our bookie has a 4Tb disk and we anticipate at least 3Tb of workload, only 1 bookie is needed. For fault tolerance, we adjust this to a number that is divisible by the number of zones, which equals 3 bookies. -. Given the replication factor of 3, we will need at least 1 broker to write messages to the bookies. That gives us a broker-to-bookkeeper ratio of 1:3. Now we can calculate the total number of brokers across 3 zones. +. Given the replication factor of 3, we will need at least 1 broker to write messages to the bookies. That gives us a broker-to-{bookkeeper-short} ratio of 1:3. Now we can calculate the total number of brokers across 3 zones. + [source,plain] ---- @@ -191,7 +190,7 @@ To be evenly divisible by the number of zones, we will set brokers to 3. === {pulsar-short} component instance counts -Now that we know how many server instances of broker and Bookie are required to support our workload, we include the other components to size the overall cluster. +Now that we know how many server instances of broker and bookie are required to support our workload, we include the other components to size the overall cluster. 
.{pulsar-short} cluster component count [cols="2,2,2", options=header] @@ -200,11 +199,11 @@ Now that we know how many server instances of broker and Bookie are required to |VM Count |Notes -|Zookeeper +|{zookeeper-short} |3 |1 per zone -|Bookkeeper (bookie) +|{bookkeeper-short} (bookie) |3 |Calculated above diff --git a/modules/install-upgrade/pages/quickstart-helm-installs.adoc b/modules/install-upgrade/pages/quickstart-helm-installs.adoc index 0ac3a19..17cadf2 100644 --- a/modules/install-upgrade/pages/quickstart-helm-installs.adoc +++ b/modules/install-upgrade/pages/quickstart-helm-installs.adoc @@ -2,7 +2,7 @@ [IMPORTANT] ==== -The {company} production-ready Helm chart is now deprecated. For new deployments, we recommend using the {company} {kaap-operator-repo}[KAAP operator Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] documentation. +The {company} production-ready Helm chart is now deprecated. For new deployments, we recommend using the {company} {kaap-operator-repo}[{kaap-short} Helm chart], which provides Kubernetes-native autoscaling and simplified management for {pulsar} clusters. For more information, see the xref:kaap-operator::index.adoc[{kaap}] documentation. 
==== You have options for installing *{company} {product}*: @@ -18,13 +18,13 @@ The resulting configuration includes support for: * xref:install-upgrade:quickstart-helm-installs.adoc#tls[TLS] * xref:install-upgrade:quickstart-helm-installs.adoc#authentication[Authentication] -* WebSocket Proxy +* WebSocket proxy * Standalone Functions Workers -* {pulsar-short} IO Connectors -* xref:install-upgrade:quickstart-helm-installs.adoc#_tiered_storage_configuration[Tiered Storage] including Tardigarde distributed cloud storage -* xref:install-upgrade:quickstart-helm-installs.adoc#_pulsar_sql_configuration[{pulsar-short} SQL Workers] +* {pulsar-short} I/O connectors +* xref:install-upgrade:quickstart-helm-installs.adoc#_tiered_storage_configuration[Tiered storage] including Tardigrade distributed cloud storage +* xref:install-upgrade:quickstart-helm-installs.adoc#_pulsar_sql_configuration[{pulsar-short} SQL workers] * {pulsar-short} Admin Console for managing the cluster -* {pulsar-short} heartbeat +* {pulsar-short} Heartbeat * Burnell for API-based token generation * Prometheus, Grafana, and Alertmanager stack with default Grafana dashboards and {pulsar-short}-specific alerting rules * cert-manager with support for self-signed certificates as well as public certificates using ACME; such as Let's Encrypt @@ -44,13 +44,13 @@ The resulting configuration includes support for: * Depending on the cloud provider, the latest 'Storage Driver' should be used, along with the fastest disk type (for example, GP3 in AWS) -* 5 Zookeeper replicas +* Five {zookeeper-short} replicas -* 3 Bookies +* Three bookies -* 3 Brokers +* Three brokers -* 3 Proxies +* Three proxies For the local machine running the Helm chart, you will need: @@ -75,7 +75,7 @@ default_storage: existingStorageClassName: default ---- -For a component like BookKeeper, which requires stateful storage, we need to override the `default_storage` class when the BookKeeper Persistent Volume Claims (PVCs) are created. 
+For a component like {bookkeeper-short}, which requires stateful storage, we need to override the `default_storage` class when the {bookkeeper-short} Persistent Volume Claims (PVCs) are created. There are two ways to override `default_storage`: @@ -132,7 +132,7 @@ There are two ways to override `default_storage`: replication-type: none ---- -* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. +* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the {bookkeeper-short} PVCs. + [source,yaml] ---- @@ -324,7 +324,7 @@ There are several example configurations in the {pulsar-helm-chart-repo}/blob/ma * {pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[dev-values.yaml example file]. A configuration for setting up a development environment to run in a local Kubernetes environment (for example, https://minikube.sigs.k8s.io/docs/start/[minikube], or https://kind.sigs.k8s.io/[kind]). Message/state persistence, redundancy, authentication, and TLS are disabled. + -TIP: With message/state persistence disabled, the cluster will not survive a restart of the ZooKeeper or BookKeeper. +TIP: With message/state persistence disabled, the cluster will not survive a restart of the {zookeeper-short} or {bookkeeper-short}. * `dev-values-persistence.yaml`. Same as above, but persistence is enabled. This will allow for the cluster to survive the restarts of the pods, but requires persistent volume claims (PVC) to be supported by the Kubernetes environment. 
@@ -332,7 +332,7 @@ TIP: With message/state persistence disabled, the cluster will not survive a res + `helm install pulsar -f dev-values-auth.yaml datastax-pulsar/pulsar` -* `dev-values-tls.yaml`. Development environment with self-signed certificate created by cert-manager. You need to install the cert-manager CRDs before installing the Helm chart. The chart will install the cert-manager application. +* `dev-values-tls.yaml`. Development environment with self-signed certificate created by cert-manager. You need to install the cert-manager {crd} before installing the Helm chart. The chart will install the cert-manager application. + [source,shell] ---- @@ -379,7 +379,7 @@ In addition, you can configure any S3 compatible storage. There is explicit supp === {pulsar-short} SQL Configuration -If you enable {pulsar-short} SQL, the cluster provides https://trino.io/[Trino] access to the data stored in BookKeeper (and tiered storage, if enabled). Trino is exposed on the service named `-sql`. +If you enable {pulsar-short} SQL, the cluster provides https://trino.io/[Trino] access to the data stored in {bookkeeper-short} (and tiered storage, if enabled). Trino is exposed on the service named `-sql`. The easiest way to access the Trino command line is to log into the bastion host and then connect to the Trino service port, like this: diff --git a/modules/install-upgrade/pages/quickstart-server-installs.adoc b/modules/install-upgrade/pages/quickstart-server-installs.adoc index 8734f89..6e19384 100644 --- a/modules/install-upgrade/pages/quickstart-server-installs.adoc +++ b/modules/install-upgrade/pages/quickstart-server-installs.adoc @@ -28,10 +28,10 @@ The servers must be on the same network so they can communicate with each other. * Servers should have at least 50 GB in their root disk volume. -* BookKeeper should use one volume device for the journal, and one volume device for the ledgers. 
+* {bookkeeper-reg} should use one volume device for the journal, and one volume device for the ledgers. The journal device should be 20GB. The ledger volume device should be sized to hold the expected amount of stored message data. -* {company} recommends a separate data disk volume for ZooKeeper. +* {company} recommends a separate data disk volume for {zookeeper-short}. * Operating System Settings + @@ -49,7 +49,7 @@ Check this setting with `cat /sys/kernel/mm/transparent_hugepage/enabled` and `c |{product} filename |Included components |`lunastreaming-core--bin.tar.gz` -|Contains the core {pulsar-short} modules: Zookeeper, Broker, BookKeeper, and function worker +|Contains the core {pulsar-short} modules: {zookeeper-short}, broker, {bookkeeper-short}, and function worker |`lunastreaming--bin.tar.gz` |Contains all components from `lunastreaming-core` as well as support for {pulsar-short} SQL @@ -102,7 +102,7 @@ Download the latest version from the {pulsar-heartbeat-repo}/releases/[{company} == Next steps -* For initializing {pulsar-short} components like BookKeeper and ZooKeeper, see the https://pulsar.apache.org/docs/deploy-bare-metal[{pulsar-short} documentation]. +* For initializing {pulsar-short} components like {bookkeeper-short} and {zookeeper-short}, see the https://pulsar.apache.org/docs/deploy-bare-metal[{pulsar-short} documentation]. * For installing optional built-in connectors or tiered storage included in `lunastreaming-all`, see the https://pulsar.apache.org/docs/deploy-bare-metal#install-builtin-connectors-optional[{pulsar-short} documentation]. 
diff --git a/modules/install-upgrade/pages/upgrade-guide.adoc b/modules/install-upgrade/pages/upgrade-guide.adoc index 09ea450..bf8d2ad 100644 --- a/modules/install-upgrade/pages/upgrade-guide.adoc +++ b/modules/install-upgrade/pages/upgrade-guide.adoc @@ -133,7 +133,7 @@ The following PRs were merged in {product} 3.1 that may impact your deployment's |{apache-pulsar-repo}/pull/19180[#19180] |Deprecate blocking AuthorizationService, AuthorizationProvider methods -|This will affect the public API for the AuthorizationService and the AuthorizationProvider, which only impacts users that are running custom code inside the {pulsar-short} Broker +|This will affect the public API for the AuthorizationService and the AuthorizationProvider, which only impacts users that are running custom code inside the {pulsar-short} broker |{apache-pulsar-repo}/pull/19182[#19182] |Remove AuthorizationProvider methods deprecated in 2.7 and 2.9 @@ -245,15 +245,18 @@ This modification is described in detail in {apache-pulsar-repo}/pull/15376[PIP- This section describes known issues encountered when upgrading to {product} 3.1. -=== Bookkeeper / RocksDB format +=== {bookkeeper} and RocksDB format -**Downgrading to {product} 2.10 from {product} 3.1 is not supported for Bookies and ZooKeeper**. +[IMPORTANT] +==== +Downgrading to {product} 2.10 from {product} 3.1 is not supported for the {bookkeeper-reg} and {zookeeper-reg} components. +==== {pulsar-short} 3.1 uses RocksDB `7.x`, which writes in a format that is not compatible with RocksDB `6.x`. -{product} 2.10 uses Bookkeeper 4.14, which uses RocksDB `6.x`. +{product} 2.10 uses {bookkeeper} 4.14, which uses RocksDB `6.x`. -All other components such as Broker, Proxy, and Functions Worker can be downgraded at any time. +All other components such as broker, proxy, and Functions Worker can be downgraded at any time. For more information, see {apache-pulsar-repo}/issues/22051[Issue 22051]. 
@@ -265,11 +268,11 @@ This guide only addresses Kubernetes deployment. For more information on upgrading bare metal and Docker {pulsar-short} deployments, see the https://pulsar.apache.org/docs/3.3.x/administration-upgrade/[{pulsar-short} documentation]. -=== Upgrade Kubernetes deployment with KAAP Operator +=== Upgrade Kubernetes deployment with {kaap-short} -Upgrade to {product} 3.1 on Kubernetes with the KAAP (Kubernetes Autoscaling for {pulsar}) operator. +Upgrade to {product} 3.1 on Kubernetes with {kaap}. -For more information, see the xref:kaap-operator::index.adoc[KAAP documentation]. +For more information, see the xref:kaap-operator::index.adoc[{kaap-short} documentation]. . To prevent data loss, back up your existing {pulsar-short} data and configuration files. . To save your current Helm release configuration, run the following command: diff --git a/modules/operations/pages/scale-cluster.adoc b/modules/operations/pages/scale-cluster.adoc index 45c8c6d..bff4a36 100644 --- a/modules/operations/pages/scale-cluster.adoc +++ b/modules/operations/pages/scale-cluster.adoc @@ -112,7 +112,7 @@ helm upgrade pulsar -f ~/dev-values_large.yaml --wait datastax-pulsar/pulsar ./bin/bookkeeper shell listunderreplicated ---- -. Double-check the bookie id of the failing bookie: +. Double-check the `bookieid` of the failing bookie: + [source,bash] ---- diff --git a/modules/operations/partials/operator-scaling.adoc b/modules/operations/partials/operator-scaling.adoc index 0c4105e..e5e359f 100644 --- a/modules/operations/partials/operator-scaling.adoc +++ b/modules/operations/partials/operator-scaling.adoc @@ -1,4 +1,4 @@ [TIP] ==== -The xref:kaap-operator::index.adoc[Kubernetes Autoscaling for {pulsar} (KAAP)] operator takes care of scaling {pulsar-short} cluster components, deploying new clusters, and even migrating your existing cluster to an operator-managed deployment. 
+xref:kaap-operator::index.adoc[{kaap}] takes care of scaling {pulsar-short} cluster components, deploying new clusters, and even migrating your existing cluster to an operator-managed deployment. ==== \ No newline at end of file From da82ed6bb2f1709cf7605f05af0ddeba92fa4e63 Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:27:08 -0800 Subject: [PATCH 6/6] fix --- antora.yml | 1 - modules/install-upgrade/pages/cluster-sizing-reference.adoc | 4 ++-- .../install-upgrade/pages/production-cluster-sizing.adoc | 6 +++--- modules/install-upgrade/pages/upgrade-guide.adoc | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/antora.yml b/antora.yml index 77c4734..611cdeb 100644 --- a/antora.yml +++ b/antora.yml @@ -45,7 +45,6 @@ asciidoc: bookkeeper: 'Apache BookKeeper' bookkeeper-short: 'BookKeeper' zookeeper-reg: 'Apache ZooKeeper(TM)' - zookeeper: 'Apache ZooKeeper' zookeeper-short: 'ZooKeeper' crd: 'custom resource definition (CRD)' diff --git a/modules/install-upgrade/pages/cluster-sizing-reference.adoc b/modules/install-upgrade/pages/cluster-sizing-reference.adoc index 0d15991..727eaf5 100644 --- a/modules/install-upgrade/pages/cluster-sizing-reference.adoc +++ b/modules/install-upgrade/pages/cluster-sizing-reference.adoc @@ -71,7 +71,7 @@ The number of function workers depends on the cluster's functions workload. |{pulsar-short} proxy |3 |1 node per AZ^*^ -|Autorecovery +|AutoRecovery |3 |1 per AZ^*^ |=== @@ -134,7 +134,7 @@ a|* CPU: 8 vCPU * Data Disk Journal: 256 GB SSD * Data Disk Ledger: 1024 GB SSD |Ledger disk capacity can be beyond 1TB. 
-|{pulsar-short} proxy, Autorecovery a|* CPU: 4 vCPU * Memory: 16 GB | diff --git a/modules/install-upgrade/pages/production-cluster-sizing.adoc b/modules/install-upgrade/pages/production-cluster-sizing.adoc index 6bbb124..3e3c3c5 100644 --- a/modules/install-upgrade/pages/production-cluster-sizing.adoc +++ b/modules/install-upgrade/pages/production-cluster-sizing.adoc @@ -42,8 +42,8 @@ Scale bookies up on disc usage percentage. Scale down manually by making a booki The {company} {product} Helm chart deployment includes optional but highly recommended server components for better {pulsar-short} cluster metrics monitoring and operation visibility. -* https://bookkeeper.apache.org/docs/admin/autorecovery[{bookkeeper-short} AutoRecovery] - This is a {pulsar-short} component that recovers {bookkeeper-short} data in the event of a bookie outage. While optional you will want the insurance of Autorecovery working on your behalf. -A single instance of Autorecovery should be adequate - only in the most heavily-used clusters will you need more. +* https://bookkeeper.apache.org/docs/admin/autorecovery[{bookkeeper-short} AutoRecovery] - This is a {pulsar-short} component that recovers {bookkeeper-short} data in the event of a bookie outage. While optional, you will want the insurance of AutoRecovery working on your behalf. +A single instance of AutoRecovery should be adequate - only in the most heavily-used clusters will you need more. * https://pulsar.apache.org/docs/concepts-architecture-overview/#pulsar-proxy[{pulsar-short} proxy] - The {pulsar-short} proxy is just that - a proxy. It runs at the edge of the cluster with public facing endpoints. Without it, your brokers would expose those endpoints, which is not an ideal configuration in production. 
@@ -215,7 +215,7 @@ Now that we know how many server instances of broker and bookie are required to |3 |1 per zone -|Autorecovery +|AutoRecovery |3 |1 per zone diff --git a/modules/install-upgrade/pages/upgrade-guide.adoc b/modules/install-upgrade/pages/upgrade-guide.adoc index bf8d2ad..f7320c2 100644 --- a/modules/install-upgrade/pages/upgrade-guide.adoc +++ b/modules/install-upgrade/pages/upgrade-guide.adoc @@ -245,7 +245,7 @@ This modification is described in detail in {apache-pulsar-repo}/pull/15376[PIP- This section describes known issues encountered when upgrading to {product} 3.1. -=== {bookkeeper} and RocksDB format +=== {bookkeeper-short} and RocksDB format [IMPORTANT] ====