diff --git a/antora.yml b/antora.yml index 2df8c25..2f3f421 100644 --- a/antora.yml +++ b/antora.yml @@ -18,7 +18,25 @@ asciidoc: pulsar-reg: 'Apache Pulsar(TM)' pulsar: 'Apache Pulsar' pulsar-short: 'Pulsar' + pulsar-beam: 'Pulsar Beam' astra-stream: 'Astra Streaming' + kafka-reg: 'Apache Kafka(R)' + kafka: 'Apache Kafka' + kafka-short: 'Kafka' + kafka-connect: 'Kafka Connect' + pulsar-admin-console-repo: 'https://github.com/datastax/pulsar-admin-console' + pulsar-heartbeat-repo: 'https://github.com/datastax/pulsar-heartbeat' + pulsar-sink-repo: 'https://github.com/datastax/pulsar-sink' + pulsar-ansible-repo: 'https://github.com/datastax/pulsar-ansible' + pulsar-helm-chart-repo: 'https://github.com/datastax/pulsar-helm-chart' + pulsar-openid-connect-repo: 'https://github.com/datastax/pulsar-openid-connect-plugin' + pulsar-repo: 'https://github.com/datastax/pulsar' + pulsar-beam-repo: 'https://github.com/kafkaesque-io/pulsar-beam' + bookkeeper-reg: 'Apache BookKeeper(TM)' + bookkeeper-short: 'BookKeeper' + zookeeper-reg: 'Apache ZooKeeper(TM)' + zookeeper-short: 'ZooKeeper' + crd: 'custom resource definition (CRD)' # Required for include::common partials that are shared with Astra Streaming web-ui: 'Admin Console' diff --git a/local-preview-playbook.yml b/local-preview-playbook.yml index 1911874..eb97263 100644 --- a/local-preview-playbook.yml +++ b/local-preview-playbook.yml @@ -54,23 +54,176 @@ asciidoc: xrefstyle: short # CUSTOM ATTRIBUTES company: 'DataStax' + trust-center: 'IBM Trust Center' + trust-center-url: 'https://www.ibm.com/trust' + trust-center-link: '{trust-center-url}[{trust-center}]' + support-url: 'https://www.ibm.com/mysupport/s/' + dsbulk: 'DataStax Bulk Loader (DSBulk)' + dsbulk-short: 'DSBulk' + dsbulk-repo: 'https://github.com/datastax/dsbulk' + astra: 'Astra' + astra-db: 'Astra DB' + astra-ui: 'Astra Portal' + astra-url: 'https://astra.datastax.com' + astra-ui-link: '{astra-url}[{astra-ui}^]' + db-classic: 'Managed Cluster' + db-serverless: 
'Serverless (non-vector)' + db-serverless-vector: 'Serverless (vector)' + scb: 'Secure Connect Bundle (SCB)' + scb-short: 'SCB' + scb-brief: 'Secure Connect Bundle' + devops-api: 'DevOps API' + devops-api-ref-url: 'xref:astra-api-docs:ROOT:attachment$devops-api/index.html' + astra-cli: 'Astra CLI' + astra-stream: 'Astra Streaming' + starlight-kafka: 'Starlight for Kafka' + starlight-rabbitmq: 'Starlight for RabbitMQ' + astra-streaming-examples-repo: 'https://github.com/datastax/astra-streaming-examples' + sstable-sideloader: '{astra-db} Sideloader' + zdm: 'Zero Downtime Migration' + zdm-short: 'ZDM' + zdm-proxy: 'ZDM Proxy' + cass-migrator: 'Cassandra Data Migrator (CDM)' + cass-migrator-short: 'CDM' + hcd: 'Hyper-Converged Database (HCD)' + hcd-short: 'HCD' + dse: 'DataStax Enterprise (DSE)' + dse-short: 'DSE' + metrics-collector: 'DSE Metrics Collector' + mc: 'Mission Control' + opscenter: 'DSE OpsCenter' + studio: 'DataStax Studio' + cass-reg: 'Apache Cassandra(R)' + cass: 'Apache Cassandra' + cass-short: 'Cassandra' + cql: 'Cassandra Query Language (CQL)' + cql-shell: 'CQL shell' + cql-console: 'CQL console' + cql-service: 'CQL Service' + pulsar-reg: 'Apache Pulsar(TM)' + pulsar: 'Apache Pulsar' + pulsar-short: 'Pulsar' + spark-reg: 'Apache Spark(TM)' + spark: 'Apache Spark' + spark-short: 'Spark' + spark-connect: 'Spark Connect' + spark-connector: 'Apache Cassandra Spark Connector' + spark-connector-short: 'Spark Connector' + kafka-reg: 'Apache Kafka(R)' + kafka: 'Apache Kafka' + kafka-short: 'Kafka' + kafka-connect: 'Kafka Connect' + kafka-connector: 'DataStax Apache Kafka Connector' + kafka-connector-short: 'Kafka Connector' + solr-reg: 'Apache Solr(TM)' + solr: 'Apache Solr' + solr-short: 'Solr' + lucene-reg: 'Apache Lucene(TM)' + lucene: 'Apache Lucene' + lucene-short: 'Lucene' + hadoop-reg: 'Apache Hadoop(R)' + hadoop: 'Apache Hadoop' + hadoop-short: 'Hadoop' + airflow-reg: 'Apache Airflow(R)' + airflow: 'Apache Airflow' + airflow-short: 'Airflow' + 
maven-reg: 'Apache Maven(TM)' + maven: 'Apache Maven' + maven-short: 'Maven' + flink-reg: 'Apache Flink(R)' + flink: 'Apache Flink' + flink-short: 'Flink' + beam-reg: 'Apache Beam(R)' + beam: 'Apache Beam' + beam-short: 'Beam' + geode-reg: 'Apache Geode(TM)' + geode: 'Apache Geode' + geode-short: 'Geode' + hbase-reg: 'Apache HBase(R)' + hbase: 'Apache HBase' + hbase-short: 'HBase' + kudu-reg: 'Apache Kudu(TM)' + kudu: 'Apache Kudu' + kudu-short: 'Kudu' + phoenix-reg: 'Apache Phoenix(TM)' + phoenix: 'Apache Phoenix' + phoenix-short: 'Phoenix' + zookeeper-reg: 'Apache ZooKeeper(TM)' + zookeeper: 'Apache ZooKeeper' + zookeeper-short: 'ZooKeeper' + asf: 'Apache Software Foundation (ASF)' + asf-short: 'ASF' + tinkerpop-reg: 'Apache TinkerPop(TM)' + tinkerpop: 'Apache TinkerPop' + tinkerpop-short: 'TinkerPop' + cloudstack-reg: 'Apache CloudStack(R)' + cloudstack: 'Apache CloudStack' + cloudstack-short: 'CloudStack' + tomcat-reg: 'Apache Tomcat(R)' + tomcat: 'Apache Tomcat' + tomcat-short: 'Tomcat' + ajp: 'Apache JServ Protocol (AJP)' + ajp-short: 'AJP' + activemq-reg: 'Apache ActiveMQ(R)' + activemq: 'Apache ActiveMQ' + activemq-short: 'ActiveMQ' + tomee-reg: 'Apache TomEE(TM)' + tomee: 'Apache TomEE' + tomee-short: 'TomEE' + bookkeeper-reg: 'Apache BookKeeper(TM)' + bookkeeper: 'Apache BookKeeper' + bookkeeper-short: 'BookKeeper' + groovy-reg: 'Apache Groovy(TM)' + groovy: 'Apache Groovy' + groovy-short: 'Groovy' + cpp-driver-url: 'https://github.com/datastax/cpp-driver' + csharp-driver-url: 'https://github.com/datastax/csharp-driver' + gocql-astra-url: 'https://github.com/datastax/gocql-astra' + go-driver-url: 'https://github.com/apache/cassandra-gocql-driver' + cql-proxy-url: 'https://github.com/datastax/cql-proxy' + java-driver-url: 'https://github.com/apache/cassandra-java-driver' + nodejs-driver-url: 'https://github.com/datastax/nodejs-driver' + python-driver-url: 'https://github.com/datastax/python-driver' + scala-driver-url: 
'https://github.com/apache/cassandra-spark-connector' + cass-driver-cpp-shield: 'image:https://img.shields.io/github/v/tag/datastax/cpp-driver?label=latest[alt="Latest cpp-driver release on GitHub",link="{cpp-driver-url}/tags"]' + cass-driver-csharp-shield: 'image:https://img.shields.io/nuget/v/CassandraCSharpDriver?label=latest[alt="Latest CassandraCSharpDriver release on NuGet",link="https://www.nuget.org/packages/CassandraCSharpDriver"]' + cass-driver-go-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-gocql-driver?label=latest%20gocql[alt="Latest gocql release on GitHub",link="{go-driver-url}/tags"]' + cass-driver-java-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-java-driver?label=latest[alt="Latest cassandra-java-driver release on GitHub",link="{java-driver-url}/tags"]' + cass-driver-nodejs-shield: 'image:https://img.shields.io/github/v/tag/datastax/nodejs-driver?label=latest[alt="Latest nodejs-driver release on GitHub",link="{nodejs-driver-url}/tags"]' + cass-driver-python-shield: 'image:https://img.shields.io/github/v/tag/datastax/python-driver?label=latest[alt="Latest python-driver release on GitHub",link="{python-driver-url}/tags"]' + cass-driver-scala-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-spark-connector?label=latest[alt="Latest cassandra-spark-connector release on GitHub",link="{scala-driver-url}/releases"]' + data-api: 'Data API' + csharp-client-api-ref-url: 'xref:astra-api-docs:ROOT:attachment$csharp-client' + py-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$python-client/astrapy' + ts-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$typescript-client' + java-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$java-client' + python-client-repo-url: 'https://github.com/datastax/astrapy' + typescript-client-repo-url: 'https://github.com/datastax/astra-db-ts' + typescript-client-examples-url: '{typescript-client-repo-url}/blob/v2.x/examples' + 
java-client-repo-url: 'https://github.com/datastax/astra-db-java' + csharp-client-repo-url: 'https://github.com/datastax/astra-db-csharp' + python-client-python-version: '3.8' + dataapi-java-client-shield: 'image:https://img.shields.io/maven-central/v/com.datastax.astra/astra-db-java.svg?label=latest[alt="Latest astra-db-java release on Maven Central",link="https://search.maven.org/artifact/com.datastax.astra/astra-db-java"]' + dataapi-python-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astrapy?label=latest[alt="Latest astrapy release on GitHub",link="{python-client-repo-url}/releases"]' + dataapi-typescript-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astra-db-ts?label=latest[alt="Latest astra-db-ts release on GitHub",link="{typescript-client-repo-url}/releases"]' + dataapi-csharp-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astra-db-csharp?label=latest[alt="Latest astra-db-csharp release on GitHub",link="{csharp-client-repo-url}/releases"]' + agent: 'DataStax Agent' + repair-service: 'Repair Service' + backup-service: 'Backup Service' + performance-service: 'Performance Service' + monitoring-service: 'OpsCenter Monitoring' + nodesync-service: 'NodeSync Service' + bestpractice-service: 'Best Practice Service' + capacity-service: 'Capacity Service' + lcm: 'Lifecycle Manager (LCM)' + lcm-short: 'LCM' + cr: 'custom resource (CR)' + cr-short: 'CR' + crd: 'custom resource definition (CRD)' + crd-short: 'CRD' + # Custom attributes only used in ragstack-ai astra_db: 'Astra DB' - astra_stream: 'Astra Streaming' astra_ui: 'Astra Portal' - astra_cli: 'Astra CLI' - astra-streaming-examples-repo: 'https://raw.githubusercontent.com/datastax/astra-streaming-examples/master' - luna-streaming-examples-repo: 'https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main' - support_url: 'https://www.ibm.com/mysupport/s/' - glossary-url: 'https://docs.datastax.com/en/glossary/docs/index.html#' - 
emoji-tada: "🎉" - emoji-rocket: "🚀" - emoji-smile: "😀" - dse: 'DataStax Enterprise (DSE)' - cassandra: 'Apache Cassandra(R)' - classic: 'classic' - classic_cap: 'Classic' - serverless: 'serverless' - serverless_cap: 'Serverless' # Antora Atlas primary-site-url: https://docs.datastax.com/en primary-site-manifest-url: https://docs.datastax.com/en/site-manifest.json diff --git a/modules/ROOT/pages/faqs.adoc b/modules/ROOT/pages/faqs.adoc index f928ff0..ff77e08 100644 --- a/modules/ROOT/pages/faqs.adoc +++ b/modules/ROOT/pages/faqs.adoc @@ -13,7 +13,7 @@ In addition to {pulsar} itself, {company} {product} provides: * An installer that can stand up a dev or production cluster on bare metal or VMs without a pre-existing Kubernetes environment * A Helm chart that can deploy and manage {pulsar-short} on your current Kubernetes infrastructure -* {cass-short}, Elastic, Kinesis, Kafka, and JDBC connectors +* {cass-short}, Elastic, Kinesis, {kafka-reg}, and JDBC connectors * A management dashboard * A monitoring and alerting system @@ -38,11 +38,11 @@ They include Minikube, K8d, Kind, Google Kubernetes Engine (GKE), Microsoft Azur There are several public repos, each with a different purpose. See: -* https://github.com/datastax/pulsar[https://github.com/datastax/pulsar] : This is the distro repo (a fork of apache/pulsar). -* https://github.com/datastax/pulsar-admin-console[https://github.com/datastax/pulsar-admin-console] : This is the repo for the {pulsar-short} admin console, which allows for the configuration and monitoring of {pulsar-short}. -* https://github.com/datastax/pulsar-heartbeat[https://github.com/datastax/pulsar-heartbeat] : This is a monitoring/observability tool for {pulsar-short} that tracks the health of the cluster and can generate alerts in Slack and OpsGenie. 
-* https://github.com/datastax/pulsar-helm-chart[https://github.com/datastax/pulsar-helm-chart] : This is the Helm chart for deploying the {company} {pulsar-short} Distro in an existing Kubernetes cluster. -* https://github.com/datastax/pulsar-sink[https://github.com/datastax/pulsar-sink] : This is the {company} {pulsar} Connector (`pulsar-sink` for {cass-short}) repo. +* {pulsar-repo}[{pulsar-repo}] : This is the distro repo (a fork of apache/pulsar). +* {pulsar-admin-console-repo}[{pulsar-admin-console-repo}] : This is the repo for the {pulsar-short} admin console, which allows for the configuration and monitoring of {pulsar-short}. +* {pulsar-heartbeat-repo}[{pulsar-heartbeat-repo}] : This is a monitoring/observability tool for {pulsar-short} that tracks the health of the cluster and can generate alerts in Slack and OpsGenie. +* {pulsar-helm-chart-repo}[{pulsar-helm-chart-repo}] : This is the Helm chart for deploying the {company} {pulsar-short} Distro in an existing Kubernetes cluster. +* {pulsar-sink-repo}[{pulsar-sink-repo}] : This is the {company} {pulsar} Connector (`pulsar-sink`) repo. * https://github.com/datastax/burnell[https://github.com/datastax/burnell] : This is a utility for {pulsar-short} that provides various functions, such as key initialization for authentication, and JWT token creation API. == Is there a prerequisite version of Java needed for the {company} {product} installation? @@ -51,9 +51,9 @@ The {company} {product} distribution is designed for Java 11. However, because t == What are the install options for {company} {product}? -* Use the Helm chart provided at https://github.com/apache/pulsar-helm-chart[https://github.com/datastax/pulsar-helm-chart] to install {company} {product} in an existing Kubernetes cluster on your laptop or hosted by a cloud provider. -* Use the tarball provided at https://github.com/datastax/pulsar/releases[https://github.com/datastax/pulsar/releases] to install {company} {product} on a server or VM. 
-* Use the {company} Ansible scripts provided at https://github.com/datastax/pulsar-ansible[https://github.com/datastax/pulsar-ansible] to install {company} {product} on a server or VM with our provided playbooks. +* Use the Helm chart provided at {pulsar-helm-chart-repo}[{pulsar-helm-chart-repo}] to install {company} {product} in an existing Kubernetes cluster on your laptop or hosted by a cloud provider. +* Use the tarball provided at {pulsar-repo}/releases[{pulsar-repo}/releases] to install {company} {product} on a server or VM. +* Use the {company} Ansible scripts provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}] to install {company} {product} on a server or VM with our provided playbooks. == How do I install {company} {product} in my Kubernetes cluster? @@ -76,11 +76,11 @@ From the Admin Console, you can: == What is {pulsar-short} Heartbeat? -https://github.com/datastax/pulsar-heartbeat[{pulsar-short} Heartbeat] monitors the availability, tracks the performance, and reports failures of the {pulsar-short} cluster. It produces synthetic workloads to measure end-to-end message pubsub latency. {pulsar-short} Heartbeat is a cloud-native application that can be installed by Helm within the {pulsar-short} Kubernetes cluster. +{pulsar-heartbeat-repo}[{pulsar-short} Heartbeat] monitors the availability, tracks the performance, and reports failures of the {pulsar-short} cluster. It produces synthetic workloads to measure end-to-end message pubsub latency. {pulsar-short} Heartbeat is a cloud-native application that can be installed by Helm within the {pulsar-short} Kubernetes cluster. == What are the features provided by {company} {pulsar} Connector (`pulsar-sink`) that are not supported in `kafka-sink`? -The https://pulsar.apache.org/docs/en/io-overview/[{pulsar-short} IO framework] provides many features that are not possible in Kafka, and has different compression formats and auth/security features. The features are handled by {pulsar-short}. 
For more, see xref:connectors:index.adoc[{product} IO Connectors]. +The https://pulsar.apache.org/docs/en/io-overview/[{pulsar-short} IO framework] provides many features that are not possible in {kafka-short}, and has different compression formats and auth/security features. The features are handled by {pulsar-short}. For more, see xref:connectors:index.adoc[{product} IO Connectors]. The {company} {pulsar} Connector allows single-record acknowledgement and negative acknowledgements. diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc index cda043a..bfaa5ce 100644 --- a/modules/ROOT/pages/index.adoc +++ b/modules/ROOT/pages/index.adoc @@ -17,7 +17,7 @@ In addition to the distribution of https://pulsar.apache.org/en/versions/[{pulsa * A xref:install-upgrade:quickstart-helm-installs.adoc[Helm chart] that deploys and manages {pulsar-short} on your current CNCF-conformant Kubernetes infrastructure -* {cass-short}, Elastic, Kinesis, Kafka, and JDBC xref:connectors:index.adoc[connectors] +* {cass-short}, Elastic, Kinesis, {kafka-reg}, and JDBC xref:connectors:index.adoc[connectors] * xref:components:admin-console-vm.adoc[{pulsar-short} Admin Console] for simplified administration of your {pulsar-short} environment diff --git a/modules/ROOT/pages/install-aks.adoc b/modules/ROOT/pages/install-aks.adoc index e0b24e5..a6154bc 100644 --- a/modules/ROOT/pages/install-aks.adoc +++ b/modules/ROOT/pages/install-aks.adoc @@ -3,7 +3,7 @@ This document covers installation of {product} on the Azure Kubernetes Service (AKS). -For an example set of production cluster values, see the {company} production-ready https://github.com/datastax/pulsar-helm-chart[Helm chart]. +For an example set of production cluster values, see the {company} production-ready {pulsar-helm-chart-repo}[Helm chart]. 
{company} recommends these hardware resources for running {product} in a Kubernetes environment: @@ -16,13 +16,13 @@ For the local machine running the Helm chart, you will need: == Helm chart `values.yaml` -The https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/values.yaml[`values.yaml`] will deploy {pulsar-short} with: +The {pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/values.yaml[`values.yaml`] will deploy {pulsar-short} with: -* 1 ZooKeeper cluster with `replicaCount:3` -* 1 Bookkeeper cluster with `replicaCount:3` -* 1 Broker cluster with `replicaCount: 3` +* One {zookeeper-reg} cluster with `replicaCount:3` +* One {bookkeeper-reg} cluster with `replicaCount:3` +* One broker cluster with `replicaCount: 3` ** Ledgers: `defaultEnsembleSize: 2`, `defaultAckQuorum: 2`, `defaultWriteQuorum: 2` -* 1 Function cluster with a `replicaCount: 2` +* One function cluster with a `replicaCount: 2` === `default_storage` The `default_storage` parameter in `values.yaml` controls the default storage class for all persistent volumes created by the Helm chart. @@ -33,7 +33,7 @@ default_storage: existingStorageClassName: default ---- -For a component like BookKeeper, which requires stateful storage, we need to override the `default_storage` class when the BookKeeper Persistent Volume Claims (PVCs) are created. +For a component like {bookkeeper-short}, which requires stateful storage, we need to override the `default_storage` class when the {bookkeeper-short} Persistent Volume Claims (PVCs) are created. 
There are two ways to override `default_storage`: @@ -54,7 +54,7 @@ There are two ways to override `default_storage`: replication-type: none ---- -* Create a custom storage configuration as a `yaml` file (https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. +* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the {bookkeeper-short} PVCs. + [source,yaml] ---- @@ -67,7 +67,7 @@ There are two ways to override `default_storage`: === AKS-specific storage class -Here is an example `storage-values.yaml` as used above for the BookKeeper node. Paste it into a `yaml` file to provision a persistent SSD with the `ext4` file system. +Here is an example `storage-values.yaml` as used above for the {bookkeeper-short} node. Paste it into a `yaml` file to provision a persistent SSD with the `ext4` file system. [source,yaml] ---- @@ -104,11 +104,11 @@ kubectl config set-context $(kubectl config current-context) --namespace=pulsar ---- . Execute `kubectl get pods -A` to view your running pods. -. Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc[Accessing {pulsar-short} Cluster in Cloud] to access and manage your cluster. +. Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc[Accessing {pulsar-short} cluster in cloud] to access and manage your cluster. == Next steps -Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc[Accessing {pulsar-short} Cluster in Cloud] to access and manage your cluster. 
+Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc[Accessing {pulsar-short} cluster in cloud] to access and manage your cluster. * To learn more about using the {pulsar-short} Admin Console, see xref:components:admin-console-tutorial.adoc[Admin Console Tutorial]. * To learn more about installing {pulsar-short} on a server or virtual machine, see xref:install-upgrade:quickstart-server-installs.adoc[Quickstart for Server/VM Install]. diff --git a/modules/ROOT/pages/install-eks.adoc b/modules/ROOT/pages/install-eks.adoc index bb1c36f..af8ebca 100644 --- a/modules/ROOT/pages/install-eks.adoc +++ b/modules/ROOT/pages/install-eks.adoc @@ -3,7 +3,7 @@ This document covers installation of {product} on the Amazon Elastic Kubernetes Environment (EKS). -For an example set of production cluster values, see the {company} production-ready https://github.com/datastax/pulsar-helm-chart[Helm chart]. +For an example set of production cluster values, see the {company} production-ready {pulsar-helm-chart-repo}[Helm chart]. 
{company} recommends these hardware resources for running {product} in a Kubernetes environment: @@ -16,13 +16,13 @@ For the local machine running the Helm chart, you will need: == Helm chart `values.yaml` -The https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/values.yaml[`values.yaml`] will deploy {pulsar-short} with: +The {pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/values.yaml[`values.yaml`] will deploy {pulsar-short} with: -* 1 ZooKeeper cluster with `replicaCount:3` -* 1 Bookkeeper cluster with `replicaCount:3` -* 1 Broker cluster with `replicaCount: 3` +* One {zookeeper-reg} cluster with `replicaCount:3` +* One {bookkeeper-reg} cluster with `replicaCount:3` +* One broker cluster with `replicaCount: 3` ** Ledgers: `defaultEnsembleSize: 2`, `defaultAckQuorum: 2`, `defaultWriteQuorum: 2` -* 1 Function cluster with a `replicaCount: 2` +* One function cluster with a `replicaCount: 2` === `default_storage` The `default_storage` parameter in `values.yaml` controls the default storage class for all persistent volumes created by the Helm chart. @@ -33,7 +33,7 @@ default_storage: existingStorageClassName: default ---- -For a component like BookKeeper, which requires stateful storage, we need to override the `default_storage` class when the BookKeeper Persistent Volume Claims (PVCs) are created. +For a component like {bookkeeper-short}, which requires stateful storage, we need to override the `default_storage` class when the {bookkeeper-short} Persistent Volume Claims (PVCs) are created. 
There are two ways to override `default_storage`: @@ -54,7 +54,7 @@ There are two ways to override `default_storage`: iopsPerGB: "10" ---- -* Create a custom storage configuration as a `yaml` file (https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. +* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the {bookkeeper-short} PVCs. + [source,yaml] ---- @@ -67,7 +67,7 @@ There are two ways to override `default_storage`: === EKS-specific storage class -Here is an example `storage-values.yaml` as used above for the BookKeeper node. Paste it into a `yaml` file to provision a persistent general purpose (`gp2`) SSD with the `ext4` file system. +Here is an example `storage-values.yaml` as used above for the {bookkeeper-short} node. Paste it into a `yaml` file to provision a persistent general purpose (`gp2`) SSD with the `ext4` file system. [source,yaml] ---- @@ -105,7 +105,7 @@ helm install pulsar datastax-pulsar/pulsar --namespace pulsar --values storage_v == Next steps -Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc[Accessing {pulsar-short} Cluster in Cloud] to access and manage your cluster. +Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc[Accessing {pulsar-short} cluster in cloud] to access and manage your cluster. * To learn more about using the {pulsar-short} Admin Console, see xref:components:admin-console-tutorial.adoc[Admin Console Tutorial]. 
* To learn more about installing {pulsar-short} on a server or virtual machine, see xref:install-upgrade:quickstart-server-installs.adoc[Quickstart for Server/VM Install]. diff --git a/modules/ROOT/pages/install-gke.adoc b/modules/ROOT/pages/install-gke.adoc index 919827c..7c0d7a6 100644 --- a/modules/ROOT/pages/install-gke.adoc +++ b/modules/ROOT/pages/install-gke.adoc @@ -4,7 +4,7 @@ This document will cover installation of {product} on the Google Kubernetes Environment (GKE). -For an example set of production cluster values, see the {company} production-ready https://github.com/datastax/pulsar-helm-chart[Helm chart]. +For an example set of production cluster values, see the {company} production-ready {pulsar-helm-chart-repo}[Helm chart]. {company} recommends these hardware resources for running {product} in a Kubernetes environment: @@ -17,13 +17,13 @@ For the local machine running the Helm chart, you will need: == Helm chart `values.yaml` -The https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/values.yaml[`values.yaml`] will deploy {pulsar-short} with: +The {pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/values.yaml[`values.yaml`] will deploy {pulsar-short} with: -* 1 ZooKeeper cluster with `replicaCount:3` -* 1 Bookkeeper cluster with `replicaCount:3` -* 1 Broker cluster with `replicaCount: 3` +* One {zookeeper-reg} cluster with `replicaCount:3` +* One {bookkeeper-reg} cluster with `replicaCount:3` +* One broker cluster with `replicaCount: 3` ** Ledgers: `defaultEnsembleSize: 2`, `defaultAckQuorum: 2`, `defaultWriteQuorum: 2` -* 1 Function cluster with a `replicaCount: 2` +* One function cluster with a `replicaCount: 2` === `default_storage` The `default_storage` parameter in `values.yaml` controls the default storage class for all persistent volumes created by the Helm chart. 
@@ -34,7 +34,7 @@ default_storage: existingStorageClassName: default ---- -For a component like BookKeeper, which requires stateful storage, we need to override the `default_storage` class when the BookKeeper Persistent Volume Claims (PVCs) are created. +For a component like {bookkeeper-short}, which requires stateful storage, we need to override the `default_storage` class when the {bookkeeper-short} Persistent Volume Claims (PVCs) are created. There are two ways to override `default_storage`: @@ -55,7 +55,7 @@ There are two ways to override `default_storage`: replication-type: none ---- -* Create a custom storage configuration as a `yaml` file (https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. +* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the {bookkeeper-short} PVCs. + [source,yaml] ---- @@ -68,7 +68,7 @@ There are two ways to override `default_storage`: === GKE-specific storage class -Here is an example `storage-values.yaml` as used above for the BookKeeper node. Paste it into a `yaml` file to provision a persistent SSD with the `ext4` file system. +Here is an example `storage-values.yaml` as used above for the {bookkeeper-short} node. Paste it into a `yaml` file to provision a persistent SSD with the `ext4` file system. [source,yaml] ---- @@ -105,7 +105,7 @@ kubectl config set-context $(kubectl config current-context) --namespace=pulsar . Execute `kubectl get pods -A` to view your running pods. 
== Next steps -Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc#manage-pulsar-cluster[Accessing {pulsar-short} Cluster in Cloud] to access and manage your cluster. +Once your pods are up and running, continue to xref:install-upgrade:quickstart-helm-installs.adoc#manage-pulsar-cluster[Accessing {pulsar-short} cluster in cloud] to access and manage your cluster. * To learn more about using the {pulsar-short} Admin Console, see xref:components:admin-console-tutorial.adoc[Admin Console Tutorial]. * To learn more about installing {pulsar-short} on a server or virtual machine, see xref:install-upgrade:quickstart-server-installs.adoc[Quickstart for Server/VM Install]. \ No newline at end of file diff --git a/modules/ROOT/partials/helm-chart-recommendations.adoc b/modules/ROOT/partials/helm-chart-recommendations.adoc index eaf8d32..a25bce7 100644 --- a/modules/ROOT/partials/helm-chart-recommendations.adoc +++ b/modules/ROOT/partials/helm-chart-recommendations.adoc @@ -4,7 +4,7 @@ ** One `function-worker` node pool for deploying sink and source connectors, and the other node pool for everything else * Must use SSD disks * Depending on the cloud provider, the latest 'Storage Driver' should be used, along with the fastest disk type (for example, GP3 in AWS) -* 5 Zookeeper replicas -* 3 Bookies -* 3 Brokers -* 3 Proxies \ No newline at end of file +* Five {zookeeper-reg} replicas +* Three {bookkeeper-reg} bookies +* Three brokers +* Three proxies \ No newline at end of file diff --git a/modules/components/pages/admin-console-tutorial.adoc b/modules/components/pages/admin-console-tutorial.adoc index a3fa242..f6d501b 100644 --- a/modules/components/pages/admin-console-tutorial.adoc +++ b/modules/components/pages/admin-console-tutorial.adoc @@ -65,7 +65,7 @@ To see detailed information about your topics, go to *Topics*. 
On the {pulsar-short} Admin Console's *Code Samples* page, there are examples for Java, Python, Golang, Node.js, WebSocket, and HTTP clients. Each example shows Producer, Consumer, and Reader code, plus language-specific examples of setting project properties and dependencies. -For example, selecting Java will show you how to connect your Java project to {pulsar-short} by modifying your Maven's `pom.xml` file. +For example, selecting Java will show you how to connect your Java project to {pulsar-short} by modifying your project's `pom.xml` file. [#connect-to-pulsar] == Connecting to {pulsar-short} @@ -141,4 +141,4 @@ Alternatively, you can save the URL authentication parameters in your `client.co == Next steps -For more on building and running a standalone {pulsar-short} Admin console, see the xref:admin-console-vm.adoc[Admin Console on Server/VM] or the {pulsar-short} Admin console repo https://github.com/datastax/pulsar-admin-console#dev[readme]. +For more on building and running a standalone {pulsar-short} Admin console, see the xref:admin-console-vm.adoc[Admin Console on Server/VM] or the {pulsar-short} Admin console repo {pulsar-admin-console-repo}#dev[readme]. diff --git a/modules/components/pages/admin-console-vm.adoc b/modules/components/pages/admin-console-vm.adoc index d99448b..f6ba049 100644 --- a/modules/components/pages/admin-console-vm.adoc +++ b/modules/components/pages/admin-console-vm.adoc @@ -15,13 +15,13 @@ wget https://nodejs.org/dist/v14.18.3/node-v14.18.3-linux-x64.tar.xz / tar -xf node-v14.18.3-linux-x64.tar.xz ---- -. Download and install the {pulsar-short} Admin console tarball to the VM. You can find the most recent {pulsar-short} Admin Console release https://github.com/datastax/pulsar-admin-console/releases[here]. +. Download and install the {pulsar-short} Admin console tarball to the VM. You can find the most recent {pulsar-short} Admin Console release {pulsar-admin-console-repo}/releases[here]. .. 
The tarball is also available with `wget`: + -[source,bash,subs="attributes+"] +[source,bash,subs="+attributes"] ---- -wget https://github.com/datastax/pulsar-admin-console/releases/download/{admin-console-version}/pulsar-admin-console-2.0.0.tar.gz +wget {pulsar-admin-console-repo}/releases/download/{admin-console-version}/pulsar-admin-console-2.0.0.tar.gz ---- . Extract the tarball: @@ -76,7 +76,7 @@ These values can be modified in the JSON configuration file. | cluster_name | standalone | Name of {pulsar-short} cluster connecting to. The cluster name can be retrieved with the CLI command `pulsar-admin clusters list`. | functions_disabled | false | If functions are not enabled in the cluster, disable the function sections (Functions, Sinks, Sources). | grafana_url | | If `render_monitoring_tab` is enabled, URL for Grafana. -| host_overrides.http | \http://localhost:8964 | URL to display in console to connect to {pulsar-short} Beam HTTP proxy. +| host_overrides.http | \http://localhost:8964 | URL to display in console to connect to {pulsar-beam} HTTP proxy. | host_overrides.pulsar | \http://localhost:6650 | URL to display in console to connect to {pulsar-short}. | host_overrides.ws | //localhost:8080 | URL to display in console to connect to WebSocket proxy. | notice_text | | Custom notice to appear at top of console. diff --git a/modules/components/pages/heartbeat-vm.adoc b/modules/components/pages/heartbeat-vm.adoc index 6e21b2c..881631d 100644 --- a/modules/components/pages/heartbeat-vm.adoc +++ b/modules/components/pages/heartbeat-vm.adoc @@ -8,7 +8,7 @@ This document describes how to install {pulsar-short} Heartbeat on a virtual mac + For example, `uname -m` in Ubuntu might return `x86_64`, and `uname -o` returns `GNU/Linux`. -. Download the heartbeat binary `.gz` file matching your OS and process architecture from the https://github.com/datastax/pulsar-heartbeat/releases[releases page]. +. 
Download the heartbeat binary `.gz` file matching your OS and process architecture from the {pulsar-heartbeat-repo}/releases[releases page]. . Uncompress the file to be an executable binary. The filename structure is `pulsar-heartbeat---`. @@ -22,7 +22,7 @@ $ ls ~/Downloads/pulsar-heartbeat-{heartbeat-version}-linux-amd64 == Execute Heartbeat binary -The {pulsar-short} Heartbeat configuration is defined by a `.yaml` file. A yaml template for Heartbeat is available at https://github.com/datastax/pulsar-heartbeat/blob/master/config/runtime-template.yml[]. In this file, the environmental variable `PULSAR_OPS_MONITOR_CFG` tells the application where to source the file. +The {pulsar-short} Heartbeat configuration is defined by a `.yaml` file. A yaml template for Heartbeat is available at {pulsar-heartbeat-repo}/blob/master/config/runtime-template.yml[]. In this file, the environmental variable `PULSAR_OPS_MONITOR_CFG` tells the application where to source the file. Run the binary file `pulsar-heartbeat---`. diff --git a/modules/components/pages/pulsar-beam.adoc b/modules/components/pages/pulsar-beam.adoc index 15c7b60..7f451c0 100644 --- a/modules/components/pages/pulsar-beam.adoc +++ b/modules/components/pages/pulsar-beam.adoc @@ -1,13 +1,13 @@ -= {pulsar-short} Beam with {product} -:navtitle: {pulsar-short} Beam -:description: Install a minimal {product} Helm chart that includes {pulsar-short} Beam += {pulsar-beam} with {product} +:navtitle: {pulsar-beam} +:description: Install a minimal {product} Helm chart that includes {pulsar-beam} :helmValuesPath: https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main/beam/values.yaml -The https://github.com/kafkaesque-io/pulsar-beam[{pulsar-short} Beam] project is an HTTP-based streaming and queueing system for use with {pulsar}. +The {pulsar-beam-repo}[{pulsar-beam}] project is an HTTP-based streaming and queueing system for use with {pulsar}. 
-With {pulsar-short} Beam, you can send messages over HTTP, push messages to a webhook or cloud function, chain webhooks and functions together, or stream messages through server-sent events (SSE). +With {pulsar-beam}, you can send messages over HTTP, push messages to a webhook or cloud function, chain webhooks and functions together, or stream messages through server-sent events (SSE). -In this guide, you'll install a minimal {company} {pulsar-short} Helm chart that includes {pulsar-short} Beam. +In this guide, you'll install a minimal {company} {pulsar-short} Helm chart that includes {pulsar-beam}. == Prerequisites @@ -21,15 +21,15 @@ include::ROOT:partial$install-helm.adoc[] === Forward service port -In a separate terminal window, port forward the Beam endpoint service: +In a separate terminal window, port forward the {pulsar-beam} endpoint service: [source,shell] ---- kubectl port-forward -n datastax-pulsar service/pulsar-proxy 8085:8085 ---- -The forwarding service will map the URL:PORT https://127.0.0.1:8085 to {pulsar-short} Proxy running in the new cluster. -Because Beam was enabled, the Proxy knows to forward on to the Beam service. +The forwarding service will map the URL:PORT https://127.0.0.1:8085 to {pulsar-short} proxy running in the new cluster. +Because {pulsar-beam} was enabled, the proxy knows to forward on to the {pulsar-beam} service. [source,shell] ---- @@ -49,7 +49,7 @@ curl http://127.0.0.1:8085/v2/sse/persistent/public/default/$TOPIC?SubscriptionI ---- Note the use of `SubscriptionInitialPosition=earliest` in the message consumer. -This instructs Beam to create a subscription on the topic starting at the earliest message. +This instructs {pulsar-beam} to create a subscription on the topic starting at the earliest message. Try changing the value to `latest` to only receive new messages that arrive. 
=== Produce a new message @@ -73,11 +73,11 @@ id: {9 0 0 0 0xc002287ad0} data: Hi there ---- -You have now completed the basics of using Beam in a {pulsar-short} Cluster. Refer to the project's https://github.com/kafkaesque-io/pulsar-beam/blob/master/README.md[readme] to see all the possibilities! +You have now completed the basics of using {pulsar-beam} in a {pulsar-short} cluster. Refer to the project's {pulsar-beam-repo}/blob/master/README.md[readme] to see all the possibilities! == A Python producer and consumer -This is another example of producing and consuming messages using Beam. +This is another example of producing and consuming messages using {pulsar-beam}. Instead of using curl, this example will use the "requests" Python library to issue HTTP requests. === Create project @@ -153,6 +153,6 @@ include::ROOT:partial$cleanup-terminal-and-helm.adoc[] Here are links to resources and guides you might be interested in: -* https://github.com/kafkaesque-io/pulsar-beam[Learn more] about the {pulsar-short} Beam project -* https://kafkaesque-io.github.io/pulsar-beam-swagger[{pulsar-short} Beam API] +* {pulsar-beam-repo}[Learn more] about the {pulsar-beam} project +* https://kafkaesque-io.github.io/pulsar-beam-swagger[{pulsar-beam} API] * xref:pulsar-sql.adoc[] \ No newline at end of file diff --git a/modules/components/pages/pulsar-monitor.adoc b/modules/components/pages/pulsar-monitor.adoc index c85113a..e1149ac 100644 --- a/modules/components/pages/pulsar-monitor.adoc +++ b/modules/components/pages/pulsar-monitor.adoc @@ -12,7 +12,7 @@ TIP: {pulsar-short} Heartbeat is installed automatically for server/VM installat * Monitor message pubsub and admin REST API endpoint * Measure end-to-end message latency from producing to consuming messages * Measure message latency over the websocket interface, and {pulsar-short} function -* Monitor instance availability of broker, proxy, bookkeeper, and zookeeper in a {pulsar-short} Kubernetes cluster +* Monitor instance 
availability of broker, proxy, {bookkeeper-short}, and {zookeeper-short} in a {pulsar-short} Kubernetes cluster * Monitor individual {pulsar-short} broker's health * Incident alert integration with OpsGenie * Customer configurable alert thresholds and probe test intervals @@ -26,7 +26,7 @@ TIP: {pulsar-short} Heartbeat is installed automatically for server/VM installat * A command line argument `./pulsar-heartbeat -config /path/to/runtime.yml` * A default path to `../config/runtime.yml` -You can download a template https://github.com/datastax/pulsar-heartbeat/blob/master/config/runtime-template.yml[here]. +You can download a template {pulsar-heartbeat-repo}/blob/master/config/runtime-template.yml[here]. == Observability @@ -50,7 +50,7 @@ You can download a template https://github.com/datastax/pulsar-heartbeat/blob/ma | pulsar_k8s_bookkeeper_offline_counter | gauge -| bookkeeper offline instances in Kubernetes cluster +| {bookkeeper-short} offline instances in Kubernetes cluster | pulsar_k8s_broker_offline_counter | gauge @@ -62,7 +62,7 @@ You can download a template https://github.com/datastax/pulsar-heartbeat/blob/ma | pulsar_k8s_bookkeeper_zookeeper_counter | gauge -| zookeeper offline instances in the Kubernetes cluster +| {zookeeper-short} offline instances in the Kubernetes cluster | pulsar_monitor_counter | counter diff --git a/modules/components/pages/pulsar-sql.adoc b/modules/components/pages/pulsar-sql.adoc index e9b03cf..b13cd2f 100644 --- a/modules/components/pages/pulsar-sql.adoc +++ b/modules/components/pages/pulsar-sql.adoc @@ -199,7 +199,7 @@ select * from pulsar."public/default".mytopic limit 10 presto> exit ---- -You have successfully interacted with a {pulsar-short} Cluster via SQL. +You have successfully interacted with a {pulsar-short} cluster via SQL. Want to put your new learnings to the test? 
Try using the Presto plugin in https://redash.io/data-sources/presto[Redash] or https://superset.apache.org/docs/databases/presto/[Superset] to create useful dashboards. diff --git a/modules/components/pages/starlight.adoc b/modules/components/pages/starlight.adoc index 2d16f8c..2f85434 100644 --- a/modules/components/pages/starlight.adoc +++ b/modules/components/pages/starlight.adoc @@ -11,7 +11,7 @@ The Starlight extensions are open source and included in https://www.ibm.com/doc == {starlight-kafka} -The https://github.com/datastax/starlight-for-kafka[{starlight-kafka} extension] brings native Apache Kafka(R) protocol support to {pulsar} by introducing a Kafka protocol handler on {pulsar-short} brokers. +The https://github.com/datastax/starlight-for-kafka[{starlight-kafka} extension] brings native {kafka-reg} protocol support to {pulsar} by introducing a {kafka-short} protocol handler on {pulsar-short} brokers. For more information, see the xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka} documentation]. 
diff --git a/modules/connectors/pages/index.adoc b/modules/connectors/pages/index.adoc index d951dd6..cfe4e8e 100644 --- a/modules/connectors/pages/index.adoc +++ b/modules/connectors/pages/index.adoc @@ -19,7 +19,7 @@ Supported sink connectors:: The following sink connectors are included in {product}: + * xref:connectors:sinks/astra-db.adoc[{astra-db} and {cass} sink] -* xref:connectors:sinks/kafka.adoc[Apache Kafka sink] +* xref:connectors:sinks/kafka.adoc[{kafka-reg} sink] * xref:connectors:sinks/kinesis.adoc[AWS Kinesis sink] * xref:connectors:sinks/elastic-search.adoc[Elasticsearch sink] * xref:connectors:sinks/jdbc-clickhouse.adoc[JDBC ClickHouse sink] @@ -34,7 +34,7 @@ Source connectors ingest messages from external services into {pulsar-short} top Supported source connectors:: The following source connectors are included in {product}: + -* xref:connectors:sources/kafka.adoc[Apache Kafka source] +* xref:connectors:sources/kafka.adoc[{kafka-reg} source] * xref:connectors:sources/kinesis.adoc[AWS Kinesis source] * xref:connectors:sources/debezium-mongodb.adoc[Debezium MongoDB source] * xref:connectors:sources/debezium-mysql.adoc[Debezium MySQL source] diff --git a/modules/connectors/pages/sinks/astra-db.adoc b/modules/connectors/pages/sinks/astra-db.adoc index b98105e..8388eaa 100644 --- a/modules/connectors/pages/sinks/astra-db.adoc +++ b/modules/connectors/pages/sinks/astra-db.adoc @@ -44,4 +44,4 @@ include::common:streaming:partial$connectors/sinks/astra-db-topic.adoc[] == See also -* https://github.com/datastax/pulsar-sink[{company} {pulsar} connector GitHub repository] \ No newline at end of file +* {pulsar-sink-repo}[{company} {pulsar} connector GitHub repository] \ No newline at end of file diff --git a/modules/connectors/pages/sinks/kafka.adoc b/modules/connectors/pages/sinks/kafka.adoc index 1586b92..e24db2f 100644 --- a/modules/connectors/pages/sinks/kafka.adoc +++ b/modules/connectors/pages/sinks/kafka.adoc @@ -1,12 +1,12 @@ -= Kafka += 
{kafka-short} :connectorType: kafka -The Kafka sink connector reads messages from {pulsar-short} topics and writes them to https://kafka.apache.org/[Kafka] topics. +The {kafka-short} sink connector reads messages from {pulsar-short} topics and writes them to https://kafka.apache.org/[{kafka-reg}] topics. [#compatibility] == Compatibility -{product} supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[Kafka 2.7.2 library]. +{product} supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[{kafka} 2.7.2 library]. == Create the connector @@ -29,9 +29,9 @@ include::common:streaming:partial$connectors/connector-params-intro.adoc[] include::common:streaming:partial$connectors/sinks/pulsar-config-params.adoc[] [#configs] -=== Kafka sink connector properties (`configs`) +=== {kafka-short} sink connector properties (`configs`) Set these properties in the `configs` section of the connector configuration. -Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-sink[OSS {pulsar} Kafka sink connector] are supported. +Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-sink[OSS {pulsar} {kafka-short} sink connector] are supported. Exceptions include properties that aren't relevant to {product} and properties that aren't present in {pulsar} {pulsar-version}. \ No newline at end of file diff --git a/modules/connectors/pages/sources/kafka.adoc b/modules/connectors/pages/sources/kafka.adoc index 5ac9110..49598c4 100644 --- a/modules/connectors/pages/sources/kafka.adoc +++ b/modules/connectors/pages/sources/kafka.adoc @@ -1,7 +1,7 @@ -= Kafka += {kafka-short} :connectorType: kafka -The Kafka source connector pulls data from https://kafka.apache.org/[Kafka] topics and persists it to {pulsar-short} topics. +The {kafka-short} source connector pulls data from https://kafka.apache.org/[{kafka-reg}] topics and persists it to {pulsar-short} topics. 
== Create the connector @@ -24,9 +24,9 @@ include::common:streaming:partial$connectors/connector-params-intro.adoc[] include::common:streaming:partial$connectors/sources/pulsar-config-params.adoc[] [#configs] -=== Kafka source connector properties (`configs`) +=== {kafka-short} source connector properties (`configs`) Set these properties in the `configs` section of the connector configuration. -Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-source[OSS {pulsar} Kafka source connector] are supported. +Generally, all properties provided in the https://pulsar.apache.org/docs/io-kafka-source[OSS {pulsar} {kafka-short} source connector] are supported. Exceptions include properties that aren't relevant to {product} and properties that aren't present in {pulsar} {pulsar-version}. \ No newline at end of file diff --git a/modules/install-upgrade/pages/quickstart-helm-installs.adoc b/modules/install-upgrade/pages/quickstart-helm-installs.adoc index ece8a0c..b38e50c 100644 --- a/modules/install-upgrade/pages/quickstart-helm-installs.adoc +++ b/modules/install-upgrade/pages/quickstart-helm-installs.adoc @@ -4,7 +4,7 @@ You have options for installing *{company} {product}*: * With the provided *{company} Helm chart* for an existing Kubernetes environment locally or with a cloud provider, as covered in this topic. * With the *{company} {product} tarball* for deployment to a single server/VM, or to multiple servers/VMs. See xref:install-upgrade:quickstart-server-installs.adoc[Quick Start for Server/VM installs]. -* With the *{company} Ansible scripts* provided at https://github.com/datastax/pulsar-ansible[https://github.com/datastax/pulsar-ansible]. +* With the *{company} Ansible scripts* provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}]. The Helm chart and options described below configure an {pulsar} cluster. It is designed for production use, but can also be used in local development environments with the proper settings. 
@@ -13,13 +13,13 @@ The resulting configuration includes support for: * xref:install-upgrade:quickstart-helm-installs.adoc#tls[TLS] * xref:install-upgrade:quickstart-helm-installs.adoc#authentication[Authentication] -* WebSocket Proxy -* Standalone Functions Workers -* {pulsar-short} IO Connectors -* xref:install-upgrade:quickstart-helm-installs.adoc#_tiered_storage_configuration[Tiered Storage] including Tardigarde distributed cloud storage -* xref:install-upgrade:quickstart-helm-installs.adoc#_pulsar_sql_configuration[{pulsar-short} SQL Workers] +* WebSocket proxy +* Standalone functions workers +* {pulsar-short} I/O connectors +* xref:install-upgrade:quickstart-helm-installs.adoc#_tiered_storage_configuration[Tiered storage] including Tardigarde distributed cloud storage +* xref:install-upgrade:quickstart-helm-installs.adoc#_pulsar_sql_configuration[{pulsar-short} SQL workers] * {pulsar-short} Admin Console for managing the cluster -* {pulsar-short} heartbeat +* {pulsar-short} Heartbeat * Burnell for API-based token generation * Prometheus, Grafana, and Alertmanager stack with default Grafana dashboards and {pulsar-short}-specific alerting rules * cert-manager with support for self-signed certificates as well as public certificates using ACME; such as Let's Encrypt @@ -27,7 +27,7 @@ The resulting configuration includes support for: == Prerequisites -For an example set of production cluster values, see the {company} production-ready https://github.com/datastax/pulsar-helm-chart[Helm chart]. +For an example set of production cluster values, see the {company} production-ready {pulsar-helm-chart-repo}[Helm chart]. {company} recommends these hardware resources for running {product} in a Kubernetes environment: @@ -49,7 +49,7 @@ default_storage: existingStorageClassName: default ---- -For a component like BookKeeper, which requires stateful storage, we need to override the `default_storage` class when the BookKeeper Persistent Volume Claims (PVCs) are created. 
+For a component like {bookkeeper-short}, which requires stateful storage, we need to override the `default_storage` class when the {bookkeeper-short} Persistent Volume Claims (PVCs) are created. There are two ways to override `default_storage`: @@ -106,7 +106,7 @@ There are two ways to override `default_storage`: replication-type: none ---- -* Create a custom storage configuration as a `yaml` file (https://github.com/datastax/pulsar-helm-chart/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the BookKeeper PVCs. +* Create a custom storage configuration as a `yaml` file ({pulsar-helm-chart-repo}/blob/master/helm-chart-sources/pulsar/templates/bookkeeper/bookkeeper-storageclass.yaml[like the {company} example]) and tell the Helm chart to use that storage configuration when it creates the {bookkeeper-short} PVCs. + [source,yaml] ---- @@ -207,7 +207,7 @@ helm repo update curl -LOs https://datastax.github.io/pulsar-helm-chart/examples/dev-values.yaml ---- -The `dev-values.yaml` file can be viewed (https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values.yaml[here]). +The `dev-values.yaml` file can be viewed ({pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[here]). To list the version of the chart in the local Helm repository: @@ -276,11 +276,11 @@ kubectl port-forward -n pulsar $(kubectl get pods -n pulsar -l app.kubernetes.io == Example configurations -There are several example configurations in the https://github.com/datastax/pulsar-helm-chart/blob/master/examples[examples] directory: +There are several example configurations in the {pulsar-helm-chart-repo}/blob/master/examples[examples] directory: -* https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values.yaml[dev-values.yaml example file]. 
A configuration for setting up a development environment to run in a local Kubernetes environment (for example, https://minikube.sigs.k8s.io/docs/start/[minikube], or https://kind.sigs.k8s.io/[kind]). Message/state persistence, redundancy, authentication, and TLS are disabled. +* {pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[dev-values.yaml example file]. A configuration for setting up a development environment to run in a local Kubernetes environment (for example, https://minikube.sigs.k8s.io/docs/start/[minikube], or https://kind.sigs.k8s.io/[kind]). Message/state persistence, redundancy, authentication, and TLS are disabled. + -TIP: With message/state persistence disabled, the cluster will not survive a restart of the ZooKeeper or BookKeeper. +TIP: With message/state persistence disabled, the cluster will not survive a restart of the {zookeeper-short} or {bookkeeper-short}. * `dev-values-persistence.yaml`. Same as above, but persistence is enabled. This will allow for the cluster to survive the restarts of the pods, but requires persistent volume claims (PVC) to be supported by the Kubernetes environment. @@ -288,7 +288,7 @@ TIP: With message/state persistence disabled, the cluster will not survive a res + `helm install pulsar -f dev-values-auth.yaml datastax-pulsar/pulsar` -* `dev-values-tls.yaml`. Development environment with self-signed certificate created by cert-manager. You need to install the cert-manager CRDs before installing the Helm chart. The chart will install the cert-manager application. +* `dev-values-tls.yaml`. Development environment with self-signed certificate created by cert-manager. You need to install the cert-manager {crd} before installing the Helm chart. The chart will install the cert-manager application. + ---- kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.1.0/cert-manager.crds.yaml @@ -331,7 +331,7 @@ In addition, you can configure any S3 compatible storage. 
There is explicit supp === {pulsar-short} SQL Configuration -If you enable {pulsar-short} SQL, the cluster provides https://prestodb.io/[Presto] access to the data stored in BookKeeper (and tiered storage, if enabled). Presto is exposed on the service named `-sql`. +If you enable {pulsar-short} SQL, the cluster provides https://prestodb.io/[Presto] access to the data stored in {bookkeeper-short} (and tiered storage, if enabled). Presto is exposed on the service named `-sql`. The easiest way to access the Presto command line is to log into the bastion host and then connect to the Presto service port, like this: @@ -443,7 +443,7 @@ This is useful if you are using a self-signed certificate. For automated handling of publicly signed certificates, you can use a tool such as https://cert-mananager[cert-manager]. -For more information, see https://github.com/datastax/pulsar-helm-chart/blob/master/aws-customer-docs.md[Using Cert-Manager for {pulsar-short} Certificates in AWS]. +For more information, see {pulsar-helm-chart-repo}/blob/master/aws-customer-docs.md[Using Cert-Manager for {pulsar-short} Certificates in AWS]. Once you have created the secrets that store the certificate info (or specified it in the values), you can enable TLS in the values: diff --git a/modules/install-upgrade/pages/quickstart-server-installs.adoc b/modules/install-upgrade/pages/quickstart-server-installs.adoc index 1c3f8d8..ec6fd5b 100644 --- a/modules/install-upgrade/pages/quickstart-server-installs.adoc +++ b/modules/install-upgrade/pages/quickstart-server-installs.adoc @@ -30,10 +30,10 @@ The servers must be on the same network so they can communicate with each other. * Servers should have at least 50 GB in their root disk volume. -* BookKeeper should use one volume device for the journal, and one volume device for the ledgers. +* {bookkeeper-reg} should use one volume device for the journal, and one volume device for the ledgers. The journal device should be 20GB. 
The ledger volume device should be sized to hold the expected amount of stored message data. -* {company} recommends a separate data disk volume for ZooKeeper. +* {company} recommends a separate data disk volume for {zookeeper-reg}. * Operating System Settings + @@ -43,7 +43,7 @@ Check this setting with `cat /sys/kernel/mm/transparent_hugepage/enabled` and `c [#install] == Installation -. Download the {company} {product} tarball from the https://github.com/datastax/pulsar/releases[{company} GitHub repo]. There are three versions of {product} currently available: +. Download the {company} {product} tarball from the {pulsar-repo}/releases[{company} GitHub repo]. There are three versions of {product} currently available: + [cols="1,1"] [%autowidth] @@ -52,7 +52,7 @@ Check this setting with `cat /sys/kernel/mm/transparent_hugepage/enabled` and `c |*Included components* |`lunastreaming-core--bin.tar.gz` -|Contains the core {pulsar-short} modules: Zookeeper, Broker, BookKeeper, and function worker +|Contains the core {pulsar-short} modules: {zookeeper-short}, broker, {bookkeeper-short}, and function worker |`lunastreaming--bin.tar.gz` |Contains all components from `lunastreaming-core` as well as support for {pulsar-short} SQL @@ -96,7 +96,7 @@ You have successfully installed the {company} {product} tarball. Once the {company} {product} tarball is installed, you may want to add additional tooling to your server/VM deployment. * *{pulsar-short} Admin Console:* Web-based UI that administrates {pulsar-short}. -Download the latest version from the https://github.com/datastax/pulsar-admin-console[{company} GitHub repo] and follow the instructions xref:components:admin-console-vm.adoc[here]. +Download the latest version from the {pulsar-admin-console-repo}[{company} GitHub repo] and follow the instructions xref:components:admin-console-vm.adoc[here]. 
+ [NOTE] ==== @@ -104,14 +104,14 @@ Admin Console requires https://nodejs.org/download/release/latest-v14.x/[NodeJS ==== * *{pulsar-short} Heartbeat:* Monitors {pulsar-short} cluster availability. -Download the latest version from the https://github.com/datastax/pulsar-heartbeat/releases/[{company} GitHub repo] and follow the instructions xref:components:heartbeat-vm.adoc[here]. +Download the latest version from the {pulsar-heartbeat-repo}/releases/[{company} GitHub repo] and follow the instructions xref:components:heartbeat-vm.adoc[here]. == Next steps -* For initializing {pulsar-short} components like BookKeeper and ZooKeeper, see the https://pulsar.apache.org/docs/deploy-bare-metal[{pulsar-short} documentation]. +* For initializing {pulsar-short} components like {bookkeeper-short} and {zookeeper-short}, see the https://pulsar.apache.org/docs/deploy-bare-metal[{pulsar-short} documentation]. * For installing optional built-in connectors or tiered storage included in `lunastreaming-all`, see the https://pulsar.apache.org/docs/deploy-bare-metal#install-builtin-connectors-optional[{pulsar-short} documentation]. * For installation to existing Kubernetes environments or with a cloud provider, see xref:install-upgrade:quickstart-helm-installs.adoc[Quick Start for Helm Chart installs]. -* For Ansible deployment, use the {company} Ansible scripts provided at https://github.com/datastax/pulsar-ansible[https://github.com/datastax/pulsar-ansible]. +* For Ansible deployment, use the {company} Ansible scripts provided at {pulsar-ansible-repo}[{pulsar-ansible-repo}]. diff --git a/modules/operations/pages/auth.adoc b/modules/operations/pages/auth.adoc index 181f7c2..c52cfcc 100644 --- a/modules/operations/pages/auth.adoc +++ b/modules/operations/pages/auth.adoc @@ -51,7 +51,7 @@ Create the certificate: The resulting secret will be of type `kubernetes.io/tls`. The key should *not* be in `PKCS 8` format, even though that is the format used by {pulsar-short}. 
The `kubernetes.io/tls` format will be converted by the chart to `PKCS 8`. -If you have a self-signed certificate, manually specify the certificate information directly in https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[values]: +If you have a self-signed certificate, manually specify the certificate information directly in {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[values]: ---- # secrets: @@ -60,17 +60,17 @@ If you have a self-signed certificate, manually specify the certificate informat # caCertificate: | ---- -Once you have created the secrets that store the certificate info (or manually specified it in https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[values]), enable TLS in the values: +Once you have created the secrets that store the certificate info (or manually specified it in {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[values]), enable TLS in the values: `enableTls: yes` == Token Authentication via Keycloak Integration -{company} created the https://github.com/datastax/pulsar-openid-connect-plugin[{pulsar-short} OpenID Connect Authentication Plugin] to provide a more dynamic authentication option for {pulsar-short}. This plugin integrates with any OpenID Connect-compliant identity provider to dynamically retrieve public keys for token validation. This dynamic public key retrieval enables support for key rotation and multiple authentication/identity providers by configuring multiple allowed token issuers. It also means that token secret keys will *not* be stored in Kubernetes secrets. +{company} created the {pulsar-openid-connect-repo}[{pulsar-short} OpenID Connect Authentication Plugin] to provide a more dynamic authentication option for {pulsar-short}. This plugin integrates with any OpenID Connect-compliant identity provider to dynamically retrieve public keys for token validation. 
This dynamic public key retrieval enables support for key rotation and multiple authentication/identity providers by configuring multiple allowed token issuers. It also means that token secret keys will *not* be stored in Kubernetes secrets. -In order to simplify deployment for {pulsar-short} cluster components, the plugin provides the option to use Keycloak in conjunction with {pulsar-short}'s basic token based authentication. For more, see https://github.com/datastax/pulsar-openid-connect-plugin[{pulsar-short} OpenID Connect Authentication Plugin]. +In order to simplify deployment for {pulsar-short} cluster components, the plugin provides the option to use Keycloak in conjunction with {pulsar-short}'s basic token-based authentication. For more, see {pulsar-openid-connect-repo}[{pulsar-short} OpenID Connect Authentication Plugin]. -See the example https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[Keycloak Helm chart] for deploying a working cluster that integrates with Keycloak. By default, the Helm chart creates a {pulsar-short} realm within Keycloak and sets up the client used by the {pulsar-short} Admin Console as well as a sample client and some sample groups. The configuration for the broker side auth plugin should be placed in the `.Values..configData` maps. +See the example {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[Keycloak Helm chart] for deploying a working cluster that integrates with Keycloak. By default, the Helm chart creates a {pulsar-short} realm within Keycloak and sets up the client used by the {pulsar-short} Admin Console as well as a sample client and some sample groups. The configuration for the broker-side auth plugin should be placed in the `.Values..configData` maps. 
=== Configuring Keycloak for Token Generation @@ -80,7 +80,7 @@ See the example https://github.com/datastax/pulsar-helm-chart/blob/master/exampl $ helm install test --values ../../examples/dev-values-keycloak-auth.yaml ---- -. Ensure your deployment name matches the working cluster's name. The name of the deployment is *very important* for a working cluster. The https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[values] file assumes that the cluster's name is `test`. +. Ensure your deployment name matches the working cluster's name. The name of the deployment is *very important* for a working cluster. The {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[values] file assumes that the cluster's name is `test`. . Once the cluster is operational, port forward to Keycloak: + @@ -99,7 +99,7 @@ keycloak: . Navigate to `localhost:8080` in a browser and view the {pulsar-short} realm in the Keycloak UI. Note that the realm name must match the configured realm name (`.Values.keycloak.realm`) for the OpenID Connect plugin to work properly. -The OpenID Connect plugin uses the `sub` (subject) claim from the JWT as the role used for authorization within {pulsar-short}. To get Keycloak to generate the JWT for a client with the right `sub`, create a special "mapper" that is a "Hardcoded claim" mapping claim name sub to a claim value that is the desired role, like `superuser`. The default config installed by https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values-keycloak-auth.yaml[this Helm chart] provides examples of how to add custom mapper protocols to clients. +The OpenID Connect plugin uses the `sub` (subject) claim from the JWT as the role used for authorization within {pulsar-short}. 
To get Keycloak to generate the JWT for a client with the right `sub`, create a special "mapper" that is a "Hardcoded claim" mapping claim name `sub` to a claim value that is the desired role, like `superuser`. The default config installed by {pulsar-helm-chart-repo}/blob/master/examples/dev-values-keycloak-auth.yaml[this Helm chart] provides examples of how to add custom mapper protocols to clients. === Retrieving and using a token from Keycloak with {pulsar-short} Admin CLI diff --git a/modules/operations/pages/functions.adoc b/modules/operations/pages/functions.adoc index 5fde778..6121dd5 100644 --- a/modules/operations/pages/functions.adoc +++ b/modules/operations/pages/functions.adoc @@ -10,7 +10,7 @@ The function automatically runs for each message published to the specified inpu Functions are implemented using https://pulsar.apache.org/docs/en/functions-overview/[{pulsar-reg} functions]. -https://github.com/datastax/pulsar[{product} 2.10 or later] is required to deploy custom functions in {pulsar-short}. +{pulsar-repo}[{product} 2.10 or later] is required to deploy custom functions in {pulsar-short}. == Manage functions using {pulsar-short} Admin CLI diff --git a/modules/operations/pages/scale-cluster.adoc b/modules/operations/pages/scale-cluster.adoc index f59ef03..b178a50 100644 --- a/modules/operations/pages/scale-cluster.adoc +++ b/modules/operations/pages/scale-cluster.adoc @@ -4,9 +4,9 @@ This page will show you how to scale {product} clusters up for more compute capa == Installing {pulsar-short} cluster -For our {pulsar-short} cluster installation, use this https://github.com/datastax/pulsar-helm-chart[Helm chart]. +For our {pulsar-short} cluster installation, use this {pulsar-helm-chart-repo}[Helm chart]. -To start the cluster, use the values provided in this https://github.com/datastax/pulsar-helm-chart/blob/master/examples/dev-values.yaml[YAML file]. 
+To start the cluster, use the values provided in this {pulsar-helm-chart-repo}/blob/master/examples/dev-values.yaml[YAML file]. ---- $ diff ~/dev-values.yaml ~/dev-values_large.yaml @@ -99,7 +99,7 @@ $ helm upgrade pulsar -f ~/dev-values_large.yaml --wait datastax-pulsar/pulsar $ ./bin/bookkeeper shell listunderreplicated ---- -. Double-check the bookie id of the failing bookie: +. Double-check the `bookieid` of the failing bookie: + [source,shell] ---- @@ -116,7 +116,7 @@ $ ./bin/bookkeeper shell decommissionbookie -bookieid pulsar-bookkeeper-3.pulsar + [NOTE] ==== -The decommissioning will take some time because it triggers the audit and auto-recovery and blocks until auto-recovery completes. +The decommissioning will take some time because it triggers the audit and AutoRecovery and blocks until AutoRecovery completes. ==== + This command will delete the cookie of the decommissioned bookie: `pulsar-bookkeeper-3.pulsar-bookkeeper.default.svc.cluster.local:3181`.