diff --git a/.github/vale/styles/config/vocabularies/Aiven/accept.txt b/.github/vale/styles/config/vocabularies/Aiven/accept.txt
index a81bd821b..4196bec24 100644
--- a/.github/vale/styles/config/vocabularies/Aiven/accept.txt
+++ b/.github/vale/styles/config/vocabularies/Aiven/accept.txt
@@ -1,11 +1,15 @@
188
ACL
ACLs
+ACU
+ACUs
Addons
africa
AIInsights
Aiven
Aiven's
+AKU
+AKUs
allowlist
allowlists
Altinity
@@ -140,7 +144,6 @@ GitHub
go
Google Cloud Platform
google_columnar_engine_enabled
-google_columnar_engine_enabled
google_columnar_engine_memory_size_percentage
Gzipped
gzipped
@@ -169,6 +172,7 @@ hypertables
IdP
IdPs
InfluxDB
+Inkless
InnoDB
inodes
Instana
@@ -259,7 +263,6 @@ pg_dump
pgAdmin
PGAudit
PgBouncer
-pg_dump
PGHoard
pglookout
pgoutput
diff --git a/docs/products/kafka/concepts/inkless-aku.md b/docs/products/kafka/concepts/inkless-aku.md
new file mode 100644
index 000000000..c083f8efb
--- /dev/null
+++ b/docs/products/kafka/concepts/inkless-aku.md
@@ -0,0 +1,72 @@
+---
+title: AKU plans and scaling
+---
+import RelatedPages from "@site/src/components/RelatedPages";
+
+Inkless uses Aiven Kafka Units (AKUs) to help you size Apache Kafka services based on throughput instead of hardware resources.
+An AKU represents the amount of traffic a service can handle. You estimate the expected
+throughput when creating the service. This estimate determines the initial AKU level and
+the scaling range.
+
+## How AKUs work
+
+- Each AKU corresponds to a specific throughput capacity. It represents the compute and
+ memory resources required to meet that throughput.
+- The initial AKU level is derived from the expected throughput estimate provided during
+ service creation.
+- The service monitors throughput over time.
+- When throughput remains above the threshold for the current AKU level for a period of
+ time, the service scales up within your configured limits.
+- When throughput remains low for a sustained period, the service scales down.
+
+Scaling changes the number of AKUs in use, which affects AKU-hour billing. Scaling
+actions do not affect topic configuration or data retention.
+
+## Throughput measurement
+
+Inkless measures two types of traffic:
+
+- **Ingress:** Data written to topics by producers.
+- **Egress:** Data read from topics by consumers, connectors, and mirroring processes.
+
+Both ingress and egress affect the number of AKUs required. You can track ingress and
+egress usage in the Service utilization view, which also shows the AKU thresholds.
+
+## Autoscaling limits
+
+Depending on your cloud provider and account, you can configure:
+
+- **Minimum AKUs:** The lowest capacity the service can scale down to.
+- **Maximum AKUs:** The highest capacity the service can scale up to.
+
+Inkless scales automatically within these limits. Scaling occurs only when
+throughput remains above or below a threshold for a sustained period.
+
+## Storage and AKUs
+
+Storage does not influence AKU scaling:
+
+- Diskless topics write directly to object storage.
+- Classic topics use local disk for recent data and move older segments to object storage
+ through tiered storage.
+
+Storage and compute scale independently, so you can adjust retention without changing
+AKU levels.
+
+## When to adjust AKU ranges
+
+Adjust your AKU limits when:
+
+- Workload throughput increases for sustained periods.
+- Traffic spikes begin to persist for longer periods.
+- Reducing costs during low-traffic periods requires a lower maximum AKU.
+- The workload needs a guaranteed minimum level of throughput.
+
+For details on how AKU usage affects billing, see [Billing](/docs/products/kafka/concepts/inkless-billing).
+
+
+
+
+- [Inkless overview](/docs/products/kafka/concepts/inkless)
+- [Billing for Inkless](/docs/products/kafka/concepts/inkless-billing)
+- [Create a Kafka service](/docs/products/kafka/create-kafka-service)
diff --git a/docs/products/kafka/concepts/inkless-billing.md b/docs/products/kafka/concepts/inkless-billing.md
new file mode 100644
index 000000000..64e4c9ee7
--- /dev/null
+++ b/docs/products/kafka/concepts/inkless-billing.md
@@ -0,0 +1,57 @@
+---
+title: Inkless billing
+sidebar_label: Billing
+description: Learn how billing works for Inkless Apache Kafka® on Aiven, including compute billed in AKUs, object storage costs, and topic ingress and egress charges.
+---
+
+import RelatedPages from "@site/src/components/RelatedPages";
+
+Inkless uses a usage-based billing model.
+You are charged for:
+
+- **Compute**, measured in Aiven Kafka Units (AKUs)
+- **Storage**, based on the amount of data retained in object storage
+- **Data movement**, based on topic ingress and egress
+
+:::note
+Inkless BYOC deployments continue to use the existing plan-based pricing model.
+:::
+
+## AKU-hours
+
+Compute charges are measured in AKU-hours.
+
+An AKU (Aiven Kafka Unit) represents the throughput capacity of the service. The service
+bills based on the number of AKUs in use over time, calculated in AKU-hours. When the
+service scales up or down, the AKU-hour charge updates to match the current AKU level.
+
+For details on how scaling works, see
+[AKU plans and scaling](/docs/products/kafka/concepts/inkless-aku).
+
+## Storage
+
+Storage charges are based on the amount of data retained in object storage.
+
+- Diskless topics store all retained data in object storage.
+- Classic topics keep some recent data on local disk before offloading it to
+ object storage.
+
+## Network usage
+
+Network charges apply to:
+
+- **Ingress:** Data written to topics
+- **Egress:** Data read by consumers, connectors, or mirroring processes
+
+Network usage is measured at the service level across all topics.
+
+:::note
+Only data written to and read from Kafka topics is billed.
+Data that Kafka replicates between brokers for fault tolerance is not billed.
+:::
+
+
+
+- [Inkless overview](/docs/products/kafka/concepts/inkless)
+- [AKU plans and scaling](/docs/products/kafka/concepts/inkless-aku)
+- [Create a Kafka service](/docs/products/kafka/create-kafka-service)
diff --git a/docs/products/kafka/concepts/inkless.md b/docs/products/kafka/concepts/inkless.md
new file mode 100644
index 000000000..4e724f99a
--- /dev/null
+++ b/docs/products/kafka/concepts/inkless.md
@@ -0,0 +1,71 @@
+---
+title: Inkless overview
+sidebar_label: Overview
+---
+
+import RelatedPages from "@site/src/components/RelatedPages";
+
+Inkless is Aiven’s cloud-native Apache Kafka® service that modernizes Kafka with diskless topics and object storage for data retention.
+It reduces operational overhead while preserving full compatibility with existing
+Kafka clients.
+
+Inkless runs on Kafka 4.x and uses Aiven Kafka Units (AKUs) to size services by throughput
+instead of hardware plans. It supports both classic and diskless topics within the same
+service.
+
+
+## Key differences from classic Kafka
+
+Inkless changes how Kafka services are sized, stored, and managed:
+
+- **Throughput-based sizing:** Services use AKUs instead of hardware plans and scale
+ within defined limits as throughput changes.
+- **Flexible storage:** Diskless topics store all data in object storage. Classic topics
+ use local disk with tiered storage enabled by default.
+- **Managed configuration:** Broker-level settings are fixed to maintain service
+ stability and allow automatic scaling.
+- **KRaft metadata management:** Inkless uses KRaft for metadata and consensus,
+ replacing ZooKeeper.
+- **Cloud availability:** Inkless is available on selected cloud providers, with support
+ expanding over time.
+- **Diskless topics:** Diskless topics are available only in Inkless services.
+
+## When to use Inkless
+
+Use Inkless when:
+
+- Workload throughput fluctuates and requires autoscaling.
+- Storage and compute must scale independently.
+- Your use cases require diskless topics for long-term retention or large datasets.
+- You need a simplified capacity model without hardware planning.
+
+Classic Kafka remains available for existing deployments and appears in the Aiven Console
+only for customers who already run Classic services.
+
+## Inkless capabilities
+
+Inkless supports:
+
+- High-throughput workloads by reducing cross-availability zone network traffic with diskless topics.
+- Workloads with fluctuating throughput through autoscaling.
+- Independent scaling of storage and compute.
+- Diskless topics for long-term retention and large datasets.
+- A simplified, throughput-based capacity model without hardware planning.
+
+
+## Existing Classic Kafka services
+
+Existing Classic Kafka services continue to run unchanged.
+
+Classic Kafka remains available only for existing deployments and appears in the
+Aiven Console only when a project already includes a Classic Kafka service.
+
+Upgrading or migrating an existing Classic Kafka service to Inkless is not supported.
+Service type is fixed at creation. To use Inkless, create a Kafka service and select
+Inkless as the service type.
+
+
+- [Create a Kafka service](/docs/products/kafka/create-kafka-service)
+- [Diskless topics overview](/docs/products/kafka/diskless/concepts/diskless-overview)
+- [AKU plans and scaling](/docs/products/kafka/concepts/inkless-aku)
+- [Billing for Inkless](/docs/products/kafka/concepts/inkless-billing)
diff --git a/docs/products/kafka/create-kafka-service.md b/docs/products/kafka/create-kafka-service.md
index 67c67a7d1..408583f47 100644
--- a/docs/products/kafka/create-kafka-service.md
+++ b/docs/products/kafka/create-kafka-service.md
@@ -1,7 +1,7 @@
---
-title: Create an Aiven for Apache Kafka® service
-sidebar_label: Create Kafka service
-keywords: [create, kafka, service, byoc, diskless]
+title: Create a Kafka service
+sidebar_label: Create service
+keywords: [create, kafka, service, inkless, classic, byoc]
---
import Tabs from '@theme/Tabs';
@@ -14,47 +14,35 @@ import TerraformPrereqs from "@site/static/includes/terraform-get-started-prereq
import TerraformApply from "@site/static/includes/terraform-apply-changes.md";
import TerraformSample from '@site/src/components/CodeSamples/TerraformSample';
-You can create an Aiven for Apache Kafka® service using the Aiven Console, CLI, or Terraform.
-During creation, you can enable **diskless topics** for Bring Your Own Cloud (BYOC)
-deployments. If you do not enable diskless topics, the service stores topic data on
-local disks by default.
+Learn how to create an Apache Kafka® service on Aiven. You can choose between two Kafka
+modes and deploy to either Aiven cloud or your own cloud infrastructure.
-### Decide whether to enable diskless topics
+## Choose your Kafka mode
-Choose the configuration that fits your workload:
+Aiven offers two ways to run Apache Kafka:
-- **Standard Kafka service:** Uses local disk storage for lower latency and all-region
- availability.
-- **Kafka service with diskless topics:** Stores data in cloud object storage for
- cost-optimized scaling in Bring Your Own Cloud (BYOC) environments.
+- **Inkless**: Runs Apache Kafka 4.x with diskless topics and tiered storage enabled by
+ default.
+ - On **Aiven cloud**, compute is usage-based and measured in Aiven Kafka Units (AKUs).
+  - On **Bring your own cloud (BYOC)**, pricing is plan-based.
+ Inkless availability depends on the selected cloud provider.
-Diskless topics are currently supported only for BYOC deployments on AWS.
-
-:::note
-You cannot enable diskless topics on an existing Kafka service that was created with
-local storage only.
-To use diskless topics, create a Kafka service with diskless support enabled.
-Once enabled, you can create both diskless and classic topics within that service.
-:::
-
-For details on the differences between topic types, see
-[Classic vs. diskless topics](/docs/products/kafka/diskless/concepts/topics-vs-classic).
+- **Classic Kafka**: Uses fixed plans with local broker storage. Stores topic data on
+ local disks by default, with optional tiered storage.
## Prerequisites
-Make sure you have the following:
-
-
+
- Access to the [Aiven Console](https://console.aiven.io)
-- An Aiven project to create the service in
+- An Aiven project where you can create services
-- [Aiven CLI](https://github.com/aiven/aiven-client#installation) installed
-- [A personal token](/docs/platform/howto/create_authentication_token)
+- Install the [Aiven CLI](https://github.com/aiven/aiven-client#installation)
+- Create an [API token](/docs/platform/howto/create_authentication_token)
@@ -64,66 +52,150 @@ Make sure you have the following:
-### Additional requirements for diskless topics
-
-To create a Kafka service with diskless topics, make sure that:
-
-- You have a [BYOC environment](/docs/platform/howto/byoc/create-cloud/create-custom-cloud)
- set up in your cloud account on AWS.
-- Diskless topics are enabled for your organization by Aiven. If the option does not
- appear in the Aiven Console, [contact Aiven support](https://aiven.io/contact).
+## Create an Inkless service on Aiven cloud
-## Create a Kafka service
+Inkless on Aiven cloud uses Aiven Kafka Units (AKUs) to size compute capacity. It runs
+Kafka 4.x and enables diskless topics and tiered storage by default.
-Create a Kafka service that stores topic data on local disks by default.
-
-
+
-1. In your project, click .
+1. In the [Aiven Console](https://console.aiven.io), open your project and
+ click .
1. Click **Create service**.
-1. Select **Aiven for Apache Kafka®**.
-1. In the **Optimize cost** section, keep diskless topics turned off to create a standard
- Kafka service.
+1. Select **Apache Kafka®**.
+1. Select **Inkless** as the service type.
+1. Select **Aiven cloud** as the deployment mode.
- :::tip
- To create a Kafka service with diskless topics instead, see
- [Create a Kafka service with diskless topics (BYOC)](#create-a-kafka-service-with-diskless-topics-byoc).
+ :::note
+ Inkless on Aiven cloud is available only on selected cloud providers.
+ If Inkless is not supported in the selected cloud or region, Classic Kafka is used instead.
:::
-1. Select a **Cloud**.
+1. Select a **cloud provider** and **region**.
+1. In **Stream load**, estimate the expected ingress and egress throughput.
+ This estimate is used to determine the initial number of AKUs and estimate costs, and
+ it can be adjusted later.
+1. In **Retention**, enter the data retention period.
+ Retention is used to estimate storage costs and can be adjusted after service creation.
+1. In **Service basics**, enter:
+ - **Name:** Enter a name for the service.
+ :::important
+ You cannot change the name after creation.
+ :::
+ - **Tags:** Optional. Add [resource tags](/docs/platform/howto/tag-resources) to
+ organize your services.
+1. Review the **Service summary**, and click **Create service**.
- :::note
- Available plans and pricing vary between cloud providers and regions.
- :::
+
+
-1. Select a **Plan**.
+Create an Inkless Kafka service using the Aiven CLI:
-1. Optional: Add [disk storage](/docs/platform/howto/add-storage-space).
- You can also enable [Tiered storage](/docs/products/kafka/howto/enable-kafka-tiered-storage)
- to offload older data automatically to object storage.
+```bash
+avn service create SERVICE_NAME \
+ --project PROJECT_NAME \
+ --service-type kafka \
+ --cloud CLOUD_REGION \
+ --plan INKLESS_PLAN
+```
-1. In the **Service basics** section, set the following:
- - **Service name:** Enter a name for the service.
+Parameters:
+
+- `SERVICE_NAME`: Name of the Kafka service.
+- `PROJECT_NAME`: Project that contains the service.
+- `CLOUD_REGION`: Cloud region to deploy the service in.
+- `INKLESS_PLAN`: An Inkless Kafka plan, for example `business-8-inkless`. Plan
+  availability depends on the selected cloud provider and account.
+
+
+
+
+## Create an Inkless service on Bring your own cloud (BYOC)
+
+Inkless services can run in your cloud account through BYOC. Inkless on BYOC uses Kafka
+4.x and enables diskless topics and tiered storage by default.
+
+
+
+
+1. In the [Aiven Console](https://console.aiven.io), open your project and
+ click .
+1. Click **Create service**.
+1. Select **Apache Kafka®**.
+1. Select **Inkless** as the service type.
+1. Select **Bring your own cloud (BYOC)** as the deployment mode.
+1. In the Cloud section, choose your BYOC environment and region.
+1. Choose a **plan**.
+1. In **Service basics**, enter:
+ - **Name:** Enter a name for the service.
:::important
You cannot change the name after creation.
:::
- - **Version:** Select the Kafka version. The latest supported version appears by default.
- **Tags:** Optional. Add [resource tags](/docs/platform/howto/tag-resources) to
organize your services.
+1. Review the **Service summary**, and click **Create service**.
+
+
+
-1. Review the **Service summary**.
- Confirm the version, region, plan, and estimated price.
+Use the Aiven CLI to create the service.
-1. Click **Create service**.
+```bash
+avn service create SERVICE_NAME \
+ --project PROJECT_NAME \
+ --service-type kafka \
+ --cloud CUSTOM_CLOUD_REGION \
+ --plan INKLESS_PLAN
+```
+
+Parameters:
-The service status changes to **Rebuilding** during creation.
-When it changes to **Running**, your Kafka service is ready.
+- `SERVICE_NAME`: Name of the Kafka service.
+- `PROJECT_NAME`: Aiven project name.
+- `CUSTOM_CLOUD_REGION`: BYOC cloud region, such as `custom-aws-eu-central-1`.
+- `INKLESS_PLAN`: Inkless plan, for example `business-8-inkless`.
+
+
+
+
+## Create a Classic Kafka service on Aiven cloud
+
+Classic Kafka uses fixed plans and local broker storage. It stores topic data on local
+disks by default, with optional tiered storage.
+
+
+
+
+1. In the [Aiven Console](https://console.aiven.io), open your project and
+ click .
+1. Click **Create service**.
+1. Select **Apache Kafka®**.
+1. Select **Classic Kafka** as the service type.
+1. Select **Aiven cloud** as the deployment mode.
+1. In the **Cloud** section:
+
+ - Choose a **cloud provider**.
+ - Select a **region**.
+1. In the **Plan** section, choose a plan from the available plan groups.
+1. Optional:
+
+ - Add [disk storage](/docs/platform/howto/add-storage-space).
+ - Enable [Tiered storage](/docs/products/kafka/howto/enable-kafka-tiered-storage) if
+ supported for your plan and region.
+1. In **Service basics**, enter:
+
+ - **Name:** Name of the service.
+ - **Version:** Select the Kafka version. The latest supported version appears by
+ default.
+ - **Tags:** Optional. Add [resource tags](/docs/platform/howto/tag-resources) to
+ organize your services.
+1. Review the **Service summary**, then click **Create service**.
-Create a Kafka service using the Aiven CLI.
+Create a classic Kafka service using the Aiven CLI.
```bash
avn service create SERVICE_NAME \
@@ -134,16 +206,15 @@ avn service create SERVICE_NAME \
Parameters:
-- `SERVICE_NAME`: The name of the Kafka service
-- `CLOUD_REGION`: The cloud and region
-- `PLAN_NAME`: The plan name
+- `SERVICE_NAME`: Name of the Kafka service.
+- `CLOUD_REGION`: Cloud provider and region.
+- `PLAN_NAME`: Classic Kafka plan.
-Wait until the service status changes to **RUNNING**.
-Use Terraform to create a Kafka service in your Aiven project.
+Use Terraform to create a classic Kafka service in your Aiven project.
1. Create a file named `provider.tf` and add the following:
@@ -159,7 +230,8 @@ Use Terraform to create a Kafka service in your Aiven project.
1. Create the `terraform.tfvars` file and add the values for your token and project name.
-1. Optional: To output connection details, create a file named `output.tf` and add the following:
+1. Optional: To output connection details, create a file named `output.tf` and add the
+ following:
@@ -168,147 +240,71 @@ Use Terraform to create a Kafka service in your Aiven project.
-## Create a Kafka service with diskless topics (BYOC)
-
-Use [diskless topics](/docs/products/kafka/diskless/concepts/diskless-overview) to
-store Kafka data in cloud object storage instead of local disks.
-You can use both diskless and classic topics in the same Kafka cluster.
+## Create a Classic Kafka service on Bring your own cloud (BYOC)
-For instructions on setting up a BYOC environment, see
-[Create a custom cloud (BYOC)](/docs/platform/howto/byoc/create-cloud/create-custom-cloud).
+You can run Classic Kafka in your own cloud account using BYOC.
-
+
-1. In your project, click .
+1. In the [Aiven Console](https://console.aiven.io), open your project and
+ click .
1. Click **Create service**.
-1. Select **Aiven for Apache Kafka®**.
-1. Under **Optimize cost**, turn on **Enable diskless topics**.
-1. Under **Add service metadata**, set the following:
+1. Select **Apache Kafka®**.
+1. Select **Classic Kafka** as the service type.
+1. Select **Bring your own cloud (BYOC)** as the deployment mode.
+1. In the **Cloud** section:
+ - Select your **BYOC environment**.
+ - Select a **region**.
+1. In the **Plan** section, choose a plan from the available plan groups.
+1. Optional:
+ - Adjust **Additional disk storage**.
+ - Enable **Tiered storage** if supported for your plan and region.
+1. In **Service basics**, enter:
+ - **Name:** Name of the service.
- **Version:** Select the Kafka version. The latest supported version appears by
default.
- :::note
- Diskless topics require Apache Kafka® version 4.0 or later.
- :::
- - **Service name:** Enter a name for your service.
- :::important
- You cannot change the name after creation.
- :::
- **Tags:** Optional. Add [resource tags](/docs/platform/howto/tag-resources) to
organize your services.
-1. Select the **cloud provider**, **BYOC region**, and **plan**.
-1. Under **Select plan**, choose one of the plans available for diskless topics.
-1. Review the **Service summary** on the right.
- Confirm the version, region, plan, and estimated price.
-1. Click **Create service**.
+1. Review the **Service summary**, then click **Create service**.
-You can create a Kafka service with diskless topics enabled using the Aiven CLI.
+Use the Aiven CLI to create a Classic Kafka BYOC service.
```bash
avn service create SERVICE_NAME \
--project PROJECT_NAME \
--service-type kafka \
- --cloud CLOUD_NAME \
- --plan PLAN_NAME \
- -c kafka_version=4.0 \
- -c kafka_diskless.enabled=true
+ --cloud CUSTOM_CLOUD_REGION \
+ --plan PLAN_NAME
```
Parameters:
-- `SERVICE_NAME`: Name of your Kafka service.
-- `PROJECT_NAME`: Your Aiven project name.
-- `CLOUD_NAME`: Custom BYOC cloud region, for example `custom-aws-eu-central-1`.
-- `PLAN_NAME`: Diskless-compatible plan, such as `business-8-inkless`. Plans that support
- diskless topics include `-inkless` in the plan name.
-- `kafka_diskless.enabled`: Enables diskless topics. Must be set to `true`.
-
-
-
-
-You can create a Kafka service with diskless topics enabled using Terraform.
-
-1. Create a file named `main.tf` and add the following:
-
- ```hcl
- terraform {
- required_providers {
- aiven = {
- source = "aiven/aiven"
- version = ">=4.0.0, <5.0.0"
- }
- }
- }
-
- provider "aiven" {
- api_token = var.aiven_token
- }
-
- resource "aiven_kafka" "diskless_kafka" {
- project = var.aiven_project_name
- service_name = "kafka-diskless"
- cloud_name = "custom-aws-eu-central-1"
- plan = "business-8-inkless"
-
- kafka_user_config = {
- kafka_version = "4.0"
- kafka_diskless = {
- enabled = true
- }
- }
- }
- ```
-
-1. Create a `variables.tf` file:
-
- ```hcl
- variable "aiven_token" {
- description = "Aiven API token"
- type = string
- }
-
- variable "aiven_project_name" {
- description = "Your Aiven project name"
- type = string
- }
- ```
-
-1. Initialize and apply your configuration:
-
- ```hcl
- terraform init
- terraform apply --auto-approve
- ```
+- `CUSTOM_CLOUD_REGION`: Your BYOC region.
+- `PLAN_NAME`: Classic Kafka BYOC plan.
-### After service creation
-
-When you create a Kafka service with diskless topics, Aiven deploys it directly in your
-BYOC environment using your connected cloud account. The service runs entirely within
-your cloud account.
-
-Aiven configures the following:
+## After service creation
-- **Access to object storage** for storing Kafka topic data, either through an
- Aiven-managed or a customer-provided bucket, depending on your BYOC configuration.
-- **A PostgreSQL-based coordinator** managed as a service integration with Kafka.
- This coordinator maintains message ordering and metadata consistency for diskless topics.
- It is required for the current implementation of diskless topics. For details about
- how the coordinator is upgraded, see
- [PostgreSQL service upgrades](/docs/products/kafka/diskless/concepts/limitations#automatic-postgresql-service-upgrades).
+Inkless services require a metadata coordinator and object storage. Aiven provisions
+these components automatically.
-After creation, the **Kafka Diskless PostgreSQL** integration appears on the
- page in the Aiven Console. This integration is managed
-by Aiven and cannot be modified or deleted.
+Aiven configures:
-To learn more about how diskless topics work, see
-[Diskless topics overview](/docs/products/kafka/diskless/concepts/diskless-overview).
+- **Object storage access** for storing diskless topic data. Inkless uses an
+ Aiven-managed object storage bucket, which is created and managed for you.
+- **A PostgreSQL-based coordinator** that stores metadata for diskless topics. The
+ coordinator is provisioned automatically and linked to the Kafka service through a
+ managed integration. It maintains metadata such as batch offsets and storage locations.
+After creation, the **Kafka Inkless PostgreSQL** integration appears on
+the page in the Aiven Console. This integration
+is managed by Aiven and cannot be modified or removed.
diff --git a/sidebars.ts b/sidebars.ts
index afab225c9..b21186232 100644
--- a/sidebars.ts
+++ b/sidebars.ts
@@ -738,14 +738,6 @@ const sidebars: SidebarsConfig = {
type: 'doc',
},
items: [
- {
- type: 'category',
- label: 'Free tier',
- items: [
- 'products/kafka/free-tier/kafka-free-tier',
- 'products/kafka/free-tier/create-free-tier-kafka-service',
- ],
- },
'products/kafka/create-kafka-service',
{
type: 'category',
@@ -765,7 +757,16 @@ const sidebars: SidebarsConfig = {
items: [
{
type: 'category',
- label: 'Core concepts',
+ label: 'Inkless',
+ link: {type: 'doc', id: 'products/kafka/concepts/inkless'},
+ items: [
+ 'products/kafka/concepts/inkless-aku',
+ 'products/kafka/concepts/inkless-billing',
+ ],
+ },
+ {
+ type: 'category',
+ label: 'Kafka fundamentals',
items: [
'products/kafka/concepts/partition-segments',
'products/kafka/concepts/log-compaction',
@@ -775,18 +776,6 @@ const sidebars: SidebarsConfig = {
'products/kafka/concepts/kafka-rest-api',
],
},
- {
- type: 'category',
- label: 'Operating Kafka with Aiven',
- items: [
- 'products/kafka/concepts/upgrade-procedure',
- 'products/kafka/concepts/horizontal-vertical-scaling',
- 'products/kafka/concepts/configuration-backup',
- 'products/kafka/concepts/monitor-consumer-group',
- 'products/kafka/concepts/consumer-lag-predictor',
- 'products/kafka/concepts/follower-fetching',
- ],
- },
{
type: 'category',
label: 'Diskless topics',
@@ -815,12 +804,25 @@ const sidebars: SidebarsConfig = {
'products/kafka/concepts/tiered-storage-limitations',
],
},
+
'products/kafka/concepts/governance-overview',
'products/kafka/concepts/kafka-quotas',
'products/kafka/concepts/kraft-mode',
+
+ {
+ type: 'category',
+        label: 'Operate Kafka on Aiven',
+ items: [
+ 'products/kafka/concepts/upgrade-procedure',
+ 'products/kafka/concepts/horizontal-vertical-scaling',
+ 'products/kafka/concepts/configuration-backup',
+ 'products/kafka/concepts/monitor-consumer-group',
+ 'products/kafka/concepts/consumer-lag-predictor',
+ 'products/kafka/concepts/follower-fetching',
+ ],
+ },
],
},
-
{
type: 'category',
label: 'How to',
@@ -1700,7 +1702,6 @@ const sidebars: SidebarsConfig = {
'products/opensearch/howto/handle-low-disk-space',
'products/opensearch/howto/resolve-shards-too-large',
'products/opensearch/howto/setup-cross-cluster-replication-opensearch',
- 'products/opensearch/howto/enable-slow-query-log',
],
},
{
@@ -1720,8 +1721,8 @@ const sidebars: SidebarsConfig = {
label: 'Reference',
items: [
'products/opensearch/reference/plugins',
- 'products/opensearch/reference/list-of-plugins-for-each-version',
'products/opensearch/reference/advanced-params',
+
'products/opensearch/reference/restapi-limited-access',
'products/opensearch/reference/low-space-watermarks',
'products/opensearch/howto/os-metrics',
@@ -1962,7 +1963,6 @@ const sidebars: SidebarsConfig = {
'products/valkey/concepts/lua-scripts',
'products/valkey/concepts/memory-usage',
'products/valkey/concepts/read-replica',
- 'products/valkey/concepts/valkey-cluster',
],
},
{