From ea92b5fb3618a6edbaf9df2e899d81b2388772fe Mon Sep 17 00:00:00 2001
From: W1y1r <2730956796@qq.com>
Date: Wed, 19 Feb 2025 17:58:57 +0800
Subject: [PATCH] Deployment and Maintenance Module English PR
---
.../Cluster-Deployment_apache.md | 349 ++++++
.../Cluster-Deployment_timecho.md | 486 ++++----
.../Database-Resources.md | 102 +-
.../Deployment-form_apache.md | 52 +
.../Deployment-form_timecho.md | 63 +
.../Docker-Deployment_apache.md | 154 +--
.../Docker-Deployment_timecho.md | 189 ++-
.../Dual-Active-Deployment_timecho.md | 168 +--
.../Environment-Requirements.md | 339 +++---
.../IoTDB-Package_apache.md | 47 +
.../IoTDB-Package_timecho.md | 44 +-
.../Monitoring-panel-deployment.md | 1074 +++++++++--------
.../Stand-Alone-Deployment_apache.md | 192 +++
.../Stand-Alone-Deployment_timecho.md | 326 +++--
.../Cluster-Deployment_apache.md | 2 +-
.../Cluster-Deployment_timecho.md | 5 +-
.../IoTDB-Package_apache.md | 41 +-
.../IoTDB-Package_timecho.md | 44 +-
.../Cluster-Deployment_apache.md | 349 ++++++
.../Cluster-Deployment_timecho.md | 487 ++++----
.../Database-Resources.md | 98 +-
.../Deployment-form_apache.md | 52 +
.../Deployment-form_timecho.md | 63 +
.../Docker-Deployment_apache.md | 154 +--
.../Docker-Deployment_timecho.md | 189 ++-
.../Dual-Active-Deployment_timecho.md | 168 +--
.../Environment-Requirements.md | 339 +++---
.../IoTDB-Package_apache.md | 47 +
.../IoTDB-Package_timecho.md | 44 +-
.../Monitoring-panel-deployment.md | 1074 +++++++++--------
.../Stand-Alone-Deployment_apache.md | 192 +++
.../Stand-Alone-Deployment_timecho.md | 326 +++--
.../Cluster-Deployment_apache.md | 2 +-
.../Cluster-Deployment_timecho.md | 5 +-
.../IoTDB-Package_apache.md | 41 +-
.../IoTDB-Package_timecho.md | 44 +-
.../Cluster-Deployment_apache.md | 13 +-
.../IoTDB-Package_apache.md | 20 +-
.../Stand-Alone-Deployment_apache.md | 9 +-
.../IoTDB-Package_apache.md | 20 +-
.../IoTDB-Package_timecho.md | 22 +-
.../Cluster-Deployment_apache.md | 13 +-
.../IoTDB-Package_apache.md | 20 +-
.../Stand-Alone-Deployment_apache.md | 9 +-
.../IoTDB-Package_apache.md | 20 +-
.../IoTDB-Package_timecho.md | 22 +-
46 files changed, 4492 insertions(+), 3027 deletions(-)
create mode 100644 src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
create mode 100644 src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_apache.md
create mode 100644 src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_timecho.md
create mode 100644 src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
create mode 100644 src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
create mode 100644 src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
create mode 100644 src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_apache.md
create mode 100644 src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_timecho.md
create mode 100644 src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
create mode 100644 src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
new file mode 100644
index 000000000..0214e7996
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
@@ -0,0 +1,349 @@
+
+# Cluster Deployment
+
+This guide describes how to manually deploy a cluster instance consisting of 3 ConfigNodes and 3 DataNodes (commonly referred to as a 3C3D cluster).
+
+
+## 1 Prerequisites
+
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
+
+2. **IP Configuration**: It is recommended to use hostnames rather than raw IP addresses in the configuration, to prevent issues caused by later IP changes. Map each hostname to its IP by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
+
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
+
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
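+
+You can verify that the hostname resolves correctly before proceeding, for example:
+
+```Bash
+ping -c 1 iotdb-1
+```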
+
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#33-parameters-configuration) section.
+
+
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
+5. **User Permissions**: Perform installation and deployment as one of the following users:
+   - **Root User (Recommended)**: This avoids permission-related issues.
+   - **Non-Root User**:
+     - Use the same user for all operations, including starting and stopping services.
+     - Avoid using `sudo`, which can cause permission conflicts.
+
+6. **Monitoring Panel**: It is recommended to deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access, and see [Monitoring Panel Deployment](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) for deployment steps.
+
+## 2 Preparation
+
+1. Prepare the IoTDB installation package `apache-iotdb-{version}-all-bin.zip` (for details on obtaining it, see [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_apache.md)).
+
+2. Configure the operating system environment according to the [Environment Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
+
+
+## 3 Installation Steps
+
+Take a cluster of three Linux servers with the following roles as an example:
+
+| Node IP | Hostname | Services |
+| ------------- | -------- | -------------------- |
+| 11.101.17.224 | iotdb-1 | ConfigNode, DataNode |
+| 11.101.17.225 | iotdb-2 | ConfigNode, DataNode |
+| 11.101.17.226 | iotdb-3 | ConfigNode, DataNode |
+
+### 3.1 Configure Hostnames
+
+On all three servers, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
+
+```Bash
+echo "11.101.17.224 iotdb-1" >> /etc/hosts
+echo "11.101.17.225 iotdb-2" >> /etc/hosts
+echo "11.101.17.226 iotdb-3" >> /etc/hosts
+```
+
+### 3.2 Extract Installation Package
+
+Unzip the installation package and navigate to the directory:
+
+```Plain
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
+```
+### 3.3 Parameters Configuration
+
+#### 3.3.1 Memory Configuration
+
+Edit the following files for memory allocation:
+
+- **ConfigNode**: `./conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `./conf/datanode-env.sh` (or `.bat` for Windows)
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :--------------------------------- | :---------- | :-------------- | :-------------------------------------- |
+| MEMORY_SIZE | Total memory allocated to the node | Empty | As needed | Effective after restarting the service. |
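+
+For example, to allocate a total of 8 GB to a DataNode, you could set the variable in `./conf/datanode-env.sh` (the `8G` value is only an illustrative assumption; the syntax follows the commented examples shipped in the script):
+
+```Bash
+MEMORY_SIZE=8G
+```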
+
+#### 3.3.2 General Configuration
+
+Set the following parameters in `./conf/iotdb-system.properties`. Refer to `./conf/iotdb-system.properties.template` for a complete list.
+
+**Cluster-Level Parameters**:
+
+| **Parameter** | **Description** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** |
+| :------------------------ | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- |
+| cluster_name | Name of the cluster | defaultCluster | defaultCluster | defaultCluster |
+| schema_replication_factor | Metadata replication factor; the number of DataNodes must not be fewer than this value | 3 | 3 | 3 |
+| data_replication_factor | Data replication factor; the number of DataNodes must not be fewer than this value | 2 | 2 | 2 |
+
+**ConfigNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | 10720 | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the seed ConfigNode (e.g., `cn_internal_address:cn_internal_port`) | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
+
+**DataNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | 10740 | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster.(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address of the first ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
+
+**Note:** Ensure files are saved after editing. Tools like VSCode Remote do not save changes automatically.
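+
+For illustration, after filling in the tables above, the relevant entries in `./conf/iotdb-system.properties` on `iotdb-1` would look as follows (the other two servers differ only in `cn_internal_address` and `dn_internal_address`):
+
+```Plain
+cluster_name=defaultCluster
+schema_replication_factor=3
+data_replication_factor=2
+cn_internal_address=iotdb-1
+cn_internal_port=10710
+cn_consensus_port=10720
+cn_seed_config_node=iotdb-1:10710
+dn_rpc_address=0.0.0.0
+dn_rpc_port=6667
+dn_internal_address=iotdb-1
+dn_internal_port=10730
+dn_mpp_data_exchange_port=10740
+dn_data_region_consensus_port=10750
+dn_schema_region_consensus_port=10760
+dn_seed_config_node=iotdb-1:10710
+```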
+
+### 3.4 Start ConfigNode Instances
+
+1. Start the first ConfigNode (`iotdb-1`), which serves as the seed node:
+
+```Bash
+cd sbin
+./start-confignode.sh -d  # The "-d" flag starts the process in the background.
+```
+
+2. Start the remaining ConfigNodes (`iotdb-2` and `iotdb-3`) in sequence.
+
+If the startup fails, refer to the [Common Issues](#5-common-issues) section below for troubleshooting.
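+
+To confirm that each ConfigNode process is up, you can check the Java process list; the node should appear as `ConfigNode`:
+
+```Bash
+jps | grep ConfigNode
+```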
+
+### 3.5 Start DataNode Instances
+
+On each server, navigate to the `sbin` directory and start the DataNode:
+
+```Bash
+cd sbin
+./start-datanode.sh -d  # The "-d" flag starts the process in the background.
+```
+
+### 3.6 Verify Deployment
+
+Connect to the cluster through the CLI and run `show cluster`. If the status of every ConfigNode and DataNode is `Running`, the cluster has been deployed successfully.
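+
+For example, connecting to `iotdb-1` with the default CLI settings:
+
+```Bash
+sbin/start-cli.sh -h iotdb-1
+IoTDB> show cluster
+```
+
+All 3 ConfigNodes and 3 DataNodes should be listed with the status `Running`.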
+
+## 4 Maintenance
+
+### 4.1 ConfigNode Maintenance
+
+ConfigNode maintenance includes adding and removing ConfigNodes. Common use cases include:
+
+- **Cluster Expansion:** If the cluster contains only 1 ConfigNode, adding 2 more ConfigNodes enhances high availability, resulting in a total of 3 ConfigNodes.
+- **Cluster Fault Recovery:** If a ConfigNode's machine fails and it cannot function normally, remove the faulty ConfigNode and add a new one to the cluster.
+
+**Note:** After completing ConfigNode maintenance, ensure that the cluster contains either 1 or 3 active ConfigNodes. Two ConfigNodes do not provide high availability, and more than three ConfigNodes can degrade performance.
+
+#### 4.1.1 Adding a ConfigNode
+
+**Linux / MacOS :**
+
+```Plain
+sbin/start-confignode.sh
+```
+
+**Windows:**
+
+```Plain
+sbin/start-confignode.bat
+```
+
+#### 4.1.2 Removing a ConfigNode
+
+1. Connect to the cluster using the CLI and confirm the internal address and port of the ConfigNode to be removed:
+
+```Plain
+show confignodes;
+```
+
+Example output:
+
+```Plain
+IoTDB> show confignodes
++------+-------+---------------+------------+--------+
+|NodeID| Status|InternalAddress|InternalPort| Role|
++------+-------+---------------+------------+--------+
+| 0|Running| 127.0.0.1| 10710| Leader|
+| 1|Running| 127.0.0.1| 10711|Follower|
+| 2|Running| 127.0.0.1| 10712|Follower|
++------+-------+---------------+------------+--------+
+Total line number = 3
+It costs 0.030s
+```
+
+2. Remove the ConfigNode using the script:
+
+**Linux / MacOS:**
+
+```Bash
+sbin/remove-confignode.sh [confignode_id]
+# Or:
+sbin/remove-confignode.sh [cn_internal_address:cn_internal_port]
+```
+
+**Windows:**
+
+```Bash
+sbin/remove-confignode.bat [confignode_id]
+# Or:
+sbin/remove-confignode.bat [cn_internal_address:cn_internal_port]
+```
+
+### 4.2 DataNode Maintenance
+
+DataNode maintenance includes adding and removing DataNodes. Common use cases include:
+
+- **Cluster Expansion:** Add new DataNodes to increase cluster capacity.
+- **Cluster Fault Recovery:** If a DataNode's machine fails and it cannot function normally, remove the faulty DataNode and add a new one to the cluster.
+
+**Note:** During and after DataNode maintenance, ensure that the number of active DataNodes is not fewer than the data replication factor (usually 2) or the schema replication factor (usually 3).
+
+#### 4.2.1 Adding a DataNode
+
+**Linux / MacOS:**
+
+```Plain
+sbin/start-datanode.sh
+```
+
+**Windows:**
+
+```Plain
+sbin/start-datanode.bat
+```
+
+**Note:** After adding a DataNode, the cluster load will gradually balance across all nodes as new writes arrive and old data expires (if TTL is set).
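+
+You can observe how regions are distributed across the DataNodes from the CLI, for example:
+
+```Bash
+IoTDB> show regions
+```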
+
+#### 4.2.2 Removing a DataNode
+
+1. Connect to the cluster using the CLI and confirm the RPC address and port of the DataNode to be removed:
+
+```Plain
+show datanodes;
+```
+
+Example output:
+
+```Plain
+IoTDB> show datanodes
++------+-------+----------+-------+-------------+---------------+
+|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum|
++------+-------+----------+-------+-------------+---------------+
+| 1|Running| 0.0.0.0| 6667| 0| 0|
+| 2|Running| 0.0.0.0| 6668| 1| 1|
+| 3|Running| 0.0.0.0| 6669| 1| 0|
++------+-------+----------+-------+-------------+---------------+
+Total line number = 3
+It costs 0.110s
+```
+
+2. Remove the DataNode using the script:
+
+**Linux / MacOS:**
+
+```Bash
+sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port]
+```
+
+**Windows:**
+
+```Bash
+sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port]
+```
+
+## 5 Common Issues
+
+1. ConfigNode Fails to Start
+    1. Review the startup logs to check whether any parameters that cannot be modified after the first startup were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
+
+ **Clean the Environment**
+
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
+
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+
+ 3. If processes remain, terminate them manually:
+ ```Bash
+    kill -9 <pid>
+
+    # For systems with a single IoTDB instance, you can clean up residual processes with:
+ ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+    4. Delete the `data` and `logs` directories (deleting `data` is required; removing `logs` is optional and only keeps the logs clean):
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 6 Appendix
+
+### 6.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 6.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
index 2b7445540..306420109 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
@@ -20,54 +20,56 @@
-->
# Cluster Deployment
-This section describes how to manually deploy an instance that includes 3 ConfigNodes and 3 DataNodes, commonly known as a 3C3D cluster.
+This guide describes how to manually deploy a cluster instance consisting of 3 ConfigNodes and 3 DataNodes (commonly referred to as a 3C3D cluster).
-## Note
+## 1 Prerequisites
-1. Before installation, ensure that the system is complete by referring to [System Requirements](./Environment-Requirements.md)
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
-2. It is recommended to prioritize using `hostname` for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure /etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure the `cn_internal_address` and `dn_internal_address` of IoTDB using the host name.
+2. **IP Configuration**: It is recommended to use hostnames rather than raw IP addresses in the configuration, to prevent issues caused by later IP changes. Map each hostname to its IP by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
- ``` shell
- echo "192.168.1.3 iotdb-1" >> /etc/hosts
- ```
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
+
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
-3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings.
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#33-parameters-configuration) section.
-4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions.
-5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can:
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
+5. **User Permissions**: Perform installation and deployment as one of the following users:
+   - **Root User (Recommended)**: This avoids permission-related issues.
+   - **Non-Root User**:
+     - Use the same user for all operations, including starting, activating, and stopping services.
+     - Avoid using `sudo`, which can cause permission conflicts.
-- Using root user (recommended): Using root user can avoid issues such as permissions.
-- Using a fixed non root user:
- - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users.
- - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues.
+6. **Monitoring Panel**: It is recommended to deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access, and see [Monitoring Panel Deployment](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) for deployment steps.
-6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department,The steps for deploying a monitoring panel can refer to:[Monitoring Panel Deployment](./Monitoring-panel-deployment.md)
+## 2 Preparation
-## Preparation Steps
+1. Obtain the TimechoDB installation package `timechodb-{version}-bin.zip` (for details on obtaining it, see [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)).
-1. Prepare the IoTDB database installation package: timechodb-{version}-bin.zip(The installation package can be obtained from:[IoTDB-Package](./IoTDB-Package_timecho.md))
-2. Configure the operating system environment according to environmental requirements(The system environment configuration can be found in:[Environment Requirement](./Environment-Requirements.md))
+2. Configure the operating system environment according to the [Environment Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
-## Installation Steps
+## 3 Installation Steps
-Assuming there are three Linux servers now, the IP addresses and service roles are assigned as follows:
+Take a cluster of three Linux servers with the following roles as an example:
-| Node IP | Host Name | Service |
-| ------------- | --------- | -------------------- |
-| 11.101.17.224 | iotdb-1 | ConfigNode、DataNode |
-| 11.101.17.225 | iotdb-2 | ConfigNode、DataNode |
-| 11.101.17.226 | iotdb-3 | ConfigNode、DataNode |
+| Node IP | Hostname | Services |
+| ------------- | -------- | -------------------- |
+| 11.101.17.224 | iotdb-1 | ConfigNode, DataNode |
+| 11.101.17.225 | iotdb-2 | ConfigNode, DataNode |
+| 11.101.17.226 | iotdb-3 | ConfigNode, DataNode |
-### Set Host Name
+### 3.1 Configure Hostnames
-On three machines, configure the host names separately. To set the host names, configure `/etc/hosts` on the target server. Use the following command:
+On all three servers, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
```Bash
echo "11.101.17.224 iotdb-1" >> /etc/hosts
@@ -75,185 +77,182 @@ echo "11.101.17.225 iotdb-2" >> /etc/hosts
echo "11.101.17.226 iotdb-3" >> /etc/hosts
```
-### Configuration
+### 3.2 Extract Installation Package
-Unzip the installation package and enter the installation directory
+Unzip the installation package and navigate to the directory:
```Plain
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+unzip timechodb-{version}-bin.zip
+cd timechodb-{version}-bin
```
-#### Environment script configuration
-
-- `./conf/confignode-env.sh` configuration
-
- | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
- | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- |
- | MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+### 3.3 Parameters Configuration
-- `./conf/datanode-env.sh` configuration
+#### 3.3.1 Memory Configuration
- | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
- | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- |
- | MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+Edit the following files for memory allocation:
-#### General Configuration(./conf/iotdb-system.properties)
+- **ConfigNode**: `./conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `./conf/datanode-env.sh` (or `.bat` for Windows)
-- Cluster Configuration
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :--------------------------------- | :---------- | :-------------- | :-------------------------------------- |
+| MEMORY_SIZE | Total memory allocated to the node | Empty | As needed | Effective after restarting the service. |
- | **Configuration** | **Description** | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 |
- | ------------------------- | ------------------------------------------------------------ | -------------- | -------------- | -------------- |
- | cluster_name | Cluster Name | defaultCluster | defaultCluster | defaultCluster |
- | schema_replication_factor | The number of metadata replicas, the number of DataNodes should not be less than this number | 3 | 3 | 3 |
- | data_replication_factor | The number of data replicas should not be less than this number of DataNodes | 2 | 2 | 2 |
+#### 3.3.2 General Configuration
-#### ConfigNode Configuration
+Set the following parameters in `./conf/iotdb-system.properties`. Refer to `./conf/iotdb-system.properties.template` for a complete list.
-| **Configuration** | **Description** | **Default** | **Recommended value** | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | Note |
-| ------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- |
-| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup |
-| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | Cannot be modified after initial startup |
-| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | 10720 | 10720 | 10720 | Cannot be modified after initial startup |
-| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, `cn_internal_address:cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's `cn_internal-address: cn_internal_port` | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup |
+**Cluster-Level Parameters**:
-#### Datanode Configuration
+| **Parameter** | **Description** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** |
+| :------------------------ | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- |
+| cluster_name | Name of the cluster | defaultCluster | defaultCluster | defaultCluster |
+| schema_replication_factor | Metadata replication factor; the number of DataNodes must not be fewer than this value | 3 | 3 | 3 |
+| data_replication_factor | Data replication factor; the number of DataNodes must not be fewer than this value | 2 | 2 | 2 |
-| **Configuration** | **Description** | **Default** | **Recommended value** | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | Note |
-| ------------------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- |
-| dn_rpc_address | The address of the client RPC service | 127.0.0.1 | Recommend using the **IPV4 address or hostname** of the server where it is located | iotdb-1 | iotdb-2 | iotdb-3 | Restarting the service takes effect |
-| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Restarting the service takes effect |
-| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup |
-| dn_internal_port | The port used by DataNode for communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | Cannot be modified after initial startup |
-| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | 10740 | 10740 | 10740 | Cannot be modified after initial startup |
-| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | Cannot be modified after initial startup |
-| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | Cannot be modified after initial startup |
-| dn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, i.e. `cn_internal-address: cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's cn_internal-address: cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup |
+**ConfigNode Parameters**:
-> ❗️Attention: Editors such as VSCode Remote do not have automatic configuration saving function. Please ensure that the modified files are saved persistently, otherwise the configuration items will not take effect
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | 10720 | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the seed ConfigNode (e.g., `cn_internal_address:cn_internal_port`) | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
-### Start ConfigNode
+**DataNode Parameters**:
-Start the first confignode of IoTDB-1 first, ensuring that the seed confignode node starts first, and then start the second and third confignode nodes in sequence
-
-```Bash
-cd sbin
-
-./start-confignode.sh -d #"- d" parameter will start in the background
-```
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | 10740 | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster.(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address of the first ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
-If the startup fails, please refer to [Common Questions](#common-questions).
+**Note:** Ensure files are saved after editing. Tools like VSCode Remote do not save changes automatically.
-### Start DataNode
+### 3.4 Start ConfigNode Instances
- Enter the `sbin` directory of iotdb and start three datanode nodes in sequence:
+1. Start the first ConfigNode (`iotdb-1`), which serves as the seed node:
-```Go
-cd sbin
+```Bash
+cd sbin
+./start-confignode.sh -d  # The "-d" flag starts the process in the background.
+```
-./start-datanode.sh -d #"- d" parameter will start in the background
-```
+2. Start the remaining ConfigNodes (`iotdb-2` and `iotdb-3`) in sequence.
-### Activate Database
+If the startup fails, refer to the [Common Issues](#5-common-issues) section below for troubleshooting.
-#### Method 1: Activate file copy activation
+### 3.5 Start DataNode Instances
-- After starting three Confignode Datanode nodes in sequence, copy the `activation` folder of each machine and the `system_info` file of each machine to the Timecho staff;
+On each server, navigate to the `sbin` directory and start the DataNode:
-- The staff will return the license files for each ConfigNode Datanode node, where 3 license files will be returned;
+```Bash
+cd sbin
+./start-datanode.sh -d  # The "-d" flag starts the process in the background.
+```
-- Put the three license files into the `activation` folder of the corresponding ConfigNode node;
+### 3.6 Activate the Database
-#### Method 2: Activate Script Activation
+#### Option 1: File-Based Activation
-- Retrieve the machine codes of 3 machines in sequence and enter IoTDB CLI
+1. Start all ConfigNodes and DataNodes.
+2. Copy the `system_info` file from the `activation` directory of each server and send these files to the Timecho team.
+3. Place the license files provided by the Timecho team into the corresponding `activation` folder for each node.
- - Table Model CLI Enter Command:
+#### Option 2: Command-Based Activation
- ```SQL
- # Linux or MACOS
- ./start-cli.sh -sql_dialect table
-
- # windows
- ./start-cli.bat -sql_dialect table
- ```
+1. Enter the IoTDB CLI for each node:
+ - **For Table Model**:
+ ```SQL
+ # For Linux or macOS
+ ./start-cli.sh -sql_dialect table
+
+ # For Windows
+ ./start-cli.bat -sql_dialect table
+ ```
- - Enter the tree model CLI command:
+ - **For Tree Model**:
+ ```SQL
+ # For Linux or macOS
+ ./start-cli.sh
+
+ # For Windows
+ ./start-cli.bat
+ ```
+2. Run the following command to retrieve the machine code required for activation:
- ```SQL
- # Linux or MACOS
- ./start-cli.sh
-
- # windows
- ./start-cli.bat
- ```
+ ```Bash
+ show system info
+ ```
- - Execute the following to obtain the machine code required for activation:
- - Note: Currently, activation is only supported in tree models
+**Note**: Activation is currently supported only in the Tree Model.
- ```Bash
- show system info
- ```
+3. Copy the machine code returned on each server (displayed as a green string) and send the three machine codes to the Timecho team. Example output:
- - The following information is displayed, which shows the machine code of one machine:
-
- ```Bash
- +--------------------------------------------------------------+
- | SystemInfo|
- +--------------------------------------------------------------+
- |01-TE5NLES4-UDDWCMYE,01-GG5NLES4-XXDWCMYE,01-FF5NLES4-WWWWCMYE|
- +--------------------------------------------------------------+
- Total line number = 1
- It costs 0.030s
- ```
-
-- The other two nodes enter the CLI of the IoTDB tree model in sequence, execute the statement, and copy the machine codes of the three machines obtained to the Timecho staff
-
-- The staff will return three activation codes, which normally correspond to the order of the three machine codes provided. Please paste each activation code into the CLI separately, as prompted below:
+```Bash
++--------------------------------------------------------------+
+| SystemInfo|
++--------------------------------------------------------------+
+|01-TE5NLES4-UDDWCMYE,01-GG5NLES4-XXDWCMYE,01-FF5NLES4-WWWWCMYE|
++--------------------------------------------------------------+
+Total line number = 1
+It costs 0.030s
+```
- - Note: The activation code needs to be marked with a `'`symbol before and after, as shown in
+4. In the CLI, enter the activation codes provided by the Timecho team in sequence, wrapping each activation code in single quotes (`'`):
- ```Bash
- IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA==='
- ```
+```Bash
+IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA==='
+```
-### Verify Activation
+### 3.7 Verify Activation
-When the status of the 'Result' field is displayed as' success', it indicates successful activation
+Run `show cluster` in the CLI and check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated.
-## Node Maintenance Steps
-
-### ConfigNode Node Maintenance
+## 4 Maintenance
-ConfigNode node maintenance is divided into two types of operations: adding and removing ConfigNodes, with two common use cases:
+### 4.1 ConfigNode Maintenance
-- Cluster expansion: For example, when there is only one ConfigNode in the cluster, and you want to increase the high availability of ConfigNode nodes, you can add two ConfigNodes, making a total of three ConfigNodes in the cluster.
+ConfigNode maintenance includes adding and removing ConfigNodes. Common use cases include:
-- Cluster failure recovery: When the machine where a ConfigNode is located fails, making the ConfigNode unable to run normally, you can remove this ConfigNode and then add a new ConfigNode to the cluster.
+- **Cluster Expansion:** If the cluster contains only 1 ConfigNode, adding 2 more ConfigNodes enhances high availability, resulting in a total of 3 ConfigNodes.
+- **Cluster Fault Recovery:** If a ConfigNode's machine fails and it cannot function normally, remove the faulty ConfigNode and add a new one to the cluster.
-> ❗️Note, after completing ConfigNode node maintenance, you need to ensure that there are 1 or 3 ConfigNodes running normally in the cluster. Two ConfigNodes do not have high availability, and more than three ConfigNodes will lead to performance loss.
+**Note:** After completing ConfigNode maintenance, ensure that the cluster contains either 1 or 3 active ConfigNodes. Two ConfigNodes do not provide high availability, and more than three ConfigNodes can degrade performance.
-#### Adding ConfigNode Nodes
+#### 4.1.1 Adding a ConfigNode
-Script command:
+**Linux / MacOS :**
-```shell
-# Linux / MacOS
-# First switch to the IoTDB root directory
+```Plain
sbin/start-confignode.sh
+```
-# Windows
-# First switch to the IoTDB root directory
+**Windows:**
+
+```Plain
sbin/start-confignode.bat
```
-#### Removing ConfigNode Nodes
+#### 4.1.2 Removing a ConfigNode
-First connect to the cluster through the CLI and confirm the internal address and port number of the ConfigNode you want to remove by using `show confignodes`:
+1. Connect to the cluster using the CLI and confirm the internal address and port of the ConfigNode to be removed:
-```Bash
+```Plain
+show confignodes;
+```
+
+Example output:
+
+```Plain
IoTDB> show confignodes
+------+-------+---------------+------------+--------+
|NodeID| Status|InternalAddress|InternalPort| Role|
@@ -266,48 +265,60 @@ Total line number = 3
It costs 0.030s
```
-Then use the script to remove the DataNode. Script command:
+2. Remove the ConfigNode using the script:
+
+**Linux / MacOS:**
```Bash
-# Linux / MacOS
sbin/remove-confignode.sh [confignode_id]
+# Or:
+sbin/remove-confignode.sh [cn_internal_address:cn_internal_port]
+```
-#Windows
-sbin/remove-confignode.bat [confignode_id]
+**Windows:**
+```Bash
+sbin/remove-confignode.bat [confignode_id]
+# Or:
+sbin/remove-confignode.bat [cn_internal_address:cn_internal_port]
```
-### DataNode Node Maintenance
-
-There are two common scenarios for DataNode node maintenance:
+### 4.2 DataNode Maintenance
-- Cluster expansion: For the purpose of expanding cluster capabilities, add new DataNodes to the cluster
+DataNode maintenance includes adding and removing DataNodes. Common use cases include:
-- Cluster failure recovery: When a machine where a DataNode is located fails, making the DataNode unable to run normally, you can remove this DataNode and add a new DataNode to the cluster
+- **Cluster Expansion:** Add new DataNodes to increase cluster capacity.
+- **Cluster Fault Recovery:** If a DataNode's machine fails and it cannot function normally, remove the faulty DataNode and add a new one to the cluster.
-> ❗️Note, in order for the cluster to work normally, during the process of DataNode node maintenance and after the maintenance is completed, the total number of DataNodes running normally should not be less than the number of data replicas (usually 2), nor less than the number of metadata replicas (usually 3).
+**Note:** During and after DataNode maintenance, ensure that the number of active DataNodes is not fewer than the data replication factor (usually 2) or the schema replication factor (usually 3).
-#### Adding DataNode Nodes
+#### 4.2.1 Adding a DataNode
-Script command:
+**Linux / MacOS:**
-```Bash
-# Linux / MacOS
-# First switch to the IoTDB root directory
+```Plain
sbin/start-datanode.sh
+```
+
+**Windows:**
-# Windows
-# First switch to the IoTDB root directory
+```Plain
sbin/start-datanode.bat
```
-Note: After adding a DataNode, as new writes arrive (and old data expires, if TTL is set), the cluster load will gradually balance towards the new DataNode, eventually achieving a balance of storage and computation resources on all nodes.
+**Note:** After adding a DataNode, the cluster load will gradually balance across all nodes as new writes arrive and old data expires (if TTL is set).
-#### Removing DataNode Nodes
+#### 4.2.2 Removing a DataNode
-First connect to the cluster through the CLI and confirm the RPC address and port number of the DataNode you want to remove with `show datanodes`:
+1. Connect to the cluster using the CLI and confirm the RPC address and port of the DataNode to be removed:
-```Bash
+```Plain
+show datanodes;
+```
+
+Example output:
+
+```Plain
IoTDB> show datanodes
+------+-------+----------+-------+-------------+---------------+
|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum|
@@ -320,80 +331,79 @@ Total line number = 3
It costs 0.110s
```
-Then use the script to remove the DataNode. Script command:
+2. Remove the DataNode using the script:
-```Bash
-# Linux / MacOS
-sbin/remove-datanode.sh [datanode_id]
+**Linux / MacOS:**
-#Windows
-sbin/remove-datanode.bat [datanode_id]
+```Bash
+sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port]
```
-## Common Questions
-
-1. Multiple prompts indicating activation failure during deployment process
+**Windows:**
- - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user.
-
- - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user.
-
-2. Confignode failed to start
-
- Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified.
-
- Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions.
-
- Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart.
+```Bash
+sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port]
+```
- Step 4: Clean up the environment:
+## 5 Common Issues
- a. Terminate all ConfigNode Node and DataNode processes.
+1. Activation Fails Repeatedly
+ - Use the `ls -al` command to verify that the ownership of the installation directory matches the current user.
+ - Check the ownership of all files in the `./activation` directory to ensure they belong to the current user.
- ```Bash
- # 1. Stop the ConfigNode and DataNode services
- sbin/stop-standalone.sh
-
- # 2. Check for any remaining processes
- jps
- # Or
- ps -ef|gerp iotdb
+2. ConfigNode Fails to Start
+    1. Review the startup logs to check whether any parameters that cannot be modified after the first startup were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
- # 3. If there are any remaining processes, manually kill the
- kill -9
- # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes
- ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9
- ```
-
- b. Delete the data and logs directories.
-
- Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory.
-
- ```Bash
- cd /data/iotdb
- rm -rf data logs
- ```
-
-## Appendix
-
-### Introduction to Configuration Node Parameters
-
-| Parameter | Description | Is it required |
-| :-------- | :---------------------------------------------- | :------------- |
-| -d | Start in daemon mode, running in the background | No |
-
-### Introduction to Datanode Node Parameters
-
-| Abbreviation | Description | Is it required |
-| :----------- | :----------------------------------------------------------- | :------------- |
-| -v | Show version information | No |
-| -f | Run the script in the foreground, do not put it in the background | No |
-| -d | Start in daemon mode, i.e. run in the background | No |
-| -p | Specify a file to store the process ID for process management | No |
-| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No |
-| -g | Print detailed garbage collection (GC) information | No |
-| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No |
-| -E | Specify the path of the JVM error log file | No |
-| -D | Define system properties, in the format key=value | No |
-| -X | Pass -XX parameters directly to the JVM | No |
-| -h | Help instruction | No |
\ No newline at end of file
+ **Clean the Environment**
+
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
+
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+
+ 3. If processes remain, terminate them manually:
+ ```Bash
+    kill -9 <pid>
+
+    # For systems with a single IoTDB instance, you can clean up residual processes with:
+ ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+    4. Delete the `data` and `logs` directories (deleting `data` is required; removing `logs` is optional and only keeps the logs clean):
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 6 Appendix
+
+### 6.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 6.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources.md
index 3314b6532..2ca497e6e 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources.md
@@ -19,50 +19,50 @@
-->
# Database Resources
-## CPU
+## 1 CPU
+| Number of timeseries (frequency <= 1Hz) | CPU | Nodes (Standalone) | Nodes (Dual-Active) | Nodes (Distributed) |
+| :--------------------------------------- | :---------- | :----------------- | :------------------ | :------------------ |
+| Within 100,000 | 2-4 cores | 1 | 2 | 3 |
+| Within 300,000 | 4-8 cores | 1 | 2 | 3 |
+| Within 500,000 | 8-16 cores | 1 | 2 | 3 |
+| Within 1,000,000 | 16-32 cores | 1 | 2 | 3 |
+| Within 2,000,000 | 32-48 cores | 1 | 2 | 3 |
@@ -81,50 +81,50 @@
-## Memory
+## 2 Memory
+| Number of timeseries (frequency <= 1Hz) | Memory | Nodes (Standalone) | Nodes (Dual-Active) | Nodes (Distributed) |
+| :--------------------------------------- | :-------- | :----------------- | :------------------ | :------------------ |
+| Within 100,000 | 4-8 GB | 1 | 2 | 3 |
+| Within 300,000 | 12-32 GB | 1 | 2 | 3 |
+| Within 500,000 | 24-48 GB | 1 | 2 | 3 |
+| Within 1,000,000 | 32-96 GB | 1 | 2 | 3 |
+| Within 2,000,000 | 64-128 GB | 1 | 2 | 3 |
@@ -143,19 +143,23 @@
-## Storage (Disk)
-### Storage space
-Calculation formula: Number of measurement points * Sampling frequency (Hz) * Size of each data point (Byte, different data types may vary, see table below) * Storage time (seconds) * Number of copies (usually 1 copy for a single node and 2 copies for a cluster) ÷ Compression ratio (can be estimated at 5-10 times, but may be higher in actual situations)
+## 3 Storage (Disk)
+### 3.1 Storage space
+Calculation Formula:
+
+```Plain
+Storage Space = Number of Measurement Points * Sampling Frequency (Hz) * Size of Each Data Point (Bytes, see the table below) * Storage Duration (Seconds) * Replication Factor / Compression Ratio
+```
+
+The replication factor is usually 1 for a standalone instance and 2 for a cluster; the compression ratio can be estimated at 5-10x, and is often higher in practice.
+
+Data Point Size Calculation Table:
+
+| Data Type | Timestamp (Bytes) | Value (Bytes) | Total Data Point Size (Bytes) |
+| :-------------------------------- | :---------------- | :--------------- | :---------------------------- |
@@ -165,36 +169,48 @@ Calculation formula: Number of measurement points * Sampling frequency (Hz) * Si
+| BOOLEAN | 8 | 1 | 9 |
+| INT32 / FLOAT (Single Precision) | 8 | 4 | 12 |
+| INT64 / DOUBLE (Double Precision) | 8 | 8 | 16 |
+| TEXT (String) | 8 | Average length a | 8 + a |
+Example:
+
+- Scenario: 1,000 devices, 100 measurement points per device, i.e. 100,000 sequences in total. Data type is INT32. Sampling frequency is 1Hz (once per second). Storage duration is 1 year. Replication factor is 3.
+- Full Calculation:
+ ```Plain
+  1,000 devices * 100 measurement points * 12 bytes per data point * 86,400 seconds per day * 365 days per year * 3 replicas / 10 compression ratio ≈ 11 TB
+ ```
+- Simplified Calculation:
+ ```Plain
+  1,000 * 100 * 12 * 86,400 * 365 * 3 / 10 ≈ 11 TB
+ ```
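+
+As a quick sanity check, the same estimate can be reproduced with `bc` (the figures are those of the example above; the result is in terabytes):
+
+```Bash
+echo "1000 * 100 * 12 * 86400 * 365 * 3 / 10 / 10^12" | bc -l  # prints ~11.35
+```
+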
+### 3.2 Storage Configuration
+
+- For systems with > 10 million measurement points or high query loads, SSD is recommended.
+
+## 4 Network (NIC)
+A gigabit (1 Gbps) NIC is sufficient when the write throughput is below 10 million points per second; above that threshold, a 10-gigabit (10 Gbps) NIC is required.
+
+| **Write Throughput (Data Points/Second)** | **NIC Speed** |
+| ------------------------------------------ | -------------------- |
+| < 10 million | 1 Gbps (Gigabit) |
+| ≥ 10 million | 10 Gbps (10 Gigabit) |
+
+## 5 Additional Notes
-Example: 1000 devices, each with 100 measurement points, a total of 100000 sequences, INT32 type. Sampling frequency 1Hz (once per second), storage for 1 year, 3 copies.
-- Complete calculation formula: 1000 devices * 100 measurement points * 12 bytes per data point * 86400 seconds per day * 365 days per year * 3 copies / 10 compression ratio / 1024 / 1024 / 1024 / 1024 =11T
-- Simplified calculation formula: 1000 * 100 * 12 * 86400 * 365 * 3 / 10 / 1024 / 1024 / 1024 / 1024 =11T
-### Storage Configuration
-If the number of nodes is over 10000000 or the query load is high, it is recommended to configure SSD
-## Network (Network card)
-If the write throughput does not exceed 10 million points/second, configure 1Gbps network card. When the write throughput exceeds 10 million points per second, a 10Gbps network card needs to be configured.
-| **Write throughput (data points per second)** | **NIC rate** |
-| ------------------- | ------------- |
-| <10 million | 1Gbps |
-| >=10 million | 10Gbps |
-## Other instructions
-IoTDB has the ability to scale up clusters in seconds, and expanding node data does not require migration. Therefore, you do not need to worry about the limited cluster capacity estimated based on existing data. In the future, you can add new nodes to the cluster when you need to scale up.
\ No newline at end of file
+- IoTDB supports second-level cluster scaling: adding new nodes does not require data migration, so there is no need to worry that the cluster capacity estimated from existing data will become a hard limit. Simply add new nodes to the cluster whenever scaling is needed.
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_apache.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_apache.md
new file mode 100644
index 000000000..29ee83861
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_apache.md
@@ -0,0 +1,52 @@
+
+# Deployment Mode
+
+IoTDB has two operation modes: standalone mode and cluster mode.
+
+## 1 Standalone Mode
+
+An IoTDB standalone instance includes 1 ConfigNode and 1 DataNode, referred to as 1C1D.
+
+- **Features**: Easy for developers to install and deploy, with lower deployment and maintenance costs, and convenient operation.
+- **Applicable scenarios**: Situations with limited resources or where high availability is not a critical requirement, such as edge servers.
+- **Deployment method**: [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md)
+
+
+## 2 Cluster Mode
+
+An IoTDB cluster instance consists of 3 ConfigNodes and no fewer than 3 DataNodes (typically 3), referred to as 3C3D. If some nodes fail, the remaining nodes can still provide services, ensuring high availability of the database, and performance can be improved by adding DataNodes.
+
+- **Features**: High availability and scalability, with the ability to enhance system performance by adding DataNodes.
+- **Applicable scenarios**: Enterprise-level application scenarios that require high availability and reliability.
+- **Deployment method**: [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_apache.md)
+
+## 3 Summary of Features
+
+| **Dimension** | **Stand-Alone Mode** | **Cluster Mode** |
+| :-------------------------- | :----------------------------------------------------- | :----------------------------------------------------------- |
+| Applicable Scenario | Edge deployment, low requirement for high availability | High-availability business, disaster recovery scenarios, etc. |
+| Number of Machines Required | 1 | ≥3 |
+| Security and Reliability | Cannot tolerate single-point failure | High, can tolerate single-point failure |
+| Scalability | Scalable by adding DataNodes to improve performance | Scalable by adding DataNodes to improve performance |
+| Performance | Scalable with the number of DataNodes | Scalable with the number of DataNodes |
+
+- The deployment steps for standalone mode and cluster mode are similar (ConfigNodes and DataNodes are added one by one, as sketched below); the only differences are the number of replicas and the minimum number of nodes required to provide services.
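+
+A minimal sketch of that sequence, assuming the default `sbin` scripts shipped with an IoTDB distribution (see the linked deployment guides for the full procedure):
+
+```Bash
+# On the seed node: start the first ConfigNode, then a DataNode
+./sbin/start-confignode.sh -d   # -d runs the process in the background
+./sbin/start-datanode.sh -d
+# Cluster mode: repeat on each additional node, adding nodes one at a time
+```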
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_timecho.md
new file mode 100644
index 000000000..90230e5c5
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Deployment-form_timecho.md
@@ -0,0 +1,63 @@
+
+# Deployment Mode
+
+IoTDB has three operation modes: standalone mode, dual-active mode, and cluster mode.
+
+## 1 Standalone Mode
+
+An IoTDB standalone instance includes 1 ConfigNode and 1 DataNode, i.e., 1C1D.
+
+- **Features**: Easy for developers to install and deploy, with low deployment and maintenance costs and convenient operations.
+- **Use Cases**: Scenarios with limited resources or low high-availability requirements, such as edge servers.
+- **Deployment Method**: [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md)
+
+## 2 Dual-Active Mode
+
+Dual-Active Deployment is a feature of TimechoDB, where two independent instances synchronize bidirectionally and can provide services simultaneously. If one instance stops and restarts, the other instance will resume data transfer from the breakpoint.
+
+> An IoTDB Dual-Active instance typically consists of 2 standalone nodes, i.e., 2 sets of 1C1D. Each instance can also be a cluster.
+
+- **Features**: The high-availability solution with the lowest resource consumption.
+- **Use Cases**: Scenarios with limited resources (only two servers) but requiring high availability.
+- **Deployment Method**: [Dual-Active Deployment](../Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md)
+
+## 3 Cluster Mode
+
+An IoTDB cluster instance consists of 3 ConfigNodes and no fewer than 3 DataNodes, typically 3 DataNodes, i.e., 3C3D. If some nodes fail, the remaining nodes can still provide services, ensuring high availability of the database. Performance can be improved by adding DataNodes.
+
+- **Features**: High availability, high scalability, and improved system performance by adding DataNodes.
+- **Use Cases**: Enterprise-level application scenarios requiring high availability and reliability.
+- **Deployment Method**: [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md)
+
+
+
+## 4 Feature Summary
+
+| **Dimension** | **Stand-Alone Mode** | **Dual-Active Mode** | **Cluster Mode** |
+| :-------------------------- | :------------------------------------------------------- | :------------------------------------------------------ | :------------------------------------------------------ |
+| Use Cases | Edge-side deployment, low high-availability requirements | High-availability services, disaster recovery scenarios | High-availability services, disaster recovery scenarios |
+| Number of Machines Required | 1 | 2 | ≥3 |
+| Security and Reliability | Cannot tolerate single-point failure | High, can tolerate single-point failure | High, can tolerate single-point failure |
+| Scalability | Can expand DataNodes to improve performance | Each instance can be scaled as needed | Can expand DataNodes to improve performance |
+| Performance | Can scale with the number of DataNodes | Same as one of the instances | Can scale with the number of DataNodes |
+
+- The deployment steps for Stand-Alone Mode and Cluster Mode are similar (adding ConfigNodes and DataNodes one by one), with differences only in the number of replicas and the minimum number of nodes required to provide services.
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md
index 048c3e0d8..176db37f8 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md
@@ -20,43 +20,44 @@
-->
# Docker Deployment
-## Environmental Preparation
+## 1 Environment Preparation
-### Docker Installation
+### 1.1 Install Docker
-```SQL
-#Taking Ubuntu as an example, other operating systems can search for installation methods themselves
-#step1: Install some necessary system tools
+```Bash
+#Taking Ubuntu as an example. For other operating systems, you can search for installation methods on your own.
+#step1: Install necessary system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
#step2: Install GPG certificate
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
-#step3: Write software source information
+#step3: Add the software source
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
#step4: Update and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
-#step5: Set Docker to start automatically upon startup
+#step5: Set Docker to start automatically on boot
sudo systemctl enable docker
-#step6: Verify if Docker installation is successful
-docker --version #Display version information, indicating successful installation
+#step6: Verify if Docker is installed successfully
+docker --version #Display version information, indicating successful installation.
```
-### Docker-compose Installation
+### 1.2 Install Docker Compose
-```SQL
+```Bash
#Installation command
curl -L "https://github.com/docker/compose/releases/download/v2.20.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
-#Verify if the installation was successful
-docker-compose --version #Displaying version information indicates successful installation
+#Verify the installation
+docker-compose --version #Display version information, indicating successful installation.
```
-## Stand-Alone Deployment
+## 2 Stand-Alone Deployment
This section demonstrates how to deploy a standalone Docker version of 1C1D.
+
-### Pull Image File
+### 2.1 Pull the Image File
The Docker image of Apache IoTDB has been uploaded to https://hub.docker.com/r/apache/iotdb.
@@ -75,31 +76,29 @@ docker images

-### Create Docker Bridge Network
+### 2.2 Create a Docker Bridge Network
```Bash
docker network create --driver=bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 iotdb
```
-### Write The Yml File For Docker-Compose
+### 2.3 Write the Docker-Compose YML File
-Here we take the example of consolidating the IoTDB installation directory and yml files in the/docker iotdb folder:
+Assume the IoTDB installation directory and the YML file are placed under the `/docker-iotdb` folder. The directory structure is as follows: `/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml`
-The file directory structure is:`/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml `
-
-```bash
+```Bash
docker-iotdb:
├── iotdb #IoTDB installation directory
│── docker-compose-standalone.yml #YML file for the standalone Docker Compose deployment
```
-The complete docker-compose-standalone.yml content is as follows:
+The complete content of `docker-compose-standalone.yml` is as follows:
-```bash
+```Bash
version: "3"
services:
iotdb-service:
    image: apache/iotdb:1.3.2-standalone #The image used
hostname: iotdb
container_name: iotdb
restart: always
@@ -120,8 +119,11 @@ services:
- dn_seed_config_node=iotdb:10710
privileged: true
volumes:
- ./iotdb/data:/iotdb/data
- ./iotdb/logs:/iotdb/logs
networks:
iotdb:
ipv4_address: 172.18.0.6
@@ -130,16 +132,17 @@ networks:
external: true
```
-### Start IoTDB
+### 2.4 Start IoTDB
Use the following command to start:
-```bash
+```Bash
cd /docker-iotdb
-docker-compose -f docker-compose-standalone.yml up -d #Background startup
+docker-compose -f docker-compose-standalone.yml up -d #Start in the background
```
-### Validate Deployment
+
+### 2.5 Verify the Deployment
-- Viewing the log, the following words indicate successful startup
+- Check the logs: If you see the following message, the startup is successful.
@@ -172,17 +175,18 @@ You can see that all services are running and the activation status shows as act

-### Map/conf Directory (optional)
+### 2.6 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
-Step 1: Copy the /conf directory from the container to `/docker-iotdb/iotdb/conf`
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-```bash
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf`:
+
+```Bash
docker cp iotdb:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add mappings in docker-compose-standalone.yml
+**Step 2**: Add the mapping in `docker-compose-standalone.yml`:
```bash
volumes:
@@ -191,35 +195,35 @@ Step 2: Add mappings in docker-compose-standalone.yml
- ./iotdb/logs:/iotdb/logs
```
-Step 3: Restart IoTDB
+**Step 3**: Restart IoTDB:
-```bash
+```Bash
docker-compose -f docker-compose-standalone.yml up -d
```
-## Cluster Deployment
+## 3 Cluster Deployment
-This section describes how to manually deploy an instance that includes 3 Config Nodes and 3 Data Nodes, commonly known as a 3C3D cluster.
+This section describes how to manually deploy a cluster consisting of 3 ConfigNodes and 3 DataNodes, commonly referred to as a 3C3D cluster.
-**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
+**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
-Taking the host network as an example, we will demonstrate how to deploy a 3C3D cluster.
+Below, we demonstrate how to deploy a 3C3D cluster using the host network as an example.
-### Set Host Name
+### 3.1 Set Hostnames
-Assuming there are currently three Linux servers, the IP addresses and service role assignments are as follows:
+Assume there are 3 Linux servers with the following IP addresses and service roles:
-| Node IP | Host Name | Service |
-| ----------- | --------- | -------------------- |
-| 192.168.1.3 | iotdb-1 | ConfigNode、DataNode |
-| 192.168.1.4 | iotdb-2 | ConfigNode、DataNode |
-| 192.168.1.5 | iotdb-3 | ConfigNode、DataNode |
+| Node IP | Hostname | Services |
+| :---------- | :------- | :------------------- |
+| 192.168.1.3 | iotdb-1 | ConfigNode, DataNode |
+| 192.168.1.4 | iotdb-2 | ConfigNode, DataNode |
+| 192.168.1.5 | iotdb-3 | ConfigNode, DataNode |
-Configure the host names on three machines separately. To set the host names, configure `/etc/hosts` on the target server using the following command:
+On each of the 3 machines, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
```Bash
echo "192.168.1.3 iotdb-1" >> /etc/hosts
@@ -227,7 +231,7 @@ echo "192.168.1.4 iotdb-2" >> /etc/hosts
echo "192.168.1.5 iotdb-3" >> /etc/hosts
```
-### Pull Image File
+### 3.2 Pull the Image File
The Docker image of Apache IoTDB has been uploaded to https://hub.docker.com/r/apache/iotdb.
@@ -245,24 +249,22 @@ docker images

-### Write The Yml File For Docker Compose
+### 3.3 Write the Docker-Compose YML Files
-Here we take the example of consolidating the IoTDB installation directory and yml files in the `/docker-iotdb` folder:
+Here, we assume the IoTDB installation directory and YML files are placed under the `/docker-iotdb` folder. The directory structure is as follows:
-The file directory structure is :`/docker-iotdb/iotdb`, `/docker-iotdb/confignode.yml`,`/docker-iotdb/datanode.yml`
-
-```SQL
+```Bash
docker-iotdb:
-├── confignode.yml #Yml file of confignode
-├── datanode.yml #Yml file of datanode
-└── iotdb #IoTDB installation directory
+├── confignode.yml #ConfigNode YML file
+├── datanode.yml #DataNode YML file
+└── iotdb #IoTDB installation directory
```
-On each server, two yml files need to be written, namely confignnode. yml and datanode. yml. The example of yml is as follows:
+On each server, create two YML files: `confignode.yml` and `datanode.yml`. Examples are provided below:
**confignode.yml:**
-```bash
+```Bash
#confignode.yml
version: "3"
services:
@@ -291,7 +293,7 @@ services:
**datanode.yml:**
-```bash
+```Bash
#datanode.yml
version: "3"
services:
@@ -324,29 +326,31 @@ services:
network_mode: "host" #Using the host network
```
-### Starting Confignode For The First Time
+### 3.4 Start ConfigNode for the First Time
-First, start configNodes on each of the three servers to obtain the machine code. Pay attention to the startup order, start the first iotdb-1 first, then start iotdb-2 and iotdb-3.
+Start the ConfigNode on all 3 servers. **Note the startup order**: Start `iotdb-1` first, followed by `iotdb-2` and `iotdb-3`.
-```bash
+Run the following command on each server:
+
+```Bash
cd /docker-iotdb
docker-compose -f confignode.yml up -d #Background startup
```
-### Start Datanode
+### 3.5 Start DataNode
-Start datanodes on 3 servers separately
+Start the DataNode on all 3 servers:
-```SQL
+```Bash
cd /docker-iotdb
docker-compose -f datanode.yml up -d #Background startup
```

-### Validate Deployment
+### 3.6 Verify Deployment
-- Viewing the logs, the following words indicate that the datanode has successfully started
+- Check the logs: If you see the following message, the DataNode has started successfully.
```SQL
docker logs -f iotdb-datanode #View log command
@@ -355,7 +359,7 @@ docker-compose -f datanode.yml up -d #Background startup

-- Enter any container to view the service running status and activation information
+- Enter the container and check the service status:
View the launched container
@@ -365,7 +369,7 @@ docker-compose -f datanode.yml up -d #Background startup

- Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status
+ Enter any container, log in to the database via CLI, and use the `show cluster` command to check the service status:
```SQL
docker exec -it iotdb-datanode /bin/bash #Entering the container
@@ -373,23 +377,23 @@ docker-compose -f datanode.yml up -d #Background startup
IoTDB> show cluster #View status
```
- You can see that all services are running and the activation status shows as activated.
+ If all services are in the `running` state, the IoTDB deployment is successful.

-### Map/conf Directory (optional)
+### 3.7 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-Step 1: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on each of the three servers
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on all 3 servers:
-```bash
+```Bash
docker cp iotdb-confignode:/iotdb/conf /docker-iotdb/iotdb/conf
or
docker cp iotdb-datanode:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on 3 servers
+**Step 2**: Add the `/conf` directory mapping in both `confignode.yml` and `datanode.yml` on all 3 servers:
```bash
#confignode.yml
@@ -407,7 +411,7 @@ Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on
- /dev/mem:/dev/mem:ro
```
-Step 3: Restart IoTDB on 3 servers
+**Step 3**: Restart IoTDB on all 3 servers:
```bash
cd /docker-iotdb
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md
index 4aec6d8ee..608bc363c 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md
@@ -20,68 +20,66 @@
-->
# Docker Deployment
-## Environmental Preparation
+## 1 Environment Preparation
-### Docker Installation
+### 1.1 Install Docker
```Bash
-#Taking Ubuntu as an example, other operating systems can search for installation methods themselves
-#step1: Install some necessary system tools
+#Taking Ubuntu as an example. For other operating systems, you can search for installation methods on your own.
+#step1: Install necessary system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
#step2: Install GPG certificate
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
-#step3: Write software source information
+#step3: Add the software source
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
#step4: Update and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
-#step5: Set Docker to start automatically upon startup
+#step5: Set Docker to start automatically on boot
sudo systemctl enable docker
-#step6: Verify if Docker installation is successful
-docker --version #Display version information, indicating successful installation
+#step6: Verify if Docker is installed successfully
+docker --version #Display version information, indicating successful installation.
```
-### Docker-compose Installation
+### 1.2 Install Docker Compose
```Bash
#Installation command
curl -L "https://github.com/docker/compose/releases/download/v2.20.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
-#Verify if the installation was successful
-docker-compose --version #Displaying version information indicates successful installation
+#Verify the installation
+docker-compose --version #Display version information, indicating successful installation.
```
-### Install The Dmidecode Plugin
+### 1.3 Install dmidecode
-By default, Linux servers should already be installed. If not, you can use the following command to install them.
+By default, Linux servers should already have dmidecode. If not, you can use the following command to install it.
```Bash
sudo apt-get install dmidecode
```
-After installing dmidecode, search for the installation path: `wherever dmidecode`. Assuming the result is `/usr/sbin/dmidecode`, remember this path as it will be used in the later docker compose yml file.
+After installing `dmidecode`, you can locate its installation path by running:`whereis dmidecode`. Assuming the result is `/usr/sbin/dmidecode`, please remember this path as it will be used in the YML file of Docker Compose later.
-### Get Container Image Of IoTDB
+### 1.4 Obtain the Container Image
-You can contact business or technical support to obtain container images for IoTDB Enterprise Edition.
+For the TimechoDB container image, you can contact the Timecho team to acquire it.
-## Stand-Alone Deployment
+## 2 Stand-Alone Deployment
This section demonstrates how to deploy a standalone Docker version of 1C1D.
-### Load Image File
+### 2.1 Load the Image File
-For example, the container image file name of IoTDB obtained here is: `iotdb-enterprise-1.3.2-3-standalone-docker.tar.gz`
-
-Load image:
+For example, if the IoTDB container image file you obtained is named: `iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz`, use the following command to load the image:
```Bash
docker load -i iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz
```
-View image:
+To view the loaded image, use the following command:
```Bash
docker images
@@ -89,17 +87,15 @@ docker images

-### Create Docker Bridge Network
+### 2.2 Create a Docker Bridge Network
```Bash
docker network create --driver=bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 iotdb
```
-### Write The Yml File For docker-compose
-
-Here we take the example of consolidating the IoTDB installation directory and yml files in the/docker iotdb folder:
+### 2.3 Write the Docker-Compose YML File
-The file directory structure is:`/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml `
+Assume the IoTDB installation directory and the YML file are placed under the `/docker-iotdb` folder. The directory structure is as follows: `/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml`
```Bash
docker-iotdb:
@@ -107,7 +103,7 @@ docker-iotdb:
│── docker-compose-standalone.yml #YML file for the standalone Docker Compose deployment
```
-The complete docker-compose-standalone.yml content is as follows:
+The complete content of `docker-compose-standalone.yml` is as follows:
```Bash
version: "3"
@@ -147,7 +143,7 @@ networks:
external: true
```
-### First Launch
+### 2.4 First Startup
Use the following command to start:
@@ -156,21 +152,21 @@ cd /docker-iotdb
docker-compose -f docker-compose-standalone.yml up
```
-Due to lack of activation, it is normal to exit directly upon initial startup. The initial startup is to obtain the machine code file for the subsequent activation process.
+Since the system is not activated yet, it will exit immediately after the first startup, which is normal. The purpose of the first startup is to generate the machine code file for the activation process.

-### Apply For Activation
+### 2.5 Apply for Activation
-- After the first startup, a system_info file will be generated in the physical machine directory `/docker-iotdb/iotdb/activation`, and this file will be copied to the Timecho staff.
+- After the first startup, a `system_info` file will be generated in the physical machine directory `/docker-iotdb/iotdb/activation`. Copy this file and send it to the Timecho team.

-- Received the license file returned by the staff, copy the license file to the `/docker iotdb/iotdb/activation` folder.
+- Once you receive the `license` file, copy it to the `/docker-iotdb/iotdb/activation` folder.

-### Restart IoTDB
+### 2.6 Start IoTDB Again
```Bash
docker-compose -f docker-compose-standalone.yml up -d
@@ -178,9 +174,9 @@ docker-compose -f docker-compose-standalone.yml up -d

-### Validate Deployment
+### 2.7 Verify the Deployment
-- Viewing the log, the following words indicate successful startup
+- Check the logs: If you see the following message, the startup is successful.
```Bash
docker logs -f iotdb-datanode #View log command
@@ -189,7 +185,7 @@ docker-compose -f docker-compose-standalone.yml up -d

-- Enter the container to view the service running status and activation information
+- Enter the container and check the service status:
View the launched container
@@ -199,69 +195,67 @@ docker-compose -f docker-compose-standalone.yml up -d

- Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status
+  Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status:
```Bash
- docker exec -it iotdb /bin/bash #Entering the container
+ docker exec -it iotdb /bin/bash #Enter the container
./start-cli.sh -h iotdb #Log in to the database
- IoTDB> show cluster #View status
+ IoTDB> show cluster #Check the service status
```
- You can see that all services are running and the activation status shows as activated.
+ If all services are in the `running` state, the IoTDB deployment is successful.

-### Map/conf Directory (optional)
+### 2.8 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-Step 1: Copy the/conf directory from the container to/docker-iotdb/iotdb/conf
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf`:
```Bash
docker cp iotdb:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add mappings in docker-compose-standalone.yml
+**Step 2**: Add the mapping in `docker-compose-standalone.yml`:
```Bash
volumes:
- - ./iotdb/conf:/iotdb/conf #Add mapping for this/conf folder
- - ./iotdb/activation:/iotdb/activation
+ - ./iotdb/conf:/iotdb/conf # Add this mapping for the /conf folder
- ./iotdb/data:/iotdb/data
- ./iotdb/logs:/iotdb/logs
- - /usr/sbin/dmidecode:/usr/sbin/dmidecode:ro
- /dev/mem:/dev/mem:ro
```
-Step 3: Restart IoTDB
+**Step 3**: Restart IoTDB:
```Bash
docker-compose -f docker-compose-standalone.yml up -d
```
-## Cluster Deployment
+## 3 Cluster Deployment
-This section describes how to manually deploy an instance that includes 3 Config Nodes and 3 Data Nodes, commonly known as a 3C3D cluster.
+This section describes how to manually deploy a cluster consisting of 3 ConfigNodes and 3 DataNodes, commonly referred to as a 3C3D cluster.
-**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
+**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
-Taking the host network as an example, we will demonstrate how to deploy a 3C3D cluster.
+Below, we demonstrate how to deploy a 3C3D cluster using the host network as an example.
-### Set Host Name
+### 3.1 Set Hostnames
-Assuming there are currently three Linux servers, the IP addresses and service role assignments are as follows:
+Assume there are 3 Linux servers with the following IP addresses and service roles:
-| Node IP | Host Name | Service |
-| ----------- | --------- | -------------------- |
-| 192.168.1.3 | iotdb-1 | ConfigNode、DataNode |
-| 192.168.1.4 | iotdb-2 | ConfigNode、DataNode |
-| 192.168.1.5 | iotdb-3 | ConfigNode、DataNode |
+| Node IP | Hostname | Services |
+| :---------- | :------- | :------------------- |
+| 192.168.1.3 | iotdb-1 | ConfigNode, DataNode |
+| 192.168.1.4 | iotdb-2 | ConfigNode, DataNode |
+| 192.168.1.5 | iotdb-3 | ConfigNode, DataNode |
-Configure the host names on three machines separately. To set the host names, configure `/etc/hosts` on the target server using the following command:
+On each of the 3 machines, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
```Bash
echo "192.168.1.3 iotdb-1" >> /etc/hosts
@@ -269,17 +263,15 @@ echo "192.168.1.4 iotdb-2" >> /etc/hosts
echo "192.168.1.5 iotdb-3" >> /etc/hosts
```
-### Load Image File
+### 3.2 Load the Image File
-For example, the container image file name obtained for IoTDB is: `iotdb-enterprise-1.3.23-standalone-docker.tar.gz`
-
-Execute the load image command on three servers separately:
+For example, if the IoTDB container image file is named `iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz`, execute the following command on all 3 servers to load the image:
```Bash
docker load -i iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz
```
-View image:
+To view the loaded images, run:
```Bash
docker images
@@ -287,20 +279,18 @@ docker images

-### Write The Yml File For Docker Compose
-
-Here we take the example of consolidating the IoTDB installation directory and yml files in the /docker-iotdb folder:
+### 3.3 Write the Docker-Compose YML Files
-The file directory structure is:/docker-iotdb/iotdb, /docker-iotdb/confignode.yml,/docker-iotdb/datanode.yml
+Here, we assume the IoTDB installation directory and YML files are placed under the `/docker-iotdb` folder. The directory structure is as follows:
```Bash
docker-iotdb:
-├── confignode.yml #Yml file of confignode
-├── datanode.yml #Yml file of datanode
-└── iotdb #IoTDB installation directory
+├── confignode.yml #ConfigNode YML file
+├── datanode.yml #DataNode YML file
+└── iotdb #IoTDB installation directory
```
-On each server, two yml files need to be written, namely confignnode. yml and datanode. yml. The example of yml is as follows:
+On each server, create two YML files: `confignode.yml` and `datanode.yml`. Examples are provided below:
**confignode.yml:**
@@ -366,30 +356,32 @@ services:
network_mode: "host" #Using the host network
```
-### Starting Confignode For The First Time
+### 3.4 Start ConfigNode for the First Time
+
+Start the ConfigNode on all 3 servers. **Note the startup order**: Start `iotdb-1` first, followed by `iotdb-2` and `iotdb-3`.
-First, start configNodes on each of the three servers to obtain the machine code. Pay attention to the startup order, start the first iotdb-1 first, then start iotdb-2 and iotdb-3.
+Run the following command on each server:
```Bash
cd /docker-iotdb
docker-compose -f confignode.yml up -d #Background startup
```
-### Apply For Activation
+### 3.5 Apply for Activation
-- After starting three confignodes for the first time, a system_info file will be generated in each physical machine directory `/docker-iotdb/iotdb/activation`, and the system_info files of the three servers will be copied to the Timecho staff;
+- After starting the 3 ConfigNodes for the first time, a `system_info` file will be generated in the `/docker-iotdb/iotdb/activation` directory on each physical machine. Copy the `system_info` files from all 3 servers and send them to the Timecho team.

-- Put the three license files into the `/docker iotdb/iotdb/activation` folder of the corresponding Configurable Node node;
+- Place the 3 `license` files into the corresponding `/docker-iotdb/iotdb/activation` folders on each ConfigNode server.

-- After the license is placed in the corresponding activation folder, confignode will be automatically activated without restarting confignode
+- Once the `license` files are placed in the `activation` folders, the ConfigNodes will automatically activate. **No restart is required for the ConfigNodes.**
-### Start Datanode
+### 3.6 Start DataNode
-Start datanodes on 3 servers separately
+Start the DataNode on all 3 servers:
```Bash
cd /docker-iotdb
@@ -398,9 +390,9 @@ docker-compose -f datanode.yml up -d #Background startup

-### Validate Deployment
+### 3.7 Verify Deployment
-- Viewing the logs, the following words indicate that the datanode has successfully started
+- Check the logs: If you see the following message, the DataNode has started successfully.
```Bash
docker logs -f iotdb-datanode #View log command
@@ -409,7 +401,7 @@ docker-compose -f datanode.yml up -d #Background startup

-- Enter any container to view the service running status and activation information
+- Enter the container and check the service status:
View the launched container
@@ -419,23 +411,23 @@ docker-compose -f datanode.yml up -d #Background startup

- Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status
+ Enter any container, log in to the database via CLI, and use the `show cluster` command to check the service status:
- ```Bash
- docker exec -it iotdb-datanode /bin/bash #Entering the container
- ./start-cli.sh -h iotdb-1 #Log in to the database
- IoTDB> show cluster #View status
- ```
+```Bash
+docker exec -it iotdb-datanode /bin/bash #Entering the container
+./start-cli.sh -h iotdb-1 #Log in to the database
+IoTDB> show cluster #View status
+```
- You can see that all services are running and the activation status shows as activated.
+If all services are in the `running` state, the IoTDB deployment is successful.

-### Map/conf Directory (optional)
+### 3.8 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-Step 1: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on each of the three servers
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on all 3 servers:
```Bash
docker cp iotdb-confignode:/iotdb/conf /docker-iotdb/iotdb/conf
@@ -443,7 +435,7 @@ or
docker cp iotdb-datanode:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on 3 servers
+**Step 2**: Add the `/conf` directory mapping in both `confignode.yml` and `datanode.yml` on all 3 servers:
```Bash
#confignode.yml
@@ -465,11 +457,10 @@ Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on
- /dev/mem:/dev/mem:ro
```
-Step 3: Restart IoTDB on 3 servers
+**Step 3**: Restart IoTDB on all 3 servers:
```Bash
cd /docker-iotdb
docker-compose -f confignode.yml up -d
docker-compose -f datanode.yml up -d
-```
-
+```
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md
index 1936905cf..e6cb33081 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md
@@ -22,95 +22,100 @@
-## What is a double active version?
+## 1 What is Dual-Active Mode?
-Dual active usually refers to two independent machines (or clusters) that perform real-time mirror synchronization. Their configurations are completely independent and can simultaneously receive external writes. Each independent machine (or cluster) can synchronize the data written to itself to another machine (or cluster), and the data of the two machines (or clusters) can achieve final consistency.
+Dual-active mode refers to two independent instances (either standalone or clusters) with completely independent configurations. These instances can simultaneously handle external read and write operations, with real-time bi-directional synchronization and breakpoint recovery capabilities.
-- Two standalone machines (or clusters) can form a high availability group: when one of the standalone machines (or clusters) stops serving, the other standalone machine (or cluster) will not be affected. When the single machine (or cluster) that stopped the service is restarted, another single machine (or cluster) will synchronize the newly written data. Business can be bound to two standalone machines (or clusters) for read and write operations, thereby achieving high availability.
-- The dual active deployment scheme allows for high availability with fewer than 3 physical nodes and has certain advantages in deployment costs. At the same time, the physical supply isolation of two sets of single machines (or clusters) can be achieved through the dual ring network of power and network, ensuring the stability of operation.
-- At present, the dual active capability is a feature of the enterprise version.
+Key features include:
+
+- **Mutual Backup of Instances**: If one instance stops service, the other remains unaffected. When the stopped instance resumes, the other instance will synchronize newly written data. Businesses can bind both instances for read and write operations, achieving high availability.
+- **Cost-Effective Deployment**: The dual-active deployment solution achieves high availability with only two physical nodes, offering cost advantages. Additionally, physical resource isolation for the two instances can be ensured by leveraging dual-ring power and network designs, enhancing operational stability.
+
+**Note:** The dual-active functionality is exclusively available in enterprise-grade TimechoDB.

-## Note
+## 2 Prerequisites
-1. It is recommended to prioritize using `hostname` for IP configuration during deployment to avoid the problem of database failure caused by modifying the host IP in the later stage. To set the hostname, you need to configure `/etc/hosts` on the target server. If the local IP is 192.168.1.3 and the hostname is iotdb-1, you can use the following command to set the server's hostname and configure IoTDB's `cn_internal-address` and` dn_internal-address` using the hostname.
+1. **Hostname Configuration**: It is recommended to prioritize hostname over IP during deployment to avoid issues where the database cannot start due to later changes in the host IP. For instance, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, configure it in `/etc/hosts` using:
- ```Bash
- echo "192.168.1.3 iotdb-1" >> /etc/hosts
- ```
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
-2. Some parameters cannot be modified after the first startup, please refer to the "Installation Steps" section below to set them.
+Use the hostname to configure IoTDB's `cn_internal_address` and `dn_internal_address` (a minimal sketch of these entries follows this list).
-3. Recommend deploying a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department. The steps for deploying the monitoring panel can be referred to [Monitoring Panel Deployment](../Deployment-and-Maintenance/Monitoring-panel-deployment.md)
+2. **Immutable Parameters**: Some parameters cannot be changed after the initial startup. Follow the steps in the "Installation Steps" section to configure them correctly.
-## Installation Steps
+3. **Monitoring Panel**: Deploying a monitoring panel is recommended to monitor key performance indicators and stay informed about the database’s operational status. Contact the Timecho team to obtain the monitoring panel and refer to the corresponding [Monitoring Panel Deployment](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) for deployment steps.
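+
+A minimal sketch of the hostname-based entries mentioned in item 1 (the file names follow IoTDB 1.x conventions and are an assumption; check the `conf` directory of your installation):
+
+```Bash
+# conf/iotdb-confignode.properties
+cn_internal_address=iotdb-1
+
+# conf/iotdb-datanode.properties
+dn_internal_address=iotdb-1
+```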
-Taking the dual active version IoTDB built by two single machines A and B as an example, the IP addresses of A and B are 192.168.1.3 and 192.168.1.4, respectively. Here, we use hostname to represent different hosts. The plan is as follows:
+## 3 Installation Steps
-| Machine | Machine IP | Host Name |
-| ------- | ----------- | --------- |
-| A | 192.168.1.3 | iotdb-1 |
-| B | 192.168.1.4 | iotdb-2 |
+This guide uses two standalone nodes, A and B, to deploy the dual-active version of TimechoDB. The IP addresses and hostnames for the nodes are as follows:
-### Step1:Install Two Independent IoTDBs Separately
+| Machine | IP Address | Hostname |
+| ------- | ----------- | -------- |
+| A | 192.168.1.3 | iotdb-1 |
+| B | 192.168.1.4 | iotdb-2 |
-Install IoTDB on two machines separately, and refer to the deployment documentation for the standalone version [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md),The deployment document for the cluster version can be referred to [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md)。**It is recommended that the configurations of clusters A and B remain consistent to achieve the best dual active effect**
+### Step 1: Install Two Independent TimechoDB Instances
-### Step2:Create A Aata Synchronization Task On Machine A To Machine B
+Install TimechoDB on both machines (A and B) independently. For detailed instructions, refer to the standalone [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md) or cluster [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md) deployment guides.
-- Create a data synchronization process on machine A, where the data on machine A is automatically synchronized to machine B. Use the cli tool in the sbin directory to connect to the IoTDB database on machine A:
+Ensure that configurations for A and B are consistent for optimal dual-active performance.
- ```Bash
- ./sbin/start-cli.sh -h iotdb-1
- ```
+### Step 2: Configure Data Synchronization from Machine A to Machine B
-- Create and start the data synchronization command with the following SQL:
+- Connect to the database on Machine A using the CLI tool from the `sbin` directory:
- ```Bash
- create pipe AB
- with source (
- 'source.mode.double-living' ='true'
- )
- with sink (
- 'sink'='iotdb-thrift-sink',
- 'sink.ip'='iotdb-2',
- 'sink.port'='6667'
- )
- ```
+```Bash
+./sbin/start-cli.sh -h iotdb-1
+```
-- Note: To avoid infinite data loops, it is necessary to set the parameter `source.mode.double-living` on both A and B to `true`, indicating that data transmitted from another pipe will not be forwarded.
+- Then create and start a data synchronization process. Use the following SQL command:
-### Step3:Create A Data Synchronization Task On Machine B To Machine A
+```Bash
+create pipe AB
+with source (
+  'source.mode.double-living'='true'
+)
+with sink (
+  'sink'='iotdb-thrift-sink',
+  'sink.ip'='iotdb-2',
+  'sink.port'='6667'
+)
+```
+
+- **Note:** To avoid infinite data loops, ensure the parameter `source.mode.double-living` is set to `true` on both A and B. This prevents retransmission of data received through the other instance's pipe.
-- Create a data synchronization process on machine B, where the data on machine B is automatically synchronized to machine A. Use the cli tool in the sbin directory to connect to the IoTDB database on machine B
+### Step 3: Configure Data Synchronization from Machine B to Machine A
- ```Bash
- ./sbin/start-cli.sh -h iotdb-2
- ```
+- Connect to the database on Machine B:
+
+```Bash
+./sbin/start-cli.sh -h iotdb-2
+```
- Create and start the pipe with the following SQL:
+- Then create and start the synchronization process with the following SQL command:
- ```Bash
- create pipe BA
- with source (
- 'source.mode.double-living' ='true'
- )
- with sink (
- 'sink'='iotdb-thrift-sink',
- 'sink.ip'='iotdb-1',
- 'sink.port'='6667'
- )
- ```
+```Bash
+create pipe BA
+with source (
+  'source.mode.double-living'='true'
+)
+with sink (
+ 'sink'='iotdb-thrift-sink',
+ 'sink.ip'='iotdb-1',
+ 'sink.port'='6667'
+)
+```
-- Note: To avoid infinite data loops, it is necessary to set the parameter `source.mode.double-living` on both A and B to `true` , indicating that data transmitted from another pipe will not be forwarded.
+- **Note:** To avoid infinite data loops, ensure the parameter `source.mode.double-living` is set to `true` on both A and B. This prevents retransmission of data received through the other instance's pipe.
-### Step4:Validate Deployment
+### Step 4: Verify Deployment
-After the above data synchronization process is created, the dual active cluster can be started.
+#### Check Cluster Status
-#### Check the running status of the cluster
+Run the `show cluster` command on both nodes to verify the status of the TimechoDB services:
```Bash
-#Execute the show cluster command on two nodes respectively to check the status of IoTDB service
show cluster
```
@@ -122,43 +127,42 @@ show cluster

-Ensure that every Configurable Node and DataNode is in the Running state.
+Ensure all `ConfigNode` and `DataNode` processes are in the `Running` state.
-#### Check synchronization status
+#### Check Synchronization Status
-- Check the synchronization status on machine A
+Use the `show pipes` command on both nodes:
```Bash
show pipes
```
-
+Confirm that all pipes are in the `RUNNING` state:
-- Check the synchronization status on machine B
+On machine A:
-```Bash
-show pipes
-```
+
-
+On machine B:
-Ensure that every pipe is in the RUNNING state.
+
-### Step5:Stop Dual Active Version IoTDB
+### Step 5: Stop the Dual-Active Instances
-- Execute the following command on machine A:
+To stop the dual-active instances:
- ```SQL
- ./sbin/start-cli.sh -h iotdb-1 #Log in to CLI
- IoTDB> stop pipe AB #Stop the data synchronization process
- ./sbin/stop-standalone.sh #Stop database service
- ```
+On machine A:
-- Execute the following command on machine B:
+```SQL
+./sbin/start-cli.sh -h iotdb-1 # Log in to CLI
+IoTDB> stop pipe AB # Stop data synchronization
+./sbin/stop-standalone.sh # Stop database service
+```
- ```SQL
- ./sbin/start-cli.sh -h iotdb-2 #Log in to CLI
- IoTDB> stop pipe BA #Stop the data synchronization process
- ./sbin/stop-standalone.sh #Stop database service
- ```
+On machine B:
+```SQL
+./sbin/start-cli.sh -h iotdb-2 # Log in to CLI
+IoTDB> stop pipe BA # Stop data synchronization
+./sbin/stop-standalone.sh # Stop database service
+```
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Environment-Requirements.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Environment-Requirements.md
index 3cd56b38d..a1a168b86 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Environment-Requirements.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Environment-Requirements.md
@@ -20,175 +20,182 @@
-->
# System Requirements
-## Disk Array
-
-### Configuration Suggestions
-
-IoTDB has no strict operation requirements on disk array configuration. It is recommended to use multiple disk arrays to store IoTDB data to achieve the goal of concurrent writing to multiple disk arrays. For configuration, refer to the following suggestions:
-
-1. Physical environment
- System disk: You are advised to use two disks as Raid1, considering only the space occupied by the operating system itself, and do not reserve system disk space for the IoTDB
- Data disk:
- Raid is recommended to protect data on disks
- It is recommended to provide multiple disks (1-6 disks) or disk groups for the IoTDB. (It is not recommended to create a disk array for all disks, as this will affect the maximum performance of the IoTDB.)
-2. Virtual environment
- You are advised to mount multiple hard disks (1-6 disks).
-3. When deploying IoTDB, it is recommended to avoid using network storage devices such as NAS.
-
-### Configuration Example
-
-- Example 1: Four 3.5-inch hard disks
-
-Only a few hard disks are installed on the server. Configure Raid5 directly.
-The recommended configurations are as follows:
-| **Use classification** | **Raid type** | **Disk number** | **Redundancy** | **Available capacity** |
-| ----------- | -------- | -------- | --------- | -------- |
-| system/data disk | RAID5 | 4 | 1 | 3 | is allowed to fail|
-
-- Example 2: Twelve 3.5-inch hard disks
-
-The server is configured with twelve 3.5-inch disks.
-Two disks are recommended as Raid1 system disks. The two data disks can be divided into two Raid5 groups. Each group of five disks can be used as four disks.
-The recommended configurations are as follows:
-| **Use classification** | **Raid type** | **Disk number** | **Redundancy** | **Available capacity** |
-| -------- | -------- | -------- | --------- | -------- |
-| system disk | RAID1 | 2 | 1 | 1 |
-| data disk | RAID5 | 5 | 1 | 4 |
-| data disk | RAID5 | 5 | 1 | 4 |
-- Example 3:24 2.5-inch disks
-
-The server is configured with 24 2.5-inch disks.
-Two disks are recommended as Raid1 system disks. The last two disks can be divided into three Raid5 groups. Each group of seven disks can be used as six disks. The remaining block can be idle or used to store pre-write logs.
-The recommended configurations are as follows:
-| **Use classification** | **Raid type** | **Disk number** | **Redundancy** | **Available capacity** |
-| -------- | -------- | -------- | --------- | -------- |
-| system disk | RAID1 | 2 | 1 | 1 |
-| data disk | RAID5 | 7 | 1 | 6 |
-| data disk | RAID5 | 7 | 1 | 6 |
-| data disk | RAID5 | 7 | 1 | 6 |
-| data disk | NoRaid | 1 | 0 | 1 |
-
-## Operating System
-
-### Version Requirements
-
-IoTDB supports operating systems such as Linux, Windows, and MacOS, while the enterprise version supports domestic CPUs such as Loongson, Phytium, and Kunpeng. It also supports domestic server operating systems such as Neokylin, KylinOS, UOS, and Linx.
-
-### Disk Partition
-
-- The default standard partition mode is recommended. LVM extension and hard disk encryption are not recommended.
-- The system disk needs only the space used by the operating system, and does not need to reserve space for the IoTDB.
-- Each disk group corresponds to only one partition. Data disks (with multiple disk groups, corresponding to raid) do not need additional partitions. All space is used by the IoTDB.
-The following table lists the recommended disk partitioning methods.
+## 1 Disk Array
+
+### 1.1 Configuration Suggestions
+
+IoTDB does not have strict operational requirements for disk array configurations. It is recommended to use multiple disk arrays to store IoTDB data to achieve concurrent writing across multiple disk arrays. The following configuration suggestions can be referenced:
+
+1. Physical Environment
+ - System Disk: It is recommended to use 2 disks for RAID1, considering only the space occupied by the operating system itself. No additional space needs to be reserved for IoTDB on the system disk.
+ - Data Disk:
+ - It is recommended to use RAID for data protection at the disk level.
+ - It is recommended to provide multiple disks (around 1-6) or disk groups for IoTDB (avoiding creating a single disk array with all disks, as it may affect IoTDB's performance ceiling).
+2. Virtual Environment
+ - It is recommended to mount multiple hard drives (around 1-6).
+
+### 1.2 Configuration Examples
+
+- Example 1: 4 x 3.5-inch Hard Drives
+
+ - Since the server has fewer installed hard drives, RAID5 can be directly configured without additional settings.
+
+ - Recommended configuration:
+
+ | Classification | RAID Type | Number of Hard Drives | Redundancy | Usable Drives |
+ | :--------------- | :-------- | :-------------------- | :--------------------- | :------------ |
+ | System/Data Disk | RAID5 | 4 | 1 disk failure allowed | 3 |
+
+- Example 2: 12 x 3.5-inch Hard Drives
+
+ - The server is configured with 12 x 3.5-inch hard drives.
+
+ - The first 2 disks are recommended for RAID1 as the system disk. The data disks can be divided into 2 groups of RAID5, with 5 disks in each group (4 usable).
+
+ - Recommended configuration:
+
+ | Classification | RAID Type | Number of Hard Drives | Redundancy | Usable Drives |
+ | :------------- | :-------- | :-------------------- | :--------------------- | :------------ |
+ | System Disk | RAID1 | 2 | 1 disk failure allowed | 1 |
+ | Data Disk | RAID5 | 5 | 1 disk failure allowed | 4 |
+ | Data Disk | RAID5 | 5 | 1 disk failure allowed | 4 |
+
+- Example 3: 24 x 2.5-inch Hard Drives
+
+ - The server is configured with 24 x 2.5-inch hard drives.
+
+ - The first 2 disks are recommended for RAID1 as the system disk. The remaining disks can be divided into 3 groups of RAID5, with 7 disks in each group (6 usable). The last disk can be left idle or used for storing write-ahead logs.
+
+ - Recommended configuration:
+
+ | Usage Classification | RAID Type | Number of Hard Drives | Redundancy | Usable Drives |
+ | :------------------- | :-------- | :-------------------- | :--------------------- | :------------ |
+ | System Disk | RAID1 | 2 | 1 disk failure allowed | 1 |
+ | Data Disk | RAID5 | 7 | 1 disk failure allowed | 6 |
+ | Data Disk | RAID5 | 7 | 1 disk failure allowed | 6 |
+ | Data Disk | RAID5 | 7 | 1 disk failure allowed | 6 |
+ | Data Disk | No RAID | 1 | Data loss if damaged | 1 |
+
+## 2 Operating System
+
+### 2.1 Version Requirements
+
+IoTDB supports operating systems such as Linux, Windows, and MacOS. TimechoDB also supports Chinese CPUs like Loongson, Phytium, and Kunpeng, as well as Chinese operating systems like Kylin, UOS, and NingSi.
+
+### 2.2 Hard Disk Partitioning
+
+- It is recommended to use the default standard partitioning method. LVM expansion and hard disk encryption are not recommended.
+- The system disk only needs to meet the space requirements of the operating system. No additional space needs to be reserved for IoTDB.
+- Each disk group should correspond to a single partition. Data disks (with multiple disk groups corresponding to RAID) do not need additional partitioning, and all space should be allocated to IoTDB.
+
+Recommended disk partitioning is as follows:
+
-| Disk classification | Disk set | Drive | Capacity | File system type |
-| System disk | Disk group0 | /boot | 1GB | Acquiesce |
-| | | / | Remaining space of the disk group | Acquiesce |
-| Data disk | Disk set1 | /data1 | Full space of disk group1 | Acquiesce |
-| | Disk set2 | /data2 | Full space of disk group2 | Acquiesce |
-| ...... | | | | |
+| Hard Disk Classification | Disk Group | Corresponding Drive Letter | Size | File System Type |
+| :----------------------- | :----------- | :------------------------- | :---------------------------- | :--------------- |
+| System Disk | Disk Group 0 | /boot | 1GB | Default |
+| | | / | Remaining space of disk group | Default |
+| Data Disk | Disk Group 1 | /data1 | Entire space of disk group 1 | Default |
+| | Disk Group 2 | /data2 | Entire space of disk group 2 | Default |
+| ...... | | | | |
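+
+As an illustrative example (a sketch; `/dev/sdb` is a placeholder for a data disk group, so adjust device names and mount points to your environment), a data disk can be formatted and mounted as follows:
+
+```Bash
+# Format the data disk group with a single file system
+mkfs.ext4 /dev/sdb
+# Mount it as a dedicated IoTDB data directory
+mkdir -p /data1
+mount /dev/sdb /data1
+# Persist the mount across reboots
+echo "/dev/sdb /data1 ext4 defaults 0 0" >> /etc/fstab
+```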
-### Network Configuration
-
-1. Disable the firewall
-
-```Bash
-# View firewall
-systemctl status firewalld
-# Disable firewall
-systemctl stop firewalld
-# Disable firewall permanently
-systemctl disable firewalld
-```
-2. Ensure that the required port is not occupied
-
-(1) Check the ports occupied by the cluster: In the default cluster configuration, ConfigNode occupies ports 10710 and 10720, and DataNode occupies ports 6667, 10730, 10740, 10750, 10760, 9090, 9190, and 3000. Ensure that these ports are not occupied. Check methods are as follows:
-
-```Bash
-lsof -i:6667 or netstat -tunp | grep 6667
-lsof -i:10710 or netstat -tunp | grep 10710
-lsof -i:10720 or netstat -tunp | grep 10720
-# If the command outputs, the port is occupied.
-```
-
-(2) Checking the port occupied by the cluster deployment tool: When using the cluster management tool opskit to install and deploy the cluster, enable the SSH remote connection service configuration and open port 22.
-
-```Bash
-yum install openssh-server # Install the ssh service
-systemctl start sshd # Enable port 22
-```
-
-3. Ensure that servers are connected to each other
-
-### Other Configuration
-
-1. Disable the system swap memory
-
-```Bash
-echo "vm.swappiness = 0">> /etc/sysctl.conf
-# The swapoff -a and swapon -a commands are executed together to dump the data in swap back to memory and to empty the data in swap.
-# Do not omit the swappiness setting and just execute swapoff -a; Otherwise, swap automatically opens again after the restart, making the operation invalid.
-swapoff -a && swapon -a
-# Make the configuration take effect without restarting.
-sysctl -p
-# Check memory allocation, expecting swap to be 0
-free -m
-```
-2. Set the maximum number of open files to 65535 to avoid the error of "too many open files".
-
-```Bash
-# View current restrictions
-ulimit -n
-# Temporary changes
-ulimit -n 65535
-# Permanent modification
-echo "* soft nofile 65535" >> /etc/security/limits.conf
-echo "* hard nofile 65535" >> /etc/security/limits.conf
-# View after exiting the current terminal session, expect to display 65535
-ulimit -n
-```
-## Software Dependence
-
-Install the Java runtime environment (Java version >= 1.8). Ensure that jdk environment variables are set. (It is recommended to deploy JDK17 for V1.3.2.2 or later. In some scenarios, the performance of JDK of earlier versions is compromised, and Datanodes cannot be stopped.)
-
-```Bash
-# The following is an example of installing in centos7 using JDK-17:
-tar -zxvf JDk-17_linux-x64_bin.tar # Decompress the JDK file
-Vim ~/.bashrc # Configure the JDK environment
-{ export JAVA_HOME=/usr/lib/jvm/jdk-17.0.9
- export PATH=$JAVA_HOME/bin:$PATH
-} # Add JDK environment variables
-source ~/.bashrc # The configuration takes effect
-java -version # Check the JDK environment
-```
\ No newline at end of file
+
+### 2.3 Network Configuration
+
+1. **Disable Firewall**
+ ```Bash
+ # Check firewall status
+ systemctl status firewalld
+ # Stop firewall
+ systemctl stop firewalld
+ # Permanently disable firewall
+ systemctl disable firewalld
+ ```
+2. **Ensure Required Ports Are Not Occupied**
+ - Cluster Ports: By default, ConfigNode uses ports 10710 and 10720, while DataNode uses ports 6667, 10730, 10740, 10750, 10760, 9090, 9190, and 3000. Ensure these ports are not occupied. Check as follows:
+ ```Bash
+ lsof -i:6667    # or: netstat -tunp | grep 6667
+ lsof -i:10710   # or: netstat -tunp | grep 10710
+ lsof -i:10720   # or: netstat -tunp | grep 10720
+ # If either command produces output, the port is occupied.
+ ```
+ - Cluster Deployment Tool Ports: When using the cluster management tool `opskit` for installation and deployment, ensure the SSH remote connection service is configured and port 22 is open.
+ ```Bash
+ yum install openssh-server # Install SSH service
+ systemctl start sshd # Enable port 22
+ ```
+3. Ensure Network Connectivity Between Servers
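+ A quick connectivity check is to ping every other node from each server (the IPs below are placeholders; substitute your cluster's addresses):
+ ```Bash
+ ping -c 3 192.168.1.4
+ ping -c 3 192.168.1.5
+ ```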
+
+### 2.4 Other Configurations
+
+1. Disable System Swap Memory
+ ```Bash
+ echo "vm.swappiness = 0" >> /etc/sysctl.conf
+ # Execute both swapoff -a and swapon -a to transfer data from swap back to memory and clear swap data.
+ # Do not omit the swappiness setting and only execute swapoff -a; otherwise, swap will automatically reopen after reboot, rendering the operation ineffective.
+ swapoff -a && swapon -a
+ # Apply the configuration without rebooting.
+ sysctl -p
+ # Check memory allocation; swap should be 0.
+ free -m
+ ```
+2. Set System Maximum Open Files to 65535 to avoid "too many open files" errors.
+ ```Bash
+ # Check current limit
+ ulimit -n
+ # Temporarily modify
+ ulimit -n 65535
+ # Permanently modify
+ echo "* soft nofile 65535" >> /etc/security/limits.conf
+ echo "* hard nofile 65535" >> /etc/security/limits.conf
+ # After exiting the current terminal session, check; it should display 65535.
+ ulimit -n
+ ```
+
+
+
+## 3 Software Dependencies
+
+Install the Java Runtime Environment (Java version >= 1.8) and ensure the JDK environment variables are set. (For versions V1.3.2.2 and later, it is recommended to deploy JDK 17 directly; older JDK versions may have performance issues in some scenarios, and DataNode may fail to stop.)
+
+ ```Bash
+ # Example of installing JDK 17 on CentOS 7:
+ tar -zxvf jdk-17_linux-x64_bin.tar.gz   # Extract the JDK archive
+ # Append the JDK environment variables to the shell profile:
+ echo 'export JAVA_HOME=/usr/lib/jvm/jdk-17.0.9' >> ~/.bashrc
+ echo 'export PATH=$JAVA_HOME/bin:$PATH' >> ~/.bashrc
+ source ~/.bashrc   # Apply the configuration
+ java -version      # Verify the JDK environment
+ ```
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
new file mode 100644
index 000000000..45aeedd4e
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -0,0 +1,47 @@
+
+# Obtain IoTDB
+
+## 1 How to obtain IoTDB
+
+The installation package can be downloaded directly from the Apache IoTDB official website: https://iotdb.apache.org/Download/
+
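+For example, the downloaded package can be unpacked from the command line:
+
+```Bash
+# {version} is a placeholder for the release you downloaded
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
+```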
+
+## 2 Installation Package Structure
+
+
+After decompressing the installation package (`apache-iotdb-{version}-all-bin.zip`), the directory structure is as follows:
+
+| **Directory**    | **Type** | **Description** |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md
index 57cad838b..261c8a10f 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md
@@ -19,24 +19,28 @@
-->
# Obtain TimechoDB
-## How to obtain TimechoDB
-The enterprise version installation package can be obtained through product trial application or by directly contacting the business personnel who are in contact with you.
-## Installation Package Structure
-The directory structure after unpacking the installation package is as follows:
-| **catalogue** | **Type** | **Explanation** |
-| :--------------: | -------- | ------------------------------------------------------------ |
-| activation | folder | The directory where the activation file is located, including the generated machine code and the enterprise version activation code obtained from the business side (this directory will only be generated after starting ConfigNode to obtain the activation code) |
-| conf | folder | Configuration file directory, including configuration files such as ConfigNode, DataNode, JMX, and logback |
-| data | folder | The default data file directory contains data files for ConfigNode and DataNode. (The directory will only be generated after starting the program) |
-| lib | folder | IoTDB executable library file directory |
-| licenses | folder | Open source community certificate file directory |
-| logs | folder | The default log file directory, which includes log files for ConfigNode and DataNode (this directory will only be generated after starting the program) |
-| sbin | folder | Main script directory, including start, stop, and other scripts |
-| tools | folder | Directory of System Peripheral Tools |
-| ext | folder | Related files for pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | file | certificate |
-| NOTICE | file | Tip |
-| README_ZH\.md | file | Explanation of the Chinese version in Markdown format |
-| README\.md | file | Instructions for use |
-| RELEASE_NOTES\.md | file | Version Description |
+## 1 How to obtain TimechoDB
+
+The TimechoDB installation package can be obtained through product trial application or by directly contacting the Timecho team.
+
+## 2 Installation Package Structure
+
+After unpacking the installation package (`iotdb-enterprise-{version}-bin.zip`), the directory structure is as follows:
+
+| **Directory**    | **Type** | **Description** |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| activation | Folder | Directory for activation files, including the generated machine code and the TimechoDB activation code obtained from Timecho staff. *(This directory is generated after starting the ConfigNode; the machine code in it is used to obtain the activation code.)* |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md
index 96b91f273..9bd72b9fc 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md
@@ -20,66 +20,72 @@
-->
# Monitoring Panel Deployment
-The IoTDB monitoring panel is one of the supporting tools for the IoTDB Enterprise Edition. It aims to solve the monitoring problems of IoTDB and its operating system, mainly including operating system resource monitoring, IoTDB performance monitoring, and hundreds of kernel monitoring indicators, in order to help users monitor the health status of the cluster, and perform cluster optimization and operation. This article will take common 3C3D clusters (3 Confignodes and 3 Datanodes) as examples to introduce how to enable the system monitoring module in an IoTDB instance and use Prometheus+Grafana to visualize the system monitoring indicators.
+The monitoring panel is one of the supporting tools for TimechoDB. It aims to solve the monitoring problems of TimechoDB and its operating system, mainly including operating system resource monitoring, TimechoDB performance monitoring, and hundreds of kernel monitoring metrics, in order to help users monitor cluster health, optimize performance, and perform maintenance. This guide demonstrates how to enable the system monitoring module in a TimechoDB instance and visualize monitoring metrics using Prometheus + Grafana, using a typical 3C3D cluster (3 ConfigNodes and 3 DataNodes) as an example.
## Installation Preparation
1. Installing IoTDB: You need to first install IoTDB V1.0 or above Enterprise Edition. You can contact business or technical support to obtain
2. Obtain the IoTDB monitoring panel installation package: Based on the enterprise version of IoTDB database monitoring panel, you can contact business or technical support to obtain
-## Installation Steps
+## 1 Installation Preparation
-### Step 1: IoTDB enables monitoring indicator collection
+1. Installing TimechoDB: Install TimechoDB V1.0 or above. Contact sales or technical support to obtain the installation package.
-1. Open the monitoring configuration item. The configuration items related to monitoring in IoTDB are disabled by default. Before deploying the monitoring panel, you need to open the relevant configuration items (note that the service needs to be restarted after enabling monitoring configuration).
+2. Obtain the monitoring panel installation package: The monitoring panel is exclusive to the enterprise-grade TimechoDB. Contact sales or technical support to obtain it.
-| **Configuration** | Located in the configuration file | **Description** |
-| :--------------------------------- | :-------------------------------- | :----------------------------------------------------------- |
-| cn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration item and set the value to PROMETHEUS |
-| cn_metric_level | conf/iotdb-system.properties | Uncomment the configuration item and set the value to IMPORTANT |
-| cn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration item to maintain the default setting of 9091. If other ports are set, they will not conflict with each other |
-| dn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration item and set the value to PROMETHEUS |
-| dn_metric_level | conf/iotdb-system.properties | Uncomment the configuration item and set the value to IMPORTANT |
-| dn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration item and set it to 9092 by default. If other ports are set, they will not conflict with each other |
+## 2 Installation Steps
+
+### Step 1: Enable Monitoring Metrics Collection in TimechoDB
+
+1. Enable related configuration options. The configuration options related to monitoring in TimechoDB are disabled by default. Before deploying the monitoring panel, you need to enable certain configuration options (note that the service needs to be restarted after enabling monitoring configuration).
+
+| **Configuration** | **Configuration File** | **Description** |
+| :--------------------------------- | :------------------------------- | :----------------------------------------------------------- |
+| cn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration option and set the value to PROMETHEUS |
+| cn_metric_level | conf/iotdb-system.properties | Uncomment the configuration option and set the value to IMPORTANT |
+| cn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration option and keep the default port `9091` or set another port (ensure no conflict) |
+| dn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration option and set the value to PROMETHEUS |
+| dn_metric_level | conf/iotdb-system.properties | Uncomment the configuration option and set the value to IMPORTANT |
+| dn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration option and keep the default port `9092` or set another port (ensure no conflict) |
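+
+A quick way to confirm the options took effect is to grep the configuration file:
+
+```Bash
+# Print the metric-related options that are currently set
+grep -E "^(cn|dn)_metric" conf/iotdb-system.properties
+```
+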
Taking the 3C3D cluster as an example, the monitoring configuration that needs to be modified is as follows:
-| Node IP | Host Name | Cluster Role | Configuration File Path | Configuration |
-| ----------- | --------- | ------------ | -------------------------------- | ------------------------------------------------------------ |
+| Node IP | Host Name | Cluster Role | Configuration File Path | Configuration |
+| ----------- | --------- | ------------ | ---------------------------- | ------------------------------------------------------------ |
| 192.168.1.3 | iotdb-1 | confignode | conf/iotdb-system.properties | cn_metric_reporter_list=PROMETHEUS cn_metric_level=IMPORTANT cn_metric_prometheus_reporter_port=9091 |
| 192.168.1.4 | iotdb-2 | confignode | conf/iotdb-system.properties | cn_metric_reporter_list=PROMETHEUS cn_metric_level=IMPORTANT cn_metric_prometheus_reporter_port=9091 |
| 192.168.1.5 | iotdb-3 | confignode | conf/iotdb-system.properties | cn_metric_reporter_list=PROMETHEUS cn_metric_level=IMPORTANT cn_metric_prometheus_reporter_port=9091 |
-| 192.168.1.3 | iotdb-1 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
-| 192.168.1.4 | iotdb-2 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
-| 192.168.1.5 | iotdb-3 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
+| 192.168.1.3 | iotdb-1 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
+| 192.168.1.4 | iotdb-2 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
+| 192.168.1.5 | iotdb-3 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
-2. Restart all nodes. After modifying the monitoring indicator configuration of three nodes, the confignode and datanode services of all nodes can be restarted:
+2. Restart all nodes. After modifying the monitoring configurations on all 3 nodes, restart the ConfigNode and DataNode services:
```Bash
-./sbin/stop-standalone.sh #Stop confignode and datanode first
-./sbin/start-confignode.sh -d #Start confignode
-./sbin/start-datanode.sh -d #Start datanode
-```
+ ./sbin/stop-standalone.sh #Stop confignode and datanode first
+ ./sbin/start-confignode.sh -d #Start confignode
+ ./sbin/start-datanode.sh -d #Start datanode
+ ```
-3. After restarting, confirm the running status of each node through the client. If the status is Running, it indicates successful configuration:
+3. After restarting, confirm the running status of each node through the client. If all nodes are running, the configuration is successful.
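+ A minimal check from the CLI might look like this (the default credentials root/root are an assumption for a fresh installation):
+ ```Bash
+ ./sbin/start-cli.sh -h 192.168.1.3   # Connect to one of the DataNodes
+ IoTDB> show cluster                  # All ConfigNodes and DataNodes should report Running
+ ```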

-### Step 2: Install and configure Prometheus
+### Step 2: Install and Configure Prometheus
-> Taking Prometheus installed on server 192.168.1.3 as an example.
+> In this example, Prometheus is installed on server 192.168.1.3.
-1. Download the Prometheus installation package, which requires installation of V2.30.3 and above. You can go to the Prometheus official website to download it(https://prometheus.io/docs/introduction/first_steps/)
-2. Unzip the installation package and enter the unzipped folder:
+1. Download Prometheus (version 2.30.3 or later) from the Prometheus website (https://prometheus.io/docs/introduction/first_steps/).
+2. Unzip the installation package and enter the folder:
```Shell
-tar xvfz prometheus-*.tar.gz
-cd prometheus-*
-```
+ tar xvfz prometheus-*.tar.gz
+ cd prometheus-*
+ ```
-3. Modify the configuration. Modify the configuration file prometheus.yml as follows
- 1. Add configNode task to collect monitoring data for ConfigNode
- 2. Add a datanode task to collect monitoring data for DataNodes
+3. Modify the configuration. Modify the configuration file `prometheus.yml` as follows
+ - Add a confignode job to collect monitoring data for ConfigNode
+ - Add a datanode job to collect monitoring data for DataNodes
```YAML
global:
@@ -102,45 +108,46 @@ scrape_configs:
4. Start Prometheus. The default expiration time for Prometheus monitoring data is 15 days. In production environments, it is recommended to adjust it to 180 days or more to track historical monitoring data for a longer period of time. The startup command is as follows:
```Shell
-./prometheus --config.file=prometheus.yml --storage.tsdb.retention.time=180d
-```
+ ./prometheus --config.file=prometheus.yml --storage.tsdb.retention.time=180d
+ ```
-5. Confirm successful startup. Enter in browser http://192.168.1.3:9090 Go to Prometheus and click on the Target interface under Status. When you see that all States are Up, it indicates successful configuration and connectivity.
+5. Confirm successful startup. Open a browser and navigate to http://192.168.1.3:9090. Go to "Status" -> "Targets". If the state of every target is Up, the configuration is successful.
-6. Clicking on the left link in Targets will redirect you to web monitoring and view the monitoring information of the corresponding node:
+6. Click the links in the `Targets` page to view monitoring information for the respective nodes.

-### Step 3: Install Grafana and configure the data source
+### Step 3: Install Grafana and Configure the Data Source
+
+> In this example, Grafana is installed on server 192.168.1.3.
-> Taking Grafana installed on server 192.168.1.3 as an example.
+1. Download Grafana (version 8.4.2 or later) from the Grafana website (https://grafana.com/grafana/download).
-1. Download the Grafana installation package, which requires installing version 8.4.2 or higher. You can go to the Grafana official website to download it(https://grafana.com/grafana/download)
-2. Unzip and enter the corresponding folder
+2. Unzip the installation package and enter the folder:
```Shell
-tar -zxvf grafana-*.tar.gz
-cd grafana-*
-```
+ tar -zxvf grafana-*.tar.gz
+ cd grafana-*
+ ```
3. Start Grafana:
```Shell
-./bin/grafana-server web
-```
+ ./bin/grafana-server web
+ ```
-4. Log in to Grafana. Enter in browser http://192.168.1.3:3000 (or the modified port), enter Grafana, and the default initial username and password are both admin.
+4. Log in to Grafana. Open a browser and navigate to `http://192.168.1.3:3000` (or the modified port). The default initial username and password are both `admin`.
-5. Configure data sources. Find Data sources in Connections, add a new data source, and configure the Data Source to Prometheus
+5. Configure data sources. Navigate to "Connections" -> "Data sources", add a new data source, and select `Prometheus` as the data source type.

-When configuring the Data Source, pay attention to the URL where Prometheus is located. After configuring it, click on Save&Test and a Data Source is working prompt will appear, indicating successful configuration
+Ensure the URL for Prometheus is correct. Click "Save & Test". If the message "Data source is working" appears, the configuration is successful.

@@ -158,19 +165,19 @@ When configuring the Data Source, pay attention to the URL where Prometheus is l

-4. Select the JSON file of one of the panels in the IoTDB monitoring panel, using the Apache IoTDB ConfigNode Dashboard as an example (refer to the installation preparation section in this article for the monitoring panel installation package):
+4. Choose the JSON file of one of the dashboards from the monitoring panel package (e.g., `Apache IoTDB ConfigNode Dashboard`; see Installation Preparation for how to obtain the package).

-5. Select Prometheus as the data source and click Import
+5. Choose Prometheus as the data source and click "Import"

-6. Afterwards, you can see the imported Apache IoTDB ConfigNode Dashboard monitoring panel
+6. The imported `Apache IoTDB ConfigNode Dashboard` will now be displayed.

-7. Similarly, we can import the Apache IoTDB DataNode Dashboard Apache Performance Overview Dashboard、Apache System Overview Dashboard, You can see the following monitoring panel:
+7. Similarly, import other dashboards such as `Apache IoTDB DataNode Dashboard`, `Apache Performance Overview Dashboard`, and `Apache System Overview Dashboard`.

@@ -178,503 +185,502 @@ When configuring the Data Source, pay attention to the URL where Prometheus is l
-8. At this point, all IoTDB monitoring panels have been imported and monitoring information can now be viewed at any time.
+8. The IoTDB monitoring panel is now fully imported, and you can view monitoring information at any time.

-## Appendix, Detailed Explanation of Monitoring Indicators
-
-### System Dashboard
-
-This panel displays the current usage of system CPU, memory, disk, and network resources, as well as partial status of the JVM.
-
-#### CPU
-
-- CPU Core:CPU cores
-- CPU Load:
- - System CPU Load:The average CPU load and busyness of the entire system during the sampling time
- - Process CPU Load:The proportion of CPU occupied by the IoTDB process during sampling time
-- CPU Time Per Minute:The total CPU time of all processes in the system per minute
-
-#### Memory
-
-- System Memory:The current usage of system memory.
- - Commited vm size: The size of virtual memory allocated by the operating system to running processes.
- - Total physical memory:The total amount of available physical memory in the system.
- - Used physical memory:The total amount of memory already used by the system. Contains the actual amount of memory used by the process and the memory occupied by the operating system buffers/cache.
-- System Swap Memory:Swap Space memory usage.
-- Process Memory:The usage of memory by the IoTDB process.
- - Max Memory:The maximum amount of memory that an IoTDB process can request from the operating system. (Configure the allocated memory size in the datanode env/configure env configuration file)
- - Total Memory:The total amount of memory that the IoTDB process has currently requested from the operating system.
- - Used Memory:The total amount of memory currently used by the IoTDB process.
-
-#### Disk
-
-- Disk Space:
- - Total disk space:The maximum disk space that IoTDB can use.
- - Used disk space:The disk space already used by IoTDB.
-- Log Number Per Minute:The average number of logs at each level of IoTDB per minute during the sampling time.
-- File Count:Number of IoTDB related files
- - all:All file quantities
- - TsFile:Number of TsFiles
- - seq:Number of sequential TsFiles
- - unseq:Number of unsequence TsFiles
- - wal:Number of WAL files
- - cross-temp:Number of cross space merge temp files
- - inner-seq-temp:Number of merged temp files in sequential space
- - innser-unseq-temp:Number of merged temp files in unsequential space
- - mods:Number of tombstone files
-- Open File Count:Number of file handles opened by the system
-- File Size:The size of IoTDB related files. Each sub item corresponds to the size of the corresponding file.
-- Disk I/O Busy Rate:Equivalent to the% util indicator in iostat, it to some extent reflects the level of disk busyness. Each sub item is an indicator corresponding to the disk.
-- Disk I/O Throughput:The average I/O throughput of each disk in the system over a period of time. Each sub item is an indicator corresponding to the disk.
-- Disk I/O Ops:Equivalent to the four indicators of r/s, w/s, rrqm/s, and wrqm/s in iostat, it refers to the number of times a disk performs I/O per second. Read and write refer to the number of times a disk performs a single I/O. Due to the corresponding scheduling algorithm of block devices, in some cases, multiple adjacent I/Os can be merged into one. Merge read and merge write refer to the number of times multiple I/Os are merged into one I/O.
-- Disk I/O Avg Time:Equivalent to the await of iostat, which is the average latency of each I/O request. Separate recording of read and write requests.
-- Disk I/O Avg Size:Equivalent to the avgrq sz of iostat, it reflects the size of each I/O request. Separate recording of read and write requests.
-- Disk I/O Avg Queue Size:Equivalent to avgqu sz in iostat, which is the average length of the I/O request queue.
-- I/O System Call Rate:The frequency of process calls to read and write system calls, similar to IOPS.
-- I/O Throughput:The throughput of process I/O can be divided into two categories: actual-read/write and attemppt-read/write. Actual read and actual write refer to the number of bytes that a process actually causes block devices to perform I/O, excluding the parts processed by Page Cache.
-
-#### JVM
-
-- GC Time Percentage:The proportion of GC time spent by the node JVM in the past minute's time window
-- GC Allocated/Promoted Size Detail: The average size of objects promoted to the old era per minute by the node JVM, as well as the size of objects newly applied for by the new generation/old era and non generational new applications
-- GC Data Size Detail:The long-term surviving object size of the node JVM and the maximum intergenerational allowed value
-- Heap Memory:JVM heap memory usage.
- - Maximum heap memory:The maximum available heap memory size for the JVM.
- - Committed heap memory:The size of heap memory that has been committed by the JVM.
- - Used heap memory:The size of heap memory already used by the JVM.
- - PS Eden Space:The size of the PS Young area.
- - PS Old Space:The size of the PS Old area.
- - PS Survivor Space:The size of the PS survivor area.
- - ...(CMS/G1/ZGC, etc)
-- Off Heap Memory:Out of heap memory usage.
- - direct memory:Out of heap direct memory.
- - mapped memory:Out of heap mapped memory.
-- GC Number Per Minute:The average number of garbage collection attempts per minute by the node JVM, including YGC and FGC
-- GC Time Per Minute:The average time it takes for node JVM to perform garbage collection per minute, including YGC and FGC
-- GC Number Per Minute Detail:The average number of garbage collections per minute by node JVM due to different reasons, including YGC and FGC
-- GC Time Per Minute Detail:The average time spent by node JVM on garbage collection per minute due to different reasons, including YGC and FGC
-- Time Consumed Of Compilation Per Minute:The total time JVM spends compiling per minute
-- The Number of Class:
- - loaded:The number of classes currently loaded by the JVM
- - unloaded:The number of classes uninstalled by the JVM since system startup
-- The Number of Java Thread:The current number of surviving threads in IoTDB. Each sub item represents the number of threads in each state.
-
-#### Network
-
-Eno refers to the network card connected to the public network, while lo refers to the virtual network card.
-
-- Net Speed:The speed of network card sending and receiving data
-- Receive/Transmit Data Size:The size of data packets sent or received by the network card, calculated from system restart
-- Packet Speed:The speed at which the network card sends and receives packets, and one RPC request can correspond to one or more packets
-- Connection Num:The current number of socket connections for the selected process (IoTDB only has TCP)
-
-### Performance Overview Dashboard
-
-#### Cluster Overview
-
-- Total CPU Core:Total CPU cores of cluster machines
-- DataNode CPU Load:CPU usage of each DataNode node in the cluster
-- Disk
- - Total Disk Space: Total disk size of cluster machines
- - DataNode Disk Usage: The disk usage rate of each DataNode in the cluster
-- Total Timeseries: The total number of time series managed by the cluster (including replicas), the actual number of time series needs to be calculated in conjunction with the number of metadata replicas
-- Cluster: Number of ConfigNode and DataNode nodes in the cluster
-- Up Time: The duration of cluster startup until now
-- Total Write Point Per Second: The total number of writes per second in the cluster (including replicas), and the actual total number of writes needs to be analyzed in conjunction with the number of data replicas
-- Memory
- - Total System Memory: Total memory size of cluster machine system
- - Total Swap Memory: Total size of cluster machine swap memory
- - DataNode Process Memory Usage: Memory usage of each DataNode in the cluster
-- Total File Number:Total number of cluster management files
-- Cluster System Overview:Overview of cluster machines, including average DataNode node memory usage and average machine disk usage
-- Total DataBase: The total number of databases managed by the cluster (including replicas)
-- Total DataRegion: The total number of DataRegions managed by the cluster
-- Total SchemaRegion: The total number of SchemeRegions managed by the cluster
-
-#### Node Overview
-
-- CPU Core: The number of CPU cores in the machine where the node is located
-- Disk Space: The disk size of the machine where the node is located
-- Timeseries: Number of time series managed by the machine where the node is located (including replicas)
-- System Overview: System overview of the machine where the node is located, including CPU load, process memory usage ratio, and disk usage ratio
-- Write Point Per Second: The write speed per second of the machine where the node is located (including replicas)
-- System Memory: The system memory size of the machine where the node is located
-- Swap Memory:The swap memory size of the machine where the node is located
-- File Number: Number of files managed by nodes
-
-#### Performance
-
-- Session Idle Time:The total idle time and total busy time of the session connection of the node
-- Client Connection: The client connection status of the node, including the total number of connections and the number of active connections
-- Time Consumed Of Operation: The time consumption of various types of node operations, including average and P99
-- Average Time Consumed Of Interface: The average time consumption of each thrust interface of a node
-- P99 Time Consumed Of Interface: P99 time consumption of various thrust interfaces of nodes
-- Task Number: The number of system tasks for each node
-- Average Time Consumed of Task: The average time spent on various system tasks of a node
-- P99 Time Consumed of Task: P99 time consumption for various system tasks of nodes
-- Operation Per Second: The number of operations per second for a node
-- Mainstream Process
- - Operation Per Second Of Stage: The number of operations per second for each stage of the node's main process
- - Average Time Consumed Of Stage: The average time consumption of each stage in the main process of a node
- - P99 Time Consumed Of Stage: P99 time consumption for each stage of the node's main process
-- Schedule Stage
- - OPS Of Schedule: The number of operations per second in each sub stage of the node schedule stage
- - Average Time Consumed Of Schedule Stage:The average time consumption of each sub stage in the node schedule stage
- - P99 Time Consumed Of Schedule Stage: P99 time consumption for each sub stage of the schedule stage of the node
-- Local Schedule Sub Stages
- - OPS Of Local Schedule Stage: The number of operations per second in each sub stage of the local schedule node
- - Average Time Consumed Of Local Schedule Stage: The average time consumption of each sub stage in the local schedule stage of the node
- - P99 Time Consumed Of Local Schedule Stage: P99 time consumption for each sub stage of the local schedule stage of the node
-- Storage Stage
- - OPS Of Storage Stage: The number of operations per second in each sub stage of the node storage stage
- - Average Time Consumed Of Storage Stage: Average time consumption of each sub stage in the node storage stage
- - P99 Time Consumed Of Storage Stage: P99 time consumption for each sub stage of node storage stage
-- Engine Stage
- - OPS Of Engine Stage: The number of operations per second in each sub stage of the node engine stage
- - Average Time Consumed Of Engine Stage: The average time consumption of each sub stage in the engine stage of a node
- - P99 Time Consumed Of Engine Stage: P99 time consumption of each sub stage in the node engine stage
-
-#### System
-
-- CPU Load: CPU load of nodes
-- CPU Time Per Minute: The CPU time per minute of a node, with the maximum value related to the number of CPU cores
-- GC Time Per Minute:The average GC time per minute for nodes, including YGC and FGC
-- Heap Memory: Node's heap memory usage
-- Off Heap Memory: Non heap memory usage of nodes
-- The Number Of Java Thread: Number of Java threads on nodes
-- File Count:Number of files managed by nodes
-- File Size: Node management file size situation
-- Log Number Per Minute: Different types of logs per minute for nodes
-
-### ConfigNode Dashboard
-
-This panel displays the performance of all management nodes in the cluster, including partitioning, node information, and client connection statistics.
-
-#### Node Overview
-
-- Database Count: Number of databases for nodes
-- Region
- - DataRegion Count:Number of DataRegions for nodes
- - DataRegion Current Status: The state of the DataRegion of the node
- - SchemaRegion Count: Number of SchemeRegions for nodes
- - SchemaRegion Current Status: The state of the SchemeRegion of the node
-- System Memory: The system memory size of the node
-- Swap Memory: Node's swap memory size
-- ConfigNodes: The running status of the ConfigNode in the cluster where the node is located
-- DataNodes:The DataNode situation of the cluster where the node is located
-- System Overview: System overview of nodes, including system memory, disk usage, process memory, and CPU load
-
-#### NodeInfo
-
-- Node Count: The number of nodes in the cluster where the node is located, including ConfigNode and DataNode
-- ConfigNode Status: The status of the ConfigNode node in the cluster where the node is located
-- DataNode Status: The status of the DataNode node in the cluster where the node is located
-- SchemaRegion Distribution: The distribution of SchemaRegions in the cluster where the node is located
-- SchemaRegionGroup Leader Distribution: The distribution of leaders in the SchemaRegionGroup of the cluster where the node is located
-- DataRegion Distribution: The distribution of DataRegions in the cluster where the node is located
-- DataRegionGroup Leader Distribution:The distribution of leaders in the DataRegionGroup of the cluster where the node is located
-
-#### Protocol
-
-- Client Count
- - Active Client Num: The number of active clients in each thread pool of a node
- - Idle Client Num: The number of idle clients in each thread pool of a node
- - Borrowed Client Count: Number of borrowed clients in each thread pool of the node
- - Created Client Count: Number of created clients for each thread pool of the node
- - Destroyed Client Count: The number of destroyed clients in each thread pool of the node
-- Client time situation
- - Client Mean Active Time: The average active time of clients in each thread pool of a node
- - Client Mean Borrow Wait Time: The average borrowing waiting time of clients in each thread pool of a node
- - Client Mean Idle Time: The average idle time of clients in each thread pool of a node
-
-#### Partition Table
-
-- SchemaRegionGroup Count: The number of SchemaRegionGroups in the Database of the cluster where the node is located
-- DataRegionGroup Count: The number of DataRegionGroups in the Database of the cluster where the node is located
-- SeriesSlot Count: The number of SeriesSlots in the Database of the cluster where the node is located
-- TimeSlot Count: The number of TimeSlots in the Database of the cluster where the node is located
-- DataRegion Status: The DataRegion status of the cluster where the node is located
-- SchemaRegion Status: The status of the SchemeRegion of the cluster where the node is located
-
-#### Consensus
-
-- Ratis Stage Time: The time consumption of each stage of the node's Ratis
-- Write Log Entry: The time required to write a log for the Ratis of a node
-- Remote / Local Write Time: The time consumption of remote and local writes for the Ratis of nodes
-- Remote / Local Write QPS: Remote and local QPS written to node Ratis
-- RatisConsensus Memory: Memory usage of Node Ratis consensus protocol
-
-### DataNode Dashboard
-
-This panel displays the monitoring status of all data nodes in the cluster, including write time, query time, number of stored files, etc.
-
-#### Node Overview
-
-- The Number Of Entity: Entity situation of node management
-- Write Point Per Second: The write speed per second of the node
-- Memory Usage: The memory usage of the node, including the memory usage of various parts of IoT Consensus, the total memory usage of SchemaRegion, and the memory usage of various databases.
-
-#### Protocol
-
-- Node Operation Time Consumption
- - The Time Consumed Of Operation (avg): The average time spent on various operations of a node
- - The Time Consumed Of Operation (50%): The median time spent on various operations of a node
- - The Time Consumed Of Operation (99%): P99 time consumption for various operations of nodes
-- Thrift Statistics
- - The QPS Of Interface: QPS of various Thrift interfaces of nodes
- - The Avg Time Consumed Of Interface: The average time consumption of each Thrift interface of a node
- - Thrift Connection: The number of Thrfit connections of each type of node
- - Thrift Active Thread: The number of active Thrift connections for each type of node
-- Client Statistics
- - Active Client Num: The number of active clients in each thread pool of a node
- - Idle Client Num: The number of idle clients in each thread pool of a node
- - Borrowed Client Count:Number of borrowed clients for each thread pool of a node
- - Created Client Count: Number of created clients for each thread pool of the node
- - Destroyed Client Count: The number of destroyed clients in each thread pool of the node
- - Client Mean Active Time: The average active time of clients in each thread pool of a node
- - Client Mean Borrow Wait Time: The average borrowing waiting time of clients in each thread pool of a node
- - Client Mean Idle Time: The average idle time of clients in each thread pool of a node
-
-#### Storage Engine
-
-- File Count: Number of files of various types managed by nodes
-- File Size: Node management of various types of file sizes
-- TsFile
- - TsFile Total Size In Each Level: The total size of TsFile files at each level of node management
- - TsFile Count In Each Level: Number of TsFile files at each level of node management
- - Avg TsFile Size In Each Level: The average size of TsFile files at each level of node management
-- Task Number: Number of Tasks for Nodes
-- The Time Consumed of Task: The time consumption of tasks for nodes
-- Compaction
- - Compaction Read And Write Per Second: The merge read and write speed of nodes per second
- - Compaction Number Per Minute: The number of merged nodes per minute
- - Compaction Process Chunk Status: The number of Chunks in different states merged by nodes
- - Compacted Point Num Per Minute: The number of merged nodes per minute
-
-#### Write Performance
-
-- Write Cost(avg): Average node write time, including writing wal and memtable
-- Write Cost(50%): Median node write time, including writing wal and memtable
-- Write Cost(99%): P99 for node write time, including writing wal and memtable
-- WAL
- - WAL File Size: Total size of WAL files managed by nodes
- - WAL File Num:Number of WAL files managed by nodes
- - WAL Nodes Num: Number of WAL nodes managed by nodes
- - Make Checkpoint Costs: The time required to create various types of CheckPoints for nodes
- - WAL Serialize Total Cost: Total time spent on node WAL serialization
- - Data Region Mem Cost: Memory usage of different DataRegions of nodes, total memory usage of DataRegions of the current instance, and total memory usage of DataRegions of the current cluster
- - Serialize One WAL Info Entry Cost: Node serialization time for a WAL Info Entry
- - Oldest MemTable Ram Cost When Cause Snapshot: MemTable size when node WAL triggers oldest MemTable snapshot
- - Oldest MemTable Ram Cost When Cause Flush: MemTable size when node WAL triggers oldest MemTable flush
- - Effective Info Ratio Of WALNode: The effective information ratio of different WALNodes of nodes
+## 3 Appendix: Detailed Monitoring Metrics
+
+### 3.1 System Dashboard
+
+This dashboard displays the current system's **CPU, memory, disk, and network resources**, as well as some **JVM-related metrics**.
+
+#### 3.1.1 CPU
+
+- **CPU Core:** Number of CPU cores.
+- **CPU Load:**
+ - **System CPU Load:** The average CPU load and utilization of the entire system during the sampling period.
+ - **Process CPU Load:** The percentage of CPU resources occupied by the IoTDB process during the sampling period.
+- **CPU Time Per Minute:** The total CPU time consumed by all processes in the system per minute.
+
+#### 3.1.2 Memory
+
+- **System Memory:** Current system memory usage.
+ - **Committed VM Size:** Virtual memory size allocated by the operating system to running processes.
+ - **Total Physical Memory:** Total available physical memory in the system.
+ - **Used Physical Memory:** The total amount of memory currently in use, including memory actively used by processes and memory occupied by the operating system for buffers and caching.
+- **System Swap Memory:** The amount of swap space memory in use.
+- **Process Memory:** Memory usage of the IoTDB process.
+ - **Max Memory:** The maximum amount of memory that the IoTDB process can request from the OS (configured in the `datanode-env`/`confignode-env` configuration files).
+ - **Total Memory:** The total amount of memory currently allocated by the IoTDB process from the OS.
+ - **Used Memory:** The total amount of memory currently in use by the IoTDB process.
+
+#### 3.1.3 Disk
+
+- **Disk Space:**
+ - **Total Disk Space:** Maximum disk space available for IoTDB.
+ - **Used Disk Space:** Disk space currently occupied by IoTDB.
+- **Log Number Per Minute:** Average number of IoTDB logs generated per minute, categorized by log levels.
+- **File Count:** The number of files related to IoTDB.
+ - **All:** Total number of files.
+ - **TsFile:** Number of TsFiles.
+ - **Seq:** Number of sequential TsFiles.
+ - **Unseq:** Number of unordered TsFiles.
+ - **WAL:** Number of WAL (Write-Ahead Log) files.
+ - **Cross-Temp:** Number of temporary files generated during cross-space merge operations.
+ - **Inner-Seq-Temp:** Number of temporary files generated during sequential-space merge operations.
+ - **Inner-Unseq-Temp:** Number of temporary files generated during unordered-space merge operations.
+ - **Mods:** Number of tombstone files.
+- **Open File Count:** Number of open file handles in the system.
+- **File Size:** The size of IoTDB-related files, with each sub-item representing the size of a specific file type.
+- **Disk I/O Busy Rate:** Equivalent to the `%util` metric in `iostat`, indicating the level of disk utilization. Each sub-item corresponds to a specific disk.
+- **Disk I/O Throughput:** Average I/O throughput of system disks over a given period. Each sub-item corresponds to a specific disk.
+- **Disk I/O Ops:** Equivalent to `r/s`, `w/s`, `rrqm/s`, and `wrqm/s` in `iostat`, representing the number of I/O operations per second.
+- **Disk I/O Avg Time:** Equivalent to the `await` metric in `iostat`, representing the average latency of each I/O request, recorded separately for read and write operations.
+- **Disk I/O Avg Size:** Equivalent to the `avgrq-sz` metric in `iostat`, indicating the average size of each I/O request, recorded separately for read and write operations.
+- **Disk I/O Avg Queue Size:** Equivalent to `avgqu-sz` in `iostat`, representing the average length of the I/O request queue.
+- **I/O System Call Rate:** Frequency of read/write system calls invoked by the process, similar to IOPS.
+- **I/O Throughput:** I/O throughput of the process, divided into `actual_read/write` and `attempt_read/write`. `Actual read` and `actual write` refer to the number of bytes actually written to or read from the storage device, excluding those handled by the Page Cache.
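+
+For reference, these disk metrics can be cross-checked on the host with `iostat` from the sysstat package (a usage sketch; field names vary slightly across sysstat versions):
+
+```Bash
+# Extended per-device statistics refreshed every second; compare %util, r/s, w/s,
+# await, avgrq-sz, and avgqu-sz with the dashboard panels described above.
+iostat -x 1
+```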
+
+#### 3.1.4 JVM
+
+- **GC Time Percentage:** Percentage of time spent on garbage collection (GC) by the JVM in the past minute.
+- **GC Allocated/Promoted Size Detail:** The average size of objects promoted to the old generation per minute, as well as newly allocated objects in the young/old generation and non-generational areas.
+- **GC Data Size Detail:** Size of long-lived objects in the JVM and the maximum allowed size for each generation.
+- **Heap Memory:** JVM heap memory usage.
+ - **Maximum Heap Memory:** Maximum available heap memory for the JVM.
+ - **Committed Heap Memory:** Committed heap memory size for the JVM.
+ - **Used Heap Memory:** The amount of heap memory currently in use.
+ - **PS Eden Space:** Size of the PS Young generation's Eden space.
+ - **PS Old Space:** Size of the PS Old generation.
+ - **PS Survivor Space:** Size of the PS Survivor space.
+- **Off Heap Memory:** Off-heap memory usage.
+ - **Direct Memory:** The amount of direct memory used.
+ - **Mapped Memory:** The amount of memory used for mapped files.
+- **GC Number Per Minute:** Average number of garbage collections (YGC and FGC) performed per minute.
+- **GC Time Per Minute:** Average time spent on garbage collection (YGC and FGC) per minute.
+- **GC Number Per Minute Detail:** Average number of garbage collections performed per minute due to different causes.
+- **GC Time Per Minute Detail:** Average time spent on garbage collection per minute due to different causes.
+- **Time Consumed of Compilation Per Minute:** Total time spent on JVM compilation per minute.
+- **The Number of Class:**
+ - **Loaded:** Number of classes currently loaded by the JVM.
+ - **Unloaded:** Number of classes unloaded by the JVM since system startup.
+- **The Number of Java Thread:** The number of currently active threads in IoTDB. Each sub-item represents the number of threads in different states.
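+
+These JVM panels can be cross-checked with the JDK's own tools (a sketch; replace <pid> with the IoTDB process ID, e.g. from `jps`):
+
+```Bash
+jps                        # List Java processes to find the ConfigNode/DataNode PID
+jstat -gcutil <pid> 1000   # GC utilization per generation, sampled every second
+```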
+
+#### 3.1.5 Network
+
+- **Net Speed:** Data transmission and reception speed by the network interface.
+- **Receive/Transmit Data Size:** The total size of data packets sent and received by the network interface since system startup.
+- **Packet Speed:** The rate of data packets sent and received by the network interface. A single RPC request may correspond to one or more packets.
+- **Connection Num:** Number of socket connections for the current process (IoTDB only uses TCP).
+
+### 3.2 Performance Overview Dashboard
+
+This dashboard provides an overview of the system's overall performance.
+
+#### 3.2.1 Cluster Overview
+
+- **Total CPU Core:** Total number of CPU cores in the cluster.
+- **DataNode CPU Load:** CPU utilization of each DataNode in the cluster.
+- Disk:
+ - **Total Disk Space:** Total disk space across all cluster nodes.
+ - **DataNode Disk Usage:** Disk usage of each DataNode in the cluster.
+- **Total Timeseries:** The total number of time series managed by the cluster (including replicas). The actual number of time series should be calculated considering metadata replicas (for example, with a metadata replication factor of 3, the actual count is one third of the displayed value).
+- **Cluster:** The number of ConfigNode and DataNode instances in the cluster.
+- **Up Time:** The duration since the cluster started.
+- **Total Write Point Per Second:** The total number of data points written per second in the cluster (including replicas). The actual write rate should be analyzed in conjunction with the data replication factor (for example, with a data replication factor of 2, the actual rate is half the displayed value).
+- Memory:
+ - **Total System Memory:** The total system memory available in the cluster.
+ - **Total Swap Memory:** The total swap memory available in the cluster.
+ - **DataNode Process Memory Usage:** The memory usage of each DataNode in the cluster.
+- **Total File Number:** The total number of files managed by the cluster.
+- **Cluster System Overview:** An overview of cluster-wide system resources, including average DataNode memory usage and average disk usage.
+- **Total Database:** The total number of databases managed by the cluster (including replicas).
+- **Total DataRegion:** The total number of DataRegions in the cluster.
+- **Total SchemaRegion:** The total number of SchemaRegions in the cluster.
+
+#### 3.2.2 Node Overview
+
+- **CPU Core:** Number of CPU cores on the node’s machine.
+- **Disk Space:** Total disk space available on the node’s machine.
+- **Timeseries:** The number of time series managed by the node (including replicas).
+- **System Overview:** Overview of the node’s system resources, including CPU load, process memory usage, and disk usage.
+- **Write Point Per Second:** The write speed of the node, including replicated data.
+- **System Memory:** The total system memory available on the node’s machine.
+- **Swap Memory:** The total swap memory available on the node’s machine.
+- **File Number:** The number of files managed by the node.
+
+#### 3.2.3 Performance
+
+- **Session Idle Time:** The total idle time of session connections on the node.
+- **Client Connection:** The status of client connections on the node, including the total number of connections and the number of active connections.
+- **Time Consumed Of Operation:** The latency of various operations on the node, including the average value and P99 percentile.
+- **Average Time Consumed Of Interface:** The average latency of each **Thrift interface** on the node.
+- **P99 Time Consumed Of Interface:** The P99 latency of each Thrift interface on the node.
+- **Task Number:** The number of system tasks running on the node.
+- **Average Time Consumed Of Task:** The average execution time of system tasks on the node.
+- **P99 Time Consumed Of Task:** The P99 execution time of system tasks on the node.
+- **Operation Per Second:** The number of operations executed per second on the node.
+- Main Process:
+ - **Operation Per Second of Stage:** The number of operations executed per second in different stages of the main process.
+ - **Average Time Consumed of Stage:** The average execution time of different stages in the main process.
+ - **P99 Time Consumed of Stage:** The P99 execution time of different stages in the main process.
+- Scheduling Stage:
+ - **OPS Of Schedule:** The number of operations executed per second in different sub-stages of the scheduling stage.
+ - **Average Time Consumed Of Schedule Stage:** The average execution time in different sub-stages of the scheduling stage.
+ - **P99 Time Consumed Of Schedule Stage:** The P99 execution time in different sub-stages of the scheduling stage.
+- Local Scheduling Stage:
+ - **OPS of Local Schedule Stage:** Number of operations per second at each sub-stage of the local schedule stage.
+ - **Average Time Consumed of Local Schedule Stage:** Average time consumed at each sub-stage of the local schedule stage.
+ - **P99 Time Consumed of Local Schedule Stage:** P99 time consumed at each sub-stage of the local schedule stage.
+- Storage Stage:
+ - **OPS of Storage Stage:** Number of operations per second at each sub-stage of the storage stage.
+ - **Average Time Consumed of Storage Stage:** Average time consumed at each sub-stage of the storage stage.
+ - **P99 Time Consumed of Storage Stage:** P99 time consumed at each sub-stage of the storage stage.
+- Engine Stage:
+ - **OPS Of Engine Stage:** The number of operations executed per second in different sub-stages of the engine stage.
+ - **Average Time Consumed Of Engine Stage:** The average execution time in different sub-stages of the engine stage.
+ - **P99 Time Consumed Of Engine Stage:** The P99 execution time in different sub-stages of the engine stage.
+
+#### 3.2.4 System
+
+- **CPU Load:** The CPU load of the node.
+- **CPU Time Per Minute:** The total CPU time per minute on the node, which is influenced by the number of CPU cores.
+- **GC Time Per Minute:** The average time spent on Garbage Collection (GC) per minute on the node, including Young GC (YGC) and Full GC (FGC).
+- **Heap Memory:** The heap memory usage of the node.
+- **Off-Heap Memory:** The off-heap memory usage of the node.
+- **The Number Of Java Thread:** The number of Java threads on the node.
+- **File Count:** The number of files managed by the node.
+- **File Size:** The total size of files managed by the node.
+- **Log Number Per Minute:** The number of logs generated per minute on the node, categorized by log type.
+
+### 3.3 ConfigNode Dashboard
+
+This dashboard displays the performance metrics of all management nodes in the cluster, including **partition information, node status, and client connection statistics**.
+
+#### 3.3.1 Node Overview
+
+- **Database Count:** Number of databases on the node.
+- Region:
+ - **DataRegion Count:** Number of DataRegions on the node.
+ - **DataRegion Current Status:** Current status of DataRegions on the node.
+ - **SchemaRegion Count:** Number of SchemaRegions on the node.
+ - **SchemaRegion Current Status:** Current status of SchemaRegions on the node.
+- **System Memory:** System memory on the node's machine.
+- **Swap Memory:** Swap memory on the node's machine.
+- **ConfigNodes:** Status of ConfigNodes in the cluster.
+- **DataNodes:** Status of DataNodes in the cluster.
+- **System Overview:** Overview of the node's system resources, including system memory, disk usage, process memory, and CPU load.
+
+#### 3.3.2 NodeInfo
+
+- **Node Count:** The total number of nodes in the cluster, including ConfigNodes and DataNodes.
+- **ConfigNode Status:** The status of ConfigNodes in the cluster.
+- **DataNode Status:** The status of DataNodes in the cluster.
+- **SchemaRegion Distribution:** The distribution of SchemaRegions in the cluster.
+- **SchemaRegionGroup Leader Distribution:** The leader distribution of SchemaRegionGroups in the cluster.
+- **DataRegion Distribution:** The distribution of DataRegions in the cluster.
+- **DataRegionGroup Leader Distribution:** The leader distribution of DataRegionGroups in the cluster.
+
+#### 3.3.3 Protocol
+
+- Client Count Statistics:
+ - **Active Client Num:** The number of active clients in each thread pool on the node.
+ - **Idle Client Num:** The number of idle clients in each thread pool on the node.
+ - **Borrowed Client Count:** The number of borrowed clients in each thread pool on the node.
+ - **Created Client Count:** The number of clients created in each thread pool on the node.
+ - **Destroyed Client Count:** The number of clients destroyed in each thread pool on the node.
+- Client Time Statistics:
+ - **Client Mean Active Time:** The average active time of clients in each thread pool on the node.
+ - **Client Mean Borrow Wait Time:** The average time clients spend waiting for borrowed resources in each thread pool.
+ - **Client Mean Idle Time:** The average idle time of clients in each thread pool.
+
+#### 3.3.4 Partition Table
+
+- **SchemaRegionGroup Count:** The number of **SchemaRegionGroups** in the cluster’s databases.
+- **DataRegionGroup Count:** The number of DataRegionGroups in the cluster’s databases.
+- **SeriesSlot Count:** The number of SeriesSlots in the cluster’s databases.
+- **TimeSlot Count:** The number of TimeSlots in the cluster’s databases.
+- **DataRegion Status:** The status of DataRegions in the cluster.
+- **SchemaRegion Status:** The status of SchemaRegions in the cluster.
+
+#### 3.3.5 Consensus
+
+- **Ratis Stage Time:** The execution time of different stages in the Ratis consensus protocol.
+- **Write Log Entry:** The execution time for writing log entries in Ratis.
+- **Remote / Local Write Time:** The time taken for remote and local writes in Ratis.
+- **Remote / Local Write QPS:** The **queries per second (QPS)** for remote and local writes in Ratis.
+- **RatisConsensus Memory:** The memory usage of the Ratis consensus protocol on the node.
+
+### 3.4 DataNode Dashboard
+
+This dashboard displays the monitoring status of all **DataNodes** in the cluster, including **write latency, query latency, and storage file counts**.
+
+#### 3.4.1 Node Overview
+
+- **The Number of Entity:** The number of entities managed by the node.
+- **Write Point Per Second:** The write speed of the node (points per second).
+- **Memory Usage:** The memory usage of the node, including IoT Consensus memory usage, SchemaRegion memory usage, and per-database memory usage.
+
+#### 3.4.2 Protocol
+
+- Operation Latency:
+ - **The Time Consumed of Operation (avg):** The average latency of operations on the node.
+ - **The Time Consumed of Operation (50%):** The median latency of operations on the node.
+ - **The Time Consumed of Operation (99%):** The P99 latency of operations on the node.
+- Thrift Statistics:
+ - **The QPS of Interface:** The queries per second (QPS) for each Thrift interface on the node.
+ - **The Avg Time Consumed of Interface:** The average execution time for each Thrift interface on the node.
+ - **Thrift Connection:** The number of active Thrift connections on the node.
+ - **Thrift Active Thread:** The number of active Thrift threads on the node.
+- Client Statistics:
+ - **Active Client Num:** The number of active clients in each thread pool.
+ - **Idle Client Num:** The number of idle clients in each thread pool.
+ - **Borrowed Client Count:** The number of borrowed clients in each thread pool.
+ - **Created Client Count:** The number of clients created in each thread pool.
+ - **Destroyed Client Count:** The number of clients destroyed in each thread pool.
+ - **Client Mean Active Time:** The average active time of clients in each thread pool.
+ - **Client Mean Borrow Wait Time:** The average time clients spend waiting for borrowed resources in each thread pool.
+ - **Client Mean Idle Time:** The average idle time of clients in each thread pool.
+
+#### 3.4.3 Storage Engine
+
+- **File Count:** The number of files managed by the node.
+- **File Size:** The total size of files managed by the node.
+- TsFile:
+ - **TsFile Total Size In Each Level:** The total size of TsFiles at each level.
+ - **TsFile Count In Each Level:** The number of TsFiles at each level.
+ - **Avg TsFile Size In Each Level:** The average size of TsFiles at each level.
+- **Task Number:** The number of tasks on the node.
+- **The Time Consumed of Task:** The total execution time of tasks on the node.
+- Compaction:
+ - **Compaction Read And Write Per Second:** The read/write speed of compaction operations.
+ - **Compaction Number Per Minute:** The number of **compaction** operations per minute.
+ - **Compaction Process Chunk Status:** The number of **chunks** in different states during compaction.
+ - **Compacted Point Num Per Minute:** The number of data points compacted per minute.
+
+#### 3.4.4 Write Performance
+
+- **Write Cost (avg):** The average **write latency**, including WAL and **memtable** writes.
+- **Write Cost (50%):** The **median write latency**, including WAL and **memtable** writes.
+- **Write Cost (99%):** The **P99 write latency**, including WAL and **memtable** writes.
+- WAL (Write-Ahead Logging)
+ - **WAL File Size:** The total size of WAL files managed by the node.
+ - **WAL File Num:** The total number of WAL files managed by the node.
+ - **WAL Nodes Num:** The total number of WAL Nodes managed by the node.
+ - **Make Checkpoint Costs:** The time required to create different types of Checkpoints.
+ - **WAL Serialize Total Cost:** The total serialization time for WAL.
+ - **Data Region Mem Cost:** The memory usage of different DataRegions, including total memory usage of DataRegions on the current instance and total memory usage of DataRegions across the entire cluster.
+ - **Serialize One WAL Info Entry Cost:** The time taken to serialize a single WAL Info Entry.
+ - **Oldest MemTable Ram Cost When Cause Snapshot:** The memory size of the oldest MemTable when a snapshot is triggered by WAL.
+ - **Oldest MemTable Ram Cost When Cause Flush:** The memory size of the oldest MemTable when a flush is triggered by WAL.
+ - **Effective Info Ratio of WALNode:** The ratio of effective information in different WALNodes.
- WAL Buffer
- - WAL Buffer Cost: Node WAL flush SyncBuffer takes time, including both synchronous and asynchronous options
- - WAL Buffer Used Ratio: The usage rate of the WAL Buffer of the node
- - WAL Buffer Entries Count: The number of entries in the WAL Buffer of a node
+ - **WAL Buffer Cost:** The time taken to flush the SyncBuffer of WAL, including both synchronous and asynchronous flushes.
+ - **WAL Buffer Used Ratio:** The utilization ratio of the WAL Buffer.
+ - **WAL Buffer Entries Count:** The number of entries in the WAL Buffer.
- Flush Statistics
- - Flush MemTable Cost(avg): The total time spent on node Flush and the average time spent on each sub stage
- - Flush MemTable Cost(50%): The total time spent on node Flush and the median time spent on each sub stage
- - Flush MemTable Cost(99%): The total time spent on node Flush and the P99 time spent on each sub stage
- - Flush Sub Task Cost(avg): The average time consumption of each node's Flush subtask, including sorting, encoding, and IO stages
- - Flush Sub Task Cost(50%): The median time consumption of each subtask of the Flush node, including sorting, encoding, and IO stages
- - Flush Sub Task Cost(99%): The average subtask time P99 for Flush of nodes, including sorting, encoding, and IO stages
-- Pending Flush Task Num: The number of Flush tasks in a blocked state for a node
-- Pending Flush Sub Task Num: Number of Flush subtasks blocked by nodes
-- Tsfile Compression Ratio Of Flushing MemTable: The compression rate of TsFile corresponding to node flashing Memtable
-- Flush TsFile Size Of DataRegions: The corresponding TsFile size for each disk flush of nodes in different DataRegions
-- Size Of Flushing MemTable: The size of the Memtable for node disk flushing
-- Points Num Of Flushing MemTable: The number of points when flashing data in different DataRegions of a node
-- Series Num Of Flushing MemTable: The number of time series when flashing Memtables in different DataRegions of a node
-- Average Point Num Of Flushing MemChunk: The average number of disk flushing points for node MemChunk
-
-#### Schema Engine
-
-- Schema Engine Mode: The metadata engine pattern of nodes
-- Schema Consensus Protocol: Node metadata consensus protocol
-- Schema Region Number:Number of SchemeRegions managed by nodes
-- Schema Region Memory Overview: The amount of memory in the SchemeRegion of a node
-- Memory Usgae per SchemaRegion:The average memory usage size of node SchemaRegion
-- Cache MNode per SchemaRegion: The number of cache nodes in each SchemeRegion of a node
-- MLog Length and Checkpoint: The total length and checkpoint position of the current mlog for each SchemeRegion of the node (valid only for SimpleConsense)
-- Buffer MNode per SchemaRegion: The number of buffer nodes in each SchemeRegion of a node
-- Activated Template Count per SchemaRegion: The number of activated templates in each SchemeRegion of a node
-- Time Series statistics
- - Timeseries Count per SchemaRegion: The average number of time series for node SchemaRegion
- - Series Type: Number of time series of different types of nodes
- - Time Series Number: The total number of time series nodes
- - Template Series Number: The total number of template time series for nodes
- - Template Series Count per SchemaRegion: The number of sequences created through templates in each SchemeRegion of a node
+ - **Flush MemTable Cost (avg):** The average total flush time, including time spent in different sub-stages.
+ - **Flush MemTable Cost (50%):** The median total flush time, including time spent in different sub-stages.
+ - **Flush MemTable Cost (99%):** The P99 total flush time, including time spent in different sub-stages.
+ - **Flush Sub Task Cost (avg):** The average execution time of flush sub-tasks, including sorting, encoding, and I/O stages.
+ - **Flush Sub Task Cost (50%):** The median execution time of flush sub-tasks, including sorting, encoding, and I/O stages.
+ - **Flush Sub Task Cost (99%):** The P99 execution time of flush sub-tasks, including sorting, encoding, and I/O stages.
+- **Pending Flush Task Num:** The number of Flush tasks currently in a blocked state.
+- **Pending Flush Sub Task Num:** The number of blocked Flush sub-tasks.
+- **TsFile Compression Ratio of Flushing MemTable:** The compression ratio of TsFiles generated from flushed MemTables.
+- **Flush TsFile Size of DataRegions:** The size of TsFiles generated from flushed MemTables in different DataRegions.
+- **Size of Flushing MemTable:** The size of the MemTable currently being flushed.
+- **Points Num of Flushing MemTable:** The number of data points being flushed from MemTables in different DataRegions.
+- **Series Num of Flushing MemTable:** The number of time series being flushed from MemTables in different DataRegions.
+- **Average Point Num of Flushing MemChunk:** The average number of points in MemChunks being flushed.
+
+#### 3.4.5 Schema Engine
+
+- **Schema Engine Mode:** The metadata engine mode used by the node.
+- **Schema Consensus Protocol:** The metadata consensus protocol used by the node.
+- **Schema Region Number:** The number of SchemaRegions managed by the node.
+- **Schema Region Memory Overview:** The total memory used by SchemaRegions on the node.
+- **Memory Usage per SchemaRegion:** The average memory usage per SchemaRegion.
+- **Cache MNode per SchemaRegion:** The number of cached MNodes per SchemaRegion.
+- **MLog Length and Checkpoint:** The current MLog size and checkpoint position for each SchemaRegion (valid only for SimpleConsensus).
+- **Buffer MNode per SchemaRegion:** The number of buffered MNodes per SchemaRegion.
+- **Activated Template Count per SchemaRegion:** The number of activated templates per SchemaRegion.
+- Time Series Statistics
+ - **Timeseries Count per SchemaRegion:** The average number of time series per SchemaRegion.
+ - **Series Type:** The number of time series of different types.
+ - **Time Series Number:** The total number of time series on the node.
+ - **Template Series Number:** The total number of template-based time series on the node.
+ - **Template Series Count per SchemaRegion:** The number of time series created via templates per SchemaRegion.
- IMNode Statistics
- - Pinned MNode per SchemaRegion: Number of IMNode nodes with Pinned nodes in each SchemeRegion
- - Pinned Memory per SchemaRegion: The memory usage size of the IMNode node for Pinned nodes in each SchemeRegion of the node
- - Unpinned MNode per SchemaRegion: The number of unpinned IMNode nodes in each SchemeRegion of a node
- - Unpinned Memory per SchemaRegion: Memory usage size of unpinned IMNode nodes in each SchemeRegion of the node
- - Schema File Memory MNode Number: Number of IMNode nodes with global pinned and unpinned nodes
- - Release and Flush MNode Rate: The number of IMNodes that release and flush nodes per second
-- Cache Hit Rate: Cache hit rate of nodes
-- Release and Flush Thread Number: The current number of active Release and Flush threads on the node
-- Time Consumed of Relead and Flush (avg): The average time taken for node triggered cache release and buffer flushing
-- Time Consumed of Relead and Flush (99%): P99 time consumption for node triggered cache release and buffer flushing
-
-#### Query Engine
-
-- Time Consumption In Each Stage
- - The time consumed of query plan stages(avg): The average time spent on node queries at each stage
- - The time consumed of query plan stages(50%): Median time spent on node queries at each stage
- - The time consumed of query plan stages(99%): P99 time consumption for node query at each stage
-- Execution Plan Distribution Time
- - The time consumed of plan dispatch stages(avg): The average time spent on node query execution plan distribution
- - The time consumed of plan dispatch stages(50%): Median time spent on node query execution plan distribution
- - The time consumed of plan dispatch stages(99%): P99 of node query execution plan distribution time
-- Execution Plan Execution Time
- - The time consumed of query execution stages(avg): The average execution time of node query execution plan
- - The time consumed of query execution stages(50%):Median execution time of node query execution plan
- - The time consumed of query execution stages(99%): P99 of node query execution plan execution time
+ - **Pinned MNode per SchemaRegion:** The number of pinned IMNodes per SchemaRegion.
+ - **Pinned Memory per SchemaRegion:** The memory usage of pinned IMNodes per SchemaRegion.
+ - **Unpinned MNode per SchemaRegion:** The number of unpinned IMNodes per SchemaRegion.
+ - **Unpinned Memory per SchemaRegion:** The memory usage of unpinned IMNodes per SchemaRegion.
+ - **Schema File Memory MNode Number:** The total number of pinned and unpinned IMNodes on the node.
+ - **Release and Flush MNode Rate:** The number of IMNodes released and flushed per second.
+- **Cache Hit Rate:** The cache hit ratio of the node.
+- **Release and Flush Thread Number:** The number of active threads for releasing and flushing memory.
+- **Time Consumed of Release and Flush (avg):** The average execution time for cache release and buffer flush.
+- **Time Consumed of Release and Flush (99%):** The P99 execution time for cache release and buffer flush.
+
+#### 3.4.6 Query Engine
+
+- Time Consumed at Each Stage
+ - **The time consumed of query plan stages (avg):** The average time consumed in different query plan stages on the node.
+ - **The time consumed of query plan stages (50%):** The median time consumed in different query plan stages on the node.
+ - **The time consumed of query plan stages (99%):** The P99 time consumed in different query plan stages on the node.
+- Plan Dispatch Time
+ - **The time consumed of plan dispatch stages (avg):** The average time consumed in query execution plan dispatch.
+ - **The time consumed of plan dispatch stages (50%):** The median time consumed in query execution plan dispatch.
+ - **The time consumed of plan dispatch stages (99%):** The P99 time consumed in query execution plan dispatch.
+- Query Execution Time
+ - **The time consumed of query execution stages (avg):** The average time consumed in query execution on the node.
+ - **The time consumed of query execution stages (50%):** The median time consumed in query execution on the node.
+ - **The time consumed of query execution stages (99%):** The P99 time consumed in query execution on the node.
- Operator Execution Time
- - The time consumed of operator execution stages(avg): The average execution time of node query operators
- - The time consumed of operator execution(50%): Median execution time of node query operator
- - The time consumed of operator execution(99%): P99 of node query operator execution time
+ - **The time consumed of operator execution stages (avg):** The average time consumed in query operator execution.
+ - **The time consumed of operator execution (50%):** The median time consumed in query operator execution.
+  - **The time consumed of operator execution (99%):** The P99 time consumed in query operator execution.
- Aggregation Query Computation Time
- - The time consumed of query aggregation(avg): The average computation time for node aggregation queries
- - The time consumed of query aggregation(50%): Median computation time for node aggregation queries
- - The time consumed of query aggregation(99%): P99 of node aggregation query computation time
-- File/Memory Interface Time Consumption
- - The time consumed of query scan(avg): The average time spent querying file/memory interfaces for nodes
- - The time consumed of query scan(50%): Median time spent querying file/memory interfaces for nodes
- - The time consumed of query scan(99%): P99 time consumption for node query file/memory interface
-- Number Of Resource Visits
- - The usage of query resource(avg): The average number of resource visits for node queries
- - The usage of query resource(50%): Median number of resource visits for node queries
- - The usage of query resource(99%): P99 for node query resource access quantity
+ - **The time consumed of query aggregation (avg):** The average time consumed in aggregation query computation.
+ - **The time consumed of query aggregation (50%):** The median time consumed in aggregation query computation.
+ - **The time consumed of query aggregation (99%):** The P99 time consumed in aggregation query computation.
+- File/Memory Interface Time
+ - **The time consumed of query scan (avg):** The average time consumed in file/memory interface query scans.
+ - **The time consumed of query scan (50%):** The median time consumed in file/memory interface query scans.
+ - **The time consumed of query scan (99%):** The P99 time consumed in file/memory interface query scans.
+- Resource Access Count
+ - **The usage of query resource (avg):** The average number of resource accesses during query execution.
+ - **The usage of query resource (50%):** The median number of resource accesses during query execution.
+ - **The usage of query resource (99%):** The P99 number of resource accesses during query execution.
- Data Transmission Time
- - The time consumed of query data exchange(avg): The average time spent on node query data transmission
- - The time consumed of query data exchange(50%): Median query data transmission time for nodes
- - The time consumed of query data exchange(99%): P99 for node query data transmission time
-- Number Of Data Transfers
- - The count of Data Exchange(avg): The average number of data transfers queried by nodes
- - The count of Data Exchange: The quantile of the number of data transfers queried by nodes, including the median and P99
-- Task Scheduling Quantity And Time Consumption
- - The number of query queue: Node query task scheduling quantity
- - The time consumed of query schedule time(avg): The average time spent on scheduling node query tasks
- - The time consumed of query schedule time(50%): Median time spent on node query task scheduling
- - The time consumed of query schedule time(99%): P99 of node query task scheduling time
-
-#### Query Interface
+ - **The time consumed of query data exchange (avg):** The average time consumed in query data exchange.
+ - **The time consumed of query data exchange (50%):** The median time consumed in query data exchange.
+ - **The time consumed of query data exchange (99%):** The P99 time consumed in query data exchange.
+- Data Transmission Count
+ - **The count of Data Exchange (avg):** The average number of data exchanges during queries.
+ - **The count of Data Exchange:** The quantiles (median, P99) of data exchanges during queries.
+- Task Scheduling Count and Time
+ - **The number of query queue:** The number of query tasks scheduled.
+ - **The time consumed of query schedule time (avg):** The average time consumed for query scheduling.
+ - **The time consumed of query schedule time (50%):** The median time consumed for query scheduling.
+ - **The time consumed of query schedule time (99%):** The P99 time consumed for query scheduling.
+
+#### 3.4.7 Query Interface
- Load Time Series Metadata
- - The time consumed of load timeseries metadata(avg): The average time taken for node queries to load time series metadata
- - The time consumed of load timeseries metadata(50%): Median time spent on loading time series metadata for node queries
- - The time consumed of load timeseries metadata(99%): P99 time consumption for node query loading time series metadata
+ - **The time consumed of load timeseries metadata (avg):** The average time consumed for loading time series metadata.
+ - **The time consumed of load timeseries metadata (50%):** The median time consumed for loading time series metadata.
+ - **The time consumed of load timeseries metadata (99%):** The P99 time consumed for loading time series metadata.
- Read Time Series
- - The time consumed of read timeseries metadata(avg): The average time taken for node queries to read time series
- - The time consumed of read timeseries metadata(50%): The median time taken for node queries to read time series
- - The time consumed of read timeseries metadata(99%): P99 time consumption for node query reading time series
+ - **The time consumed of read timeseries metadata (avg):** The average time consumed for reading time series.
+ - **The time consumed of read timeseries metadata (50%):** The median time consumed for reading time series.
+ - **The time consumed of read timeseries metadata (99%):** The P99 time consumed for reading time series.
- Modify Time Series Metadata
- - The time consumed of timeseries metadata modification(avg):The average time taken for node queries to modify time series metadata
- - The time consumed of timeseries metadata modification(50%): Median time spent on querying and modifying time series metadata for nodes
- - The time consumed of timeseries metadata modification(99%): P99 time consumption for node query and modification of time series metadata
+ - **The time consumed of timeseries metadata modification (avg):** The average time consumed for modifying time series metadata.
+ - **The time consumed of timeseries metadata modification (50%):** The median time consumed for modifying time series metadata.
+ - **The time consumed of timeseries metadata modification (99%):** The P99 time consumed for modifying time series metadata.
- Load Chunk Metadata List
- - The time consumed of load chunk metadata list(avg): The average time it takes for node queries to load Chunk metadata lists
- - The time consumed of load chunk metadata list(50%): Median time spent on node query loading Chunk metadata list
- - The time consumed of load chunk metadata list(99%): P99 time consumption for node query loading Chunk metadata list
+  - **The time consumed of load chunk metadata list (avg):** The average time consumed for loading the chunk metadata list.
+  - **The time consumed of load chunk metadata list (50%):** The median time consumed for loading the chunk metadata list.
+  - **The time consumed of load chunk metadata list (99%):** The P99 time consumed for loading the chunk metadata list.
- Modify Chunk Metadata
- - The time consumed of chunk metadata modification(avg): The average time it takes for node queries to modify Chunk metadata
- - The time consumed of chunk metadata modification(50%): The total number of bits spent on modifying Chunk metadata for node queries
- - The time consumed of chunk metadata modification(99%): P99 time consumption for node query and modification of Chunk metadata
-- Filter According To Chunk Metadata
- - The time consumed of chunk metadata filter(avg): The average time spent on node queries filtering by Chunk metadata
- - The time consumed of chunk metadata filter(50%): Median filtering time for node queries based on Chunk metadata
- - The time consumed of chunk metadata filter(99%): P99 time consumption for node query filtering based on Chunk metadata
-- Constructing Chunk Reader
- - The time consumed of construct chunk reader(avg): The average time spent on constructing Chunk Reader for node queries
- - The time consumed of construct chunk reader(50%): Median time spent on constructing Chunk Reader for node queries
- - The time consumed of construct chunk reader(99%): P99 time consumption for constructing Chunk Reader for node queries
+  - **The time consumed of chunk metadata modification (avg):** The average time consumed for modifying chunk metadata.
+  - **The time consumed of chunk metadata modification (50%):** The median time consumed for modifying chunk metadata.
+  - **The time consumed of chunk metadata modification (99%):** The P99 time consumed for modifying chunk metadata.
+- Filter by Chunk Metadata
+ - **The time consumed of chunk metadata filter (avg):** The average time consumed for filtering by chunk metadata.
+ - **The time consumed of chunk metadata filter (50%):** The median time consumed for filtering by chunk metadata.
+ - **The time consumed of chunk metadata filter (99%):** The P99 time consumed for filtering by chunk metadata.
+- Construct Chunk Reader
+ - **The time consumed of construct chunk reader (avg):** The average time consumed for constructing a Chunk Reader.
+ - **The time consumed of construct chunk reader (50%):** The median time consumed for constructing a Chunk Reader.
+ - **The time consumed of construct chunk reader (99%):** The P99 time consumed for constructing a Chunk Reader.
- Read Chunk
- - The time consumed of read chunk(avg): The average time taken for node queries to read Chunks
- - The time consumed of read chunk(50%): Median time spent querying nodes to read Chunks
- - The time consumed of read chunk(99%): P99 time spent on querying and reading Chunks for nodes
+ - **The time consumed of read chunk (avg):** The average time consumed for reading a Chunk.
+ - **The time consumed of read chunk (50%):** The median time consumed for reading a Chunk.
+ - **The time consumed of read chunk (99%):** The P99 time consumed for reading a Chunk.
- Initialize Chunk Reader
- - The time consumed of init chunk reader(avg): The average time spent initializing Chunk Reader for node queries
- - The time consumed of init chunk reader(50%): Median time spent initializing Chunk Reader for node queries
- - The time consumed of init chunk reader(99%):P99 time spent initializing Chunk Reader for node queries
-- Constructing TsBlock Through Page Reader
- - The time consumed of build tsblock from page reader(avg): The average time it takes for node queries to construct TsBlock through Page Reader
- - The time consumed of build tsblock from page reader(50%): The median time spent on constructing TsBlock through Page Reader for node queries
- - The time consumed of build tsblock from page reader(99%):Node query using Page Reader to construct TsBlock time-consuming P99
-- Query the construction of TsBlock through Merge Reader
- - The time consumed of build tsblock from merge reader(avg): The average time taken for node queries to construct TsBlock through Merge Reader
- - The time consumed of build tsblock from merge reader(50%): The median time spent on constructing TsBlock through Merge Reader for node queries
- - The time consumed of build tsblock from merge reader(99%): Node query using Merge Reader to construct TsBlock time-consuming P99
-
-#### Query Data Exchange
-
-The data exchange for the query is time-consuming.
-
-- Obtain TsBlock through source handle
- - The time consumed of source handle get tsblock(avg): The average time taken for node queries to obtain TsBlock through source handle
- - The time consumed of source handle get tsblock(50%):Node query obtains the median time spent on TsBlock through source handle
- - The time consumed of source handle get tsblock(99%): Node query obtains TsBlock time P99 through source handle
-- Deserialize TsBlock through source handle
- - The time consumed of source handle deserialize tsblock(avg): The average time taken for node queries to deserialize TsBlock through source handle
- - The time consumed of source handle deserialize tsblock(50%): The median time taken for node queries to deserialize TsBlock through source handle
- - The time consumed of source handle deserialize tsblock(99%): P99 time spent on deserializing TsBlock through source handle for node query
-- Send TsBlock through sink handle
- - The time consumed of sink handle send tsblock(avg): The average time taken for node queries to send TsBlock through sink handle
- - The time consumed of sink handle send tsblock(50%): Node query median time spent sending TsBlock through sink handle
- - The time consumed of sink handle send tsblock(99%): Node query sends TsBlock through sink handle with a time consumption of P99
-- Callback data block event
- - The time consumed of on acknowledge data block event task(avg): The average time taken for node query callback data block event
- - The time consumed of on acknowledge data block event task(50%): Median time spent on node query callback data block event
- - The time consumed of on acknowledge data block event task(99%): P99 time consumption for node query callback data block event
-- Get Data Block Tasks
- - The time consumed of get data block task(avg): The average time taken for node queries to obtain data block tasks
- - The time consumed of get data block task(50%): The median time taken for node queries to obtain data block tasks
- - The time consumed of get data block task(99%): P99 time consumption for node query to obtain data block task
-
-#### Query Related Resource
-
-- MppDataExchangeManager:The number of shuffle sink handles and source handles during node queries
-- LocalExecutionPlanner: The remaining memory that nodes can allocate to query shards
-- FragmentInstanceManager: The query sharding context information and the number of query shards that the node is running
-- Coordinator: The number of queries recorded on the node
-- MemoryPool Size: Node query related memory pool situation
-- MemoryPool Capacity: The size of memory pools related to node queries, including maximum and remaining available values
-- DriverScheduler: Number of queue tasks related to node queries
-
-#### Consensus - IoT Consensus
+ - **The time consumed of init chunk reader (avg):** The average time consumed for initializing a Chunk Reader.
+ - **The time consumed of init chunk reader (50%):** The median time consumed for initializing a Chunk Reader.
+ - **The time consumed of init chunk reader (99%):** The P99 time consumed for initializing a Chunk Reader.
+- Build TsBlock from Page Reader
+ - **The time consumed of build tsblock from page reader (avg):** The average time consumed for building a TsBlock using a Page Reader.
+ - **The time consumed of build tsblock from page reader (50%):** The median time consumed for building a TsBlock using a Page Reader.
+ - **The time consumed of build tsblock from page reader (99%):** The P99 time consumed for building a TsBlock using a Page Reader.
+- Build TsBlock from Merge Reader
+ - **The time consumed of build tsblock from merge reader (avg):** The average time consumed for building a TsBlock using a Merge Reader.
+ - **The time consumed of build tsblock from merge reader (50%):** The median time consumed for building a TsBlock using a Merge Reader.
+ - **The time consumed of build tsblock from merge reader (99%):** The P99 time consumed for building a TsBlock using a Merge Reader.
+
+#### 3.4.8 Query Data Exchange
+
+The time consumed by data exchange during query execution.
+
+- Get TsBlock via Source Handle
+ - **The time consumed of source handle get tsblock (avg):** The average time consumed for retrieving a TsBlock using the source handle.
+ - **The time consumed of source handle get tsblock (50%):** The median time consumed for retrieving a TsBlock using the source handle.
+ - **The time consumed of source handle get tsblock (99%):** The P99 time consumed for retrieving a TsBlock using the source handle.
+- Deserialize TsBlock via Source Handle
+ - **The time consumed of source handle deserialize tsblock (avg):** The average time consumed for deserializing a TsBlock via the source handle.
+ - **The time consumed of source handle deserialize tsblock (50%):** The median time consumed for deserializing a TsBlock via the source handle.
+ - **The time consumed of source handle deserialize tsblock (99%):** The P99 time consumed for deserializing a TsBlock via the source handle.
+- Send TsBlock via Sink Handle
+ - **The time consumed of sink handle send tsblock (avg):** The average time consumed for sending a TsBlock via the sink handle.
+ - **The time consumed of sink handle send tsblock (50%):** The median time consumed for sending a TsBlock via the sink handle.
+ - **The time consumed of sink handle send tsblock (99%):** The P99 time consumed for sending a TsBlock via the sink handle.
+- Handle Data Block Event Callback
+ - **The time consumed of handling data block event callback (avg):** The average time consumed for handling the callback of a data block event during query execution.
+ - **The time consumed of handling data block event callback (50%):** The median time consumed for handling the callback of a data block event during query execution.
+ - **The time consumed of handling data block event callback (99%):** The P99 time consumed for handling the callback of a data block event during query execution.
+- Get Data Block Task
+ - **The time consumed of get data block task (avg):** The average time consumed for retrieving a data block task.
+ - **The time consumed of get data block task (50%):** The median time consumed for retrieving a data block task.
+ - **The time consumed of get data block task (99%):** The P99 time consumed for retrieving a data block task.
+
+#### 3.4.9 Query Related Resource
+
+- **MppDataExchangeManager:** The number of shuffle sink handles and source handles during queries.
+- **LocalExecutionPlanner:** The remaining memory available for query fragments.
+- **FragmentInstanceManager:** The context information and count of running query fragments.
+- **Coordinator:** The number of queries recorded on the node.
+- **MemoryPool Size:** The status of the memory pool related to queries.
+- **MemoryPool Capacity:** The size of the query-related memory pool, including the maximum and remaining available capacity.
+- **DriverScheduler:** The number of queued query tasks.
+
+#### 3.4.10 Consensus - IoT Consensus
- Memory Usage
- - IoTConsensus Used Memory: The memory usage of IoT Consumes for nodes, including total memory usage, queue usage, and synchronization usage
-- Synchronization Status Between Nodes
- - IoTConsensus Sync Index: SyncIndex size for different DataRegions of IoT Consumption nodes
- - IoTConsensus Overview:The total synchronization gap and cached request count of IoT consumption for nodes
- - IoTConsensus Search Index Rate: The growth rate of writing SearchIndex for different DataRegions of IoT Consumer nodes
- - IoTConsensus Safe Index Rate: The growth rate of synchronous SafeIndex for different DataRegions of IoT Consumer nodes
- - IoTConsensus LogDispatcher Request Size: The request size for node IoT Consusus to synchronize different DataRegions to other nodes
- - Sync Lag: The size of synchronization gap between different DataRegions in IoT Consumption node
- - Min Peer Sync Lag: The minimum synchronization gap between different DataRegions and different replicas of node IoT Consumption
- - Sync Speed Diff Of Peers: The maximum difference in synchronization from different DataRegions to different replicas for node IoT Consumption
- - IoTConsensus LogEntriesFromWAL Rate: The rate at which nodes IoT Consumus obtain logs from WAL for different DataRegions
- - IoTConsensus LogEntriesFromQueue Rate: The rate at which nodes IoT Consumes different DataRegions retrieve logs from the queue
-- Different Execution Stages Take Time
- - The Time Consumed Of Different Stages (avg): The average time spent on different execution stages of node IoT Consumus
- - The Time Consumed Of Different Stages (50%): The median time spent on different execution stages of node IoT Consusus
- - The Time Consumed Of Different Stages (99%):P99 of the time consumption for different execution stages of node IoT Consusus
-
-#### Consensus - DataRegion Ratis Consensus
-
-- Ratis Stage Time: The time consumption of different stages of node Ratis
-- Write Log Entry: The time consumption of writing logs at different stages of node Ratis
-- Remote / Local Write Time: The time it takes for node Ratis to write locally or remotely
-- Remote / Local Write QPS: QPS written by node Ratis locally or remotely
-- RatisConsensus Memory:Memory usage of node Ratis
-
-#### Consensus - SchemaRegion Ratis Consensus
-
-- Ratis Stage Time: The time consumption of different stages of node Ratis
-- Write Log Entry: The time consumption for writing logs at each stage of node Ratis
-- Remote / Local Write Time: The time it takes for node Ratis to write locally or remotelyThe time it takes for node Ratis to write locally or remotely
-- Remote / Local Write QPS: QPS written by node Ratis locally or remotely
-- RatisConsensus Memory: Node Ratis Memory Usage
\ No newline at end of file
+ - **IoTConsensus Used Memory:** The memory usage of IoT Consensus, including total used memory, queue memory usage, and synchronization memory usage.
+- Synchronization between Nodes
+ - **IoTConsensus Sync Index:** The sync index size of different DataRegions.
+ - **IoTConsensus Overview:** The total synchronization lag and cached request count of IoT Consensus.
+ - **IoTConsensus Search Index Rate:** The growth rate of SearchIndex writes for different DataRegions.
+ - **IoTConsensus Safe Index Rate:** The growth rate of SafeIndex synchronization for different DataRegions.
+ - **IoTConsensus LogDispatcher Request Size:** The size of synchronization requests sent to other nodes for different DataRegions.
+ - **Sync Lag:** The synchronization lag size of different DataRegions.
+ - **Min Peer Sync Lag:** The minimum synchronization lag to different replicas for different DataRegions.
+  - **Sync Speed Diff of Peers:** The maximum difference in synchronization progress across replicas for different DataRegions.
+ - **IoTConsensus LogEntriesFromWAL Rate:** The rate of retrieving log entries from WAL for different DataRegions.
+ - **IoTConsensus LogEntriesFromQueue Rate:** The rate of retrieving log entries from the queue for different DataRegions.
+- Execution Time of Different Stages
+ - **The Time Consumed of Different Stages (avg):** The average execution time of different stages in IoT Consensus.
+ - **The Time Consumed of Different Stages (50%):** The median execution time of different stages in IoT Consensus.
+ - **The Time Consumed of Different Stages (99%):** The P99 execution time of different stages in IoT Consensus.
+
+#### 3.4.11 Consensus - DataRegion Ratis Consensus
+
+- **Ratis Stage Time:** The execution time of different stages in Ratis.
+- **Write Log Entry:** The execution time for writing logs in Ratis.
+- **Remote / Local Write Time:** The time taken for remote and local writes in Ratis.
+- **Remote / Local Write QPS:** The QPS for remote and local writes in Ratis.
+- **RatisConsensus Memory:** The memory usage of Ratis consensus.
+
+#### 3.4.12 Consensus - SchemaRegion Ratis Consensus
+
+- **Ratis Stage Time:** The execution time of different stages in Ratis.
+- **Write Log Entry:** The execution time for writing logs in Ratis.
+- **Remote / Local Write Time:** The time taken for remote and local writes in Ratis.
+- **Remote / Local Write QPS:** The QPS for remote and local writes in Ratis.
+- **RatisConsensus Memory:** The memory usage of Ratis consensus.
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
new file mode 100644
index 000000000..a5dd070ba
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
@@ -0,0 +1,192 @@
+
+# Stand-Alone Deployment
+
+This guide introduces how to set up a standalone IoTDB instance, which includes one ConfigNode and one DataNode (commonly referred to as 1C1D).
+
+## 1 Prerequisites
+
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
+
+2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
+
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
+
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
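+
+For example, with the `/etc/hosts` mapping above, the address parameters described in the configuration section below can reference the hostname. A minimal sketch with illustrative values:
+
+```Properties
+# Excerpt from conf/iotdb-system.properties (illustrative values; see the tables in section 2.2)
+cn_internal_address=iotdb-1
+dn_internal_address=iotdb-1
+cn_seed_config_node=iotdb-1:10710
+dn_seed_config_node=iotdb-1:10710
+```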
+
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#22-parameter-configuration) section.
+
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
+
+5. **User Permissions**: Use one of the following approaches for installation and deployment:
+ - **Root User (Recommended)**: This avoids permission-related issues.
+ - **Non-Root User**:
+ - Use the same user for all operations, including starting, activating, and stopping services.
+ - Avoid using `sudo`, which can cause permission conflicts.
+
+6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the [Monitoring Board Install and Deploy](../Deployment-and-Maintenance/Monitoring-panel-deployment.md).
+
+## 2 Installation Steps
+
+### 2.1 Extract Installation Package
+
+Unzip the installation package and navigate to the directory:
+
+```Bash
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
+```
+
+### 2.2 Parameter Configuration
+
+#### 2.2.1 Memory Configuration
+
+Edit the following files for memory allocation:
+
+- **ConfigNode**: `conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `conf/datanode-env.sh` (or `.bat` for Windows)
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :---------------------------------- | :---------- | :-------------- | :---------------------- |
+| MEMORY_SIZE | Total memory allocated for the node | Empty | As needed | Effective after restart |
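+
+For example (the sizes below are placeholders; choose values that fit your hardware and workload):
+
+```Bash
+# conf/confignode-env.sh
+MEMORY_SIZE=2G
+
+# conf/datanode-env.sh
+MEMORY_SIZE=8G
+```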
+
+#### 2.2.2 General Configuration
+
+Set the following parameters in `conf/iotdb-system.properties`. Refer to `conf/iotdb-system.properties.template` for a complete list.
+
+
+**Cluster-Level Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------ | :-------------------------- | :------------- | :-------------- | :----------------------------------------------------------- |
+| cluster_name | Name of the cluster | defaultCluster | Customizable | If there is no specific requirement, keep the default value. |
+| schema_replication_factor | Number of metadata replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
+| data_replication_factor | Number of data replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
+
+**ConfigNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster (e.g., `cn_internal_address:cn_internal_port`). | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
+
+**DataNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster (e.g., `cn_internal_address:cn_internal_port`). | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
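+
+Before starting the services, you can optionally check that the default ports listed above are free. A minimal sketch, assuming `lsof` is available:
+
+```Bash
+# Print a warning for each default IoTDB port that is already occupied.
+for p in 6667 10710 10720 10730 10740 10750 10760; do
+  lsof -i :"$p" >/dev/null 2>&1 && echo "port $p is already in use"
+done
+```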
+
+### 2.3 Start ConfigNode
+
+Navigate to the `sbin` directory and start ConfigNode:
+
+```Bash
+./sbin/start-confignode.sh -d # The "-d" flag starts the process in the background.
+```
+
+If the startup fails, refer to the [Common Issues](#3-common-issues) section below for troubleshooting.
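+
+You can confirm the process is running before proceeding, for example:
+
+```Bash
+jps | grep ConfigNode   # should print the ConfigNode process ID and name
+```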
+
+
+
+### 2.4 Start DataNode
+
+Navigate to the `sbin` directory of IoTDB and start the DataNode:
+
+```Bash
+./sbin/start-datanode.sh -d # The "-d" flag starts the process in the background.
+```
+
+### 2.5 Verify Deployment
+
+Connect with the CLI and run `show cluster`. If the ConfigNode and the DataNode are both listed with the status `Running`, the standalone instance has been deployed successfully.
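+
+For example, assuming the default RPC settings above (`dn_rpc_port` 6667):
+
+```Bash
+# Open the CLI against the local DataNode, then run "show cluster" at the IoTDB> prompt.
+./sbin/start-cli.sh -h 127.0.0.1 -p 6667
+```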
+
+
+
+## 3 Common Issues
+
+1. ConfigNode Fails to Start
+
+ 1. Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
+
+ **Clean the Environment**
+
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
+
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+
+ 3. If processes remain, terminate them manually:
+ ```Bash
+      kill -9 <pid>
+
+      # For systems running a single IoTDB instance, you can clean up residual processes with:
+ ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+ 4. Delete the `data` and `logs` directories:
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 4 Appendix
+
+### 4.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 4.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md
index 7b28bd231..86c70ce67 100644
--- a/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md
@@ -20,158 +20,148 @@
-->
# Stand-Alone Deployment
-This chapter will introduce how to start an IoTDB standalone instance, which includes 1 ConfigNode and 1 DataNode (commonly known as 1C1D).
+This guide introduces how to set up a standalone TimechoDB instance, which includes one ConfigNode and one DataNode (commonly referred to as 1C1D).
-## Note
+## 1 Prerequisites
-1. Before installation, ensure that the system is complete by referring to [System Requirements](./Environment-Requirements.md).
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
- 2. It is recommended to prioritize using 'hostname' for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure/etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure IoTDB's' cn_internal-address' using the host name dn_internal_address、dn_rpc_address。
+2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
- ```shell
- echo "192.168.1.3 iotdb-1" >> /etc/hosts
- ```
-
- 3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings.
-
- 4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions.
-
- 5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can:
-
- - Using root user (recommended): Using root user can avoid issues such as permissions.
- - Using a fixed non root user:
- - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users.
- - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues.
-
- 6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department, and the steps for deploying the monitoring panel can be referred to:[Monitoring Board Install and Deploy](./Monitoring-panel-deployment.md).
-
-## Installation Steps
-
-### 1、Unzip the installation package and enter the installation directory
-
-```Plain
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
```
-### 2、Parameter Configuration
-
-#### Memory Configuration
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
-- conf/confignode-env.sh(or .bat)
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#22-parameter-configuration) section.
- | **Configuration** | **Description** | **Default** | **Recommended value** | Note |
- | :---------------: | :----------------------------------------------------------: | :---------: | :----------------------------------------------------------: | :---------------------------------: |
- | MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
-- conf/datanode-env.sh(or .bat)
+5. **User Permissions**: Use one of the following approaches for installation and deployment:
+ - **Root User (Recommended)**: This avoids permission-related issues.
+ - **Non-Root User**:
+ - Use the same user for all operations, including starting, activating, and stopping services.
+ - Avoid using `sudo`, which can cause permission conflicts.
- | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
- | :---------------: | :----------------------------------------------------------: | :---------: | :----------------------------------------------------------: | :---------------------------------: |
- | MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the [Monitoring Board Install and Deploy](../Deployment-and-Maintenance/Monitoring-panel-deployment.md).
-#### Function Configuration
+## 2 Installation Steps
-The parameters that actually take effect in the system are in the file conf/iotdb-system.exe. To start, the following parameters need to be set, which can be viewed in the conf/iotdb-system.exe file for all parameters
+### 2.1 Extract Installation Package
-Cluster function configuration
+Unzip the installation package and navigate to the directory:
-| **Configuration** | **Description** | **Default** | **Recommended value** | Note |
-| :-----------------------: | :----------------------------------------------------------: | :------------: | :----------------------------------------------------------: | :---------------------------------------------------: |
-| cluster_name | Cluster Name | defaultCluster | The cluster name can be set as needed, and if there are no special needs, the default can be kept | Cannot be modified after initial startup |
-| schema_replication_factor | Number of metadata replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup |
-| data_replication_factor | Number of data replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup |
+```Bash
+unzip timechodb-{version}-bin.zip
+cd timechodb-{version}-bin
+```
-ConfigNode Configuration
+### 2.2 Parameters Configuration
-| **Configuration** | **Description** | **Default** | **Recommended value** | Note |
-| :-----------------: | :----------------------------------------------------------: | :-------------: | :----------------------------------------------------------: | :--------------------------------------: |
-| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup |
-| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | Cannot be modified after initial startup |
-| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | Cannot be modified after initial startup |
-| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup |
+#### 2.2.1 Memory Configuration
-DataNode Configuration
+Edit the following files for memory allocation:
-| **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
-| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------- |
-| dn_rpc_address | The address of the client RPC service | 0.0.0.0 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Restarting the service takes effect |
-| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | Restarting the service takes effect |
-| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup |
-| dn_internal_port | The port used by DataNode for communication within the cluster | 10730 | 10730 | Cannot be modified after initial startup |
-| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | Cannot be modified after initial startup |
-| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | Cannot be modified after initial startup |
-| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | Cannot be modified after initial startup |
-| dn_seed_config_node | The ConfigNode address that the node connects to when registering to join the cluster, i.e. cn_internal-address: cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup |
+- **ConfigNode**: `conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `conf/datanode-env.sh` (or `.bat` for Windows)
-### 3、Start ConfigNode
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :---------------------------------- | :---------- | :-------------- | :---------------------- |
+| MEMORY_SIZE | Total memory allocated for the node | Empty | As needed | Effective after restart |
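+
+A minimal sketch of setting the parameter in both scripts (the 2G/8G values are illustrative, not recommendations):
+
+```Bash
+# Append the memory setting and restart the services for it to take effect
+echo "MEMORY_SIZE=2G" >> conf/confignode-env.sh
+echo "MEMORY_SIZE=8G" >> conf/datanode-env.sh
+```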
-Enter the sbin directory of iotdb and start confignode
+#### 2.2.2 General Configuration
-```shell
+Set the following parameters in `conf/iotdb-system.properties`. Refer to `conf/iotdb-system.properties.template` for a complete list.
-./start-confignode.sh -d #The "- d" parameter will start in the background
+**Cluster-Level Parameters**:
-```
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------ | :-------------------------- | :------------- | :-------------- | :----------------------------------------------------------- |
+| cluster_name | Name of the cluster | defaultCluster | Customizable | If there is no specific requirement, keep the default value. |
+| schema_replication_factor | Number of metadata replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
+| data_replication_factor | Number of data replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
-If the startup fails, please refer to [Common Problem](#common-problem).
+**ConfigNode Parameters**:
-### 4、Start DataNode
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
- Enter the sbin directory of iotdb and start datanode:
+**DataNode Parameters**:
-```shell
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
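+
+Putting the above together, a sketch of the resulting entries in `conf/iotdb-system.properties` for a standalone node with hostname `iotdb-1` (illustrative values taken from the tables above):
+
+```Plain
+cluster_name=defaultCluster
+schema_replication_factor=1
+data_replication_factor=1
+cn_internal_address=iotdb-1
+cn_seed_config_node=iotdb-1:10710
+dn_internal_address=iotdb-1
+dn_seed_config_node=iotdb-1:10710
+```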
-cd sbin
+### 2.3 Start ConfigNode
-./start-datanode.sh -d # The "- d" parameter will start in the background
+Navigate to the `sbin` directory and start ConfigNode:
+```Bash
+./sbin/start-confignode.sh -d # The "-d" flag starts the process in the background.
```
-### 5、Activate Database
-
-#### Method 1: Activate file copy activation
+If the startup fails, refer to the [Common Issues](#3-common-issues) section below for troubleshooting.
-- After starting the confignode datanode node, enter the activation folder and copy the systeminfo file to the Timecho staff
-- Received the license file returned by the staff
-- Place the license file in the activation folder of the corresponding node;
+### 2.4 Start DataNode
-#### Method 2: Activate Script Activation
+Navigate to the `sbin` directory of IoTDB and start the DataNode:
-- Retrieve the machine codes of 3 machines in sequence and enter IoTDB CLI
+```Bash
+./sbin/start-datanode.sh -d # The "-d" flag starts the process in the background.
+```
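+
+Optionally, confirm that both services are up (a quick sketch; ConfigNode and DataNode appear under these names in the Java process list):
+
+```Bash
+jps | grep -E "ConfigNode|DataNode"
+```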
- - Table Model CLI Enter Command:
+### 2.5 Activate the Database
- ```SQL
- # Linux or MACOS
- ./start-cli.sh -sql_dialect table
-
- # windows
- ./start-cli.bat -sql_dialect table
- ```
+#### Option 1: File-Based Activation
- - Enter the tree model CLI command:
+- Start both the ConfigNode and DataNode.
+- Navigate to the `activation` folder and copy the `system_info` file.
+- Send the `system_info` file to the Timecho team.
+- Place the license file provided by the Timecho team into the corresponding `activation` folder for each node.
- ```SQL
- # Linux or MACOS
- ./start-cli.sh
-
- # windows
- ./start-cli.bat
- ```
+#### Option 2: Command-Based Activation
-- Execute the following to obtain the machine code required for activation:
- - : Currently, activation is only supported in tree models
+1. Enter the IoTDB CLI.
+   - **For Table Model**:
+
+     ```SQL
+     # For Linux or macOS
+     ./start-cli.sh -sql_dialect table
+
+     # For Windows
+     ./start-cli.bat -sql_dialect table
+     ```
- ```Bash
+   - **For Tree Model**:
+
+     ```SQL
+     # For Linux or macOS
+     ./start-cli.sh
+
+     # For Windows
+     ./start-cli.bat
+     ```
+2. Run the following command to retrieve the machine code required for activation:
- show system info
+```Bash
+show system info
+```
- ```
+**Note**: Activation is currently supported only in the Tree Model.
-- The following information is displayed, which shows the machine code of one machine:
+3. Copy the returned machine code (displayed as a green string) and send it to the Timecho team:
```Bash
+--------------------------------------------------------------+
@@ -183,84 +173,78 @@ Total line number = 1
It costs 0.030s
```
-- Enter the activation code returned by the staff into the CLI and enter the following content
- - Note: The activation code needs to be marked with a `'`symbol before and after, as shown in
+4. Enter the activation code provided by the Timecho team in the CLI, wrapping the code in single quotes (`'`) as shown below:
```Bash
IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA==='
```
-### 6、Verify Activation
+### 2.6 Verify Activation
-When the "ClusterActivation Status" field is displayed as Activated, it indicates successful activation
+Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated.

-## Common Problem
-
-1. Multiple prompts indicating activation failure during deployment process
-
- - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user.
-
- - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user.
-
-2. Confignode failed to start
-
- Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified.
+## 3 Common Issues
+1. Activation Fails Repeatedly
- Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions.
+ 1. Use the `ls -al` command to verify that the ownership of the installation directory matches the current user.
+ 2. Check the ownership of all files in the `./activation` directory to ensure they belong to the current user.
- Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart.
+2. ConfigNode Fails to Start
- Step 4: Clean up the environment:
+ 1. Review the startup logs to check whether any parameters that cannot be modified after the first startup were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
- a. Terminate all ConfigNode Node and DataNode processes.
+ **Clean the Environment**
- ```Bash
- # 1. Stop the ConfigNode and DataNode services
- sbin/stop-standalone.sh
-
- # 2. Check for any remaining processes
- jps
- # Or
- ps -ef|gerp iotdb
-
- # 3. If there are any remaining processes, manually kill the
- kill -9
- # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes
- ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9
- ```
-
- b. Delete the data and logs directories.
-
- Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory.
-
- ```Bash
- cd /data/iotdb
- rm -rf data logs
- ```
-
-## Appendix
-
-### Introduction to Configuration Node Parameters
-
-| Parameter | Description | Is it required |
-| :-------- | :---------------------------------------------- | :----------------- |
-| -d | Start in daemon mode, running in the background | No |
-
-### Introduction to Datanode Node Parameters
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
-| Abbreviation | Description | Is it required |
-| :----------- | :----------------------------------------------------------- | :------------- |
-| -v | Show version information | No |
-| -f | Run the script in the foreground, do not put it in the background | No |
-| -d | Start in daemon mode, i.e. run in the background | No |
-| -p | Specify a file to store the process ID for process management | No |
-| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No |
-| -g | Print detailed garbage collection (GC) information | No |
-| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No |
-| -E | Specify the path of the JVM error log file | No |
-| -D | Define system properties, in the format key=value | No |
-| -X | Pass -XX parameters directly to the JVM | No |
-| -h | Help instruction | No |
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+ 3. If processes remain, terminate them manually:
+ ```Bash
+ kill -9 <pid>
+
+ # For systems with a single IoTDB instance, you can clean up residual processes with:
+ ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+ 4. Delete the `data` and `logs` directories:
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 4 Appendix
+
+### 4.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 4.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md
index b11e592fe..4389a704f 100644
--- a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md
+++ b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md
@@ -50,7 +50,7 @@ This section will take the IoTDB classic cluster deployment architecture 3C3D (3
 1. Prepare the IoTDB installation package: `apache-iotdb-{version}-all-bin.zip` (for details, see [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_apache.md))
-2. Configure the operating system environment according to environmental requirements (system environment configuration can be found in:[Environment Requirements](https://iotdb.apache.org/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.html))
+2. Configure the operating system environment according to the [Environment Requirements](../Deployment-and-Maintenance/Environment-Requirements.md)
## Installation Steps
diff --git a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
index 25d61324f..bd7d0aee5 100644
--- a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
+++ b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
@@ -55,8 +55,9 @@ This guide describes how to manually deploy a cluster instance consisting of 3 C
## Preparation
-1. Obtain the TimechoDB installation package: `timechodb-{version}-bin.zip` following[IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)
-2. Configure the operating system environment according to [Environment Requirement](./Environment-Requirements.md)
+1. Obtain the TimechoDB installation package `timechodb-{version}-bin.zip` as described in [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)
+
+2. Configure the operating system environment according to the [Environment Requirements](../Deployment-and-Maintenance/Environment-Requirements.md)
## Installation Steps
diff --git a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md
index aab760b7b..45aeedd4e 100644
--- a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md
+++ b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -18,25 +18,30 @@
under the License.
-->
-# Package Acquisition
+# Obtain IoTDB
+
+## 1 How to obtain IoTDB
-## How to obtain installation packages
The installation package can be directly obtained from the Apache IoTDB official website:https://iotdb.apache.org/Download/
-## Installation Package Structure
+
+## 2 Installation Package Structure
+
+
 After decompressing the installation package (`apache-iotdb-{version}-all-bin.zip`), the directory structure is as follows:
-| **catalogue** | **Type** | **Explanation** |
-| :--------------: | :------: | :----------------------------------------------------------: |
-| conf | folder | Configuration file directory, including configuration files such as ConfigNode, DataNode, JMX, and logback |
-| data | folder | The default data file directory contains data files for ConfigNode and DataNode. (The directory will only be generated after starting the program) |
-| lib | folder | IoTDB executable library file directory |
-| licenses | folder | Open source community certificate file directory |
-| logs | folder | The default log file directory, which includes log files for ConfigNode and DataNode (this directory will only be generated after starting the program) |
-| sbin | folder | Main script directory, including start, stop, and other scripts |
-| tools | folder | Directory of System Peripheral Tools |
-| ext | folder | Related files for pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | file | certificate |
-| NOTICE | file | Tip |
-| README_ZH\.md | file | Explanation of the Chinese version in Markdown format |
-| README\.md | file | Instructions for use |
-| RELEASE_NOTES\.md | file | Version Description |
\ No newline at end of file
+
+| **Name**         | **Type** | **Description**                                               |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md
index 86e0af2aa..261c8a10f 100644
--- a/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md
+++ b/src/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md
@@ -19,24 +19,28 @@
-->
# Obtain TimechoDB
-## How to obtain TimechoDB
-The enterprise version installation package can be obtained through product trial application or by directly contacting the business personnel who are in contact with you.
-## Installation Package Structure
-Install the package after decompression(iotdb-enterprise-{version}-bin.zip),The directory structure after unpacking the installation package is as follows:
-| **catalogue** | **Type** | **Explanation** |
-| :--------------: | -------- | ------------------------------------------------------------ |
-| activation | folder | The directory where the activation file is located, including the generated machine code and the enterprise version activation code obtained from the business side (this directory will only be generated after starting ConfigNode to obtain the activation code) |
-| conf | folder | Configuration file directory, including configuration files such as ConfigNode, DataNode, JMX, and logback |
-| data | folder | The default data file directory contains data files for ConfigNode and DataNode. (The directory will only be generated after starting the program) |
-| lib | folder | IoTDB executable library file directory |
-| licenses | folder | Open source community certificate file directory |
-| logs | folder | The default log file directory, which includes log files for ConfigNode and DataNode (this directory will only be generated after starting the program) |
-| sbin | folder | Main script directory, including start, stop, and other scripts |
-| tools | folder | Directory of System Peripheral Tools |
-| ext | folder | Related files for pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | file | certificate |
-| NOTICE | file | Tip |
-| README_ZH\.md | file | Explanation of the Chinese version in Markdown format |
-| README\.md | file | Instructions for use |
-| RELEASE_NOTES\.md | file | Version Description |
+## 1 How to obtain TimechoDB
+
+The TimechoDB installation package can be obtained through product trial application or by directly contacting the Timecho team.
+
+## 2 Installation Package Structure
+
+After unpacking the installation package (`iotdb-enterprise-{version}-bin.zip`), you will see the following directory structure:
+
+| **Name**         | **Type** | **Description**                                               |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| activation | Folder | Directory for activation files, including the generated machine code and the TimechoDB activation code obtained from Timecho staff. *(This directory is generated after starting the ConfigNode, enabling you to obtain the activation code.)* |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
new file mode 100644
index 000000000..0214e7996
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
@@ -0,0 +1,349 @@
+
+# Cluster Deployment
+
+This guide describes how to manually deploy a cluster instance consisting of 3 ConfigNodes and 3 DataNodes (commonly referred to as a 3C3D cluster).
+
+
+

+
+
+
+## 1 Prerequisites
+
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
+
+2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
+
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
+
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
+
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#33-parameters-configuration) section.
+
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
+5. **User Permissions**: Choose one of the following permissions during installation and deployment:
+ - **Root User (Recommended)**: This avoids permission-related issues.
+ - **Non-Root User**:
+ - Use the same user for all operations, including starting, activating, and stopping services.
+ - Avoid using `sudo`, which can cause permission conflicts.
+
+6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the [Monitoring Board Install and Deploy](../Deployment-and-Maintenance/Monitoring-panel-deployment.md).
+
+## 2 Preparation
+
+1. Prepare the IoTDB installation package: `apache-iotdb-{version}-all-bin.zip` (for details, see [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_apache.md))
+
+2. Configure the operating system environment according to the [Environment Requirements](../Deployment-and-Maintenance/Environment-Requirements.md)
+
+
+## 3 Installation Steps
+
+Take a cluster of three Linux servers with the following configuration as an example:
+
+| Node IP | Hostname | Services |
+| ------------- | -------- | -------------------- |
+| 11.101.17.224 | iotdb-1 | ConfigNode, DataNode |
+| 11.101.17.225 | iotdb-2 | ConfigNode, DataNode |
+| 11.101.17.226 | iotdb-3 | ConfigNode, DataNode |
+
+### 3.1 Configure Hostnames
+
+On all three servers, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
+
+```Bash
+echo "11.101.17.224 iotdb-1" >> /etc/hosts
+echo "11.101.17.225 iotdb-2" >> /etc/hosts
+echo "11.101.17.226 iotdb-3" >> /etc/hosts
+```
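+
+Optionally, verify the mappings from each server (illustrative check):
+
+```Bash
+# Every hostname should resolve and respond from every node in the cluster
+ping -c 1 iotdb-1
+ping -c 1 iotdb-2
+ping -c 1 iotdb-3
+```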
+
+### 3.2 Extract Installation Package
+
+Unzip the installation package and navigate to the directory:
+
+```Bash
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
+```
+### 3.3 Parameters Configuration
+
+#### 3.3.1 Memory Configuration
+
+Edit the following files for memory allocation:
+
+- **ConfigNode**: `./conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `./conf/datanode-env.sh` (or `.bat` for Windows)
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :--------------------------------- | :---------- | :-------------- | :-------------------------------------- |
+| MEMORY_SIZE | Total memory allocated to the node | Empty | As needed | Effective after restarting the service. |
+
+#### 3.3.2 General Configuration
+
+Set the following parameters in `./conf/iotdb-system.properties`. Refer to `./conf/iotdb-system.properties.template` for a complete list.
+
+**Cluster-Level Parameters**:
+
+| **Parameter** | **Description** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** |
+| :------------------------ | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- |
+| cluster_name | Name of the cluster | defaultCluster | defaultCluster | defaultCluster |
+| schema_replication_factor | Metadata replication factor; the number of DataNodes must be at least this value | 3                 | 3                 | 3                 |
+| data_replication_factor   | Data replication factor; the number of DataNodes must be at least this value | 2                 | 2                 | 2                 |
+
+**ConfigNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | 10720 | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the seed ConfigNode (e.g., `cn_internal_address:cn_internal_port`) | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
+
+**DataNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | 10740 | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster.(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address of the first ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
+
+**Note:** Ensure files are saved after editing. Tools like VSCode Remote do not save changes automatically.
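+
+For reference, a sketch of the resulting entries in `./conf/iotdb-system.properties` on `iotdb-1` (the other two nodes differ only in the `*_internal_address` values):
+
+```Plain
+cluster_name=defaultCluster
+schema_replication_factor=3
+data_replication_factor=2
+cn_internal_address=iotdb-1
+cn_seed_config_node=iotdb-1:10710
+dn_internal_address=iotdb-1
+dn_seed_config_node=iotdb-1:10710
+```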
+
+### 3.4 Start ConfigNode Instances
+
+1. Start the first ConfigNode (`iotdb-1`) as the seed node:
+
+   ```Bash
+   cd sbin
+   ./start-confignode.sh -d # The "-d" flag starts the process in the background.
+   ```
+
+2. Start the remaining ConfigNodes (`iotdb-2` and `iotdb-3`) in sequence.
+
+If the startup fails, refer to the [Common Issues](#5-common-issues) section below for troubleshooting.
+
+### 3.5 Start DataNode Instances
+
+On each server, navigate to the `sbin` directory and start the DataNode:
+
+```Bash
+cd sbin
+./start-datanode.sh -d # The "-d" flag starts the process in the background.
+```
+
+### 3.6 Verify Deployment
+
+Connect to the cluster with the CLI and run `show cluster`; the deployment is successful when every ConfigNode and DataNode is listed with the `Running` status.
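+
+A quick sketch of the check (assumes the CLI connects to `iotdb-1` with the default `root`/`root` credentials):
+
+```Bash
+./sbin/start-cli.sh -h iotdb-1 -p 6667 -u root -pw root -e "show cluster"
+```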
+
+## 4 Maintenance
+
+### 4.1 ConfigNode Maintenance
+
+ConfigNode maintenance includes adding and removing ConfigNodes. Common use cases include:
+
+- **Cluster Expansion:** If the cluster contains only 1 ConfigNode, adding 2 more ConfigNodes enhances high availability, resulting in a total of 3 ConfigNodes.
+- **Cluster Fault Recovery:** If a ConfigNode's machine fails and it cannot function normally, remove the faulty ConfigNode and add a new one to the cluster.
+
+**Note:** After completing ConfigNode maintenance, ensure that the cluster contains either 1 or 3 active ConfigNodes. Two ConfigNodes do not provide high availability, and more than three ConfigNodes can degrade performance.
+
+#### 4.1.1 Adding a ConfigNode
+
+**Linux / MacOS :**
+
+```Bash
+sbin/start-confignode.sh
+```
+
+**Windows:**
+
+```Bash
+sbin/start-confignode.bat
+```
+
+#### 4.1.2 Removing a ConfigNode
+
+1. Connect to the cluster using the CLI and confirm the internal address and port of the ConfigNode to be removed:
+
+```Plain
+show confignodes;
+```
+
+Example output:
+
+```Plain
+IoTDB> show confignodes
++------+-------+---------------+------------+--------+
+|NodeID| Status|InternalAddress|InternalPort| Role|
++------+-------+---------------+------------+--------+
+| 0|Running| 127.0.0.1| 10710| Leader|
+| 1|Running| 127.0.0.1| 10711|Follower|
+| 2|Running| 127.0.0.1| 10712|Follower|
++------+-------+---------------+------------+--------+
+Total line number = 3
+It costs 0.030s
+```
+
+2. Remove the ConfigNode using the script:
+
+**Linux / MacOS:**
+
+```Bash
+sbin/remove-confignode.sh [confignode_id]
+# Or:
+sbin/remove-confignode.sh [cn_internal_address:cn_internal_port]
+```
+
+**Windows:**
+
+```Bash
+sbin/remove-confignode.bat [confignode_id]
+# Or:
+sbin/remove-confignode.bat [cn_internal_address:cn_internal_port]
+```
+
+### 4.2 DataNode Maintenance
+
+DataNode maintenance includes adding and removing DataNodes. Common use cases include:
+
+- **Cluster Expansion:** Add new DataNodes to increase cluster capacity.
+- **Cluster Fault Recovery:** If a DataNode's machine fails and it cannot function normally, remove the faulty DataNode and add a new one to the cluster.
+
+**Note:** During and after DataNode maintenance, ensure that the number of active DataNodes is not fewer than the data replication factor (usually 2) or the schema replication factor (usually 3).
+
+#### 4.2.1 Adding a DataNode
+
+**Linux / MacOS:**
+
+```Bash
+sbin/start-datanode.sh
+```
+
+**Windows:**
+
+```Bash
+sbin/start-datanode.bat
+```
+
+**Note:** After adding a DataNode, the cluster load will gradually balance across all nodes as new writes arrive and old data expires (if TTL is set).
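+
+To observe the rebalancing, one can periodically check the region distribution from the CLI (a sketch; re-running the statement shows regions spreading across the nodes):
+
+```Bash
+./sbin/start-cli.sh -e "show regions"
+```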
+
+#### 4.2.2 Removing a DataNode
+
+1. Connect to the cluster using the CLI and confirm the RPC address and port of the DataNode to be removed:
+
+```Plain
+show datanodes;
+```
+
+Example output:
+
+```Plain
+IoTDB> show datanodes
++------+-------+----------+-------+-------------+---------------+
+|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum|
++------+-------+----------+-------+-------------+---------------+
+| 1|Running| 0.0.0.0| 6667| 0| 0|
+| 2|Running| 0.0.0.0| 6668| 1| 1|
+| 3|Running| 0.0.0.0| 6669| 1| 0|
++------+-------+----------+-------+-------------+---------------+
+Total line number = 3
+It costs 0.110s
+```
+
+2. Remove the DataNode using the script:
+
+**Linux / MacOS:**
+
+```Bash
+sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port]
+```
+
+**Windows:**
+
+```Bash
+sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port]
+```
+
+## 5 Common Issues
+
+1. ConfigNode Fails to Start
+ 1. Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed.
+ 1. Review the startup logs to check whether any parameters that cannot be modified after the first startup were changed.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
+
+ **Clean the Environment**
+
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
+
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+
+ 3. If processes remain, terminate them manually:
+ ```Bash
+ kill -9 <pid>
+
+ # For systems with a single IoTDB instance, you can clean up residual processes with:
+ ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+ 4. Delete the `data` and `logs` directories:
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 6 Appendix
+
+### 6.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 6.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
index a58cff767..306420109 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
@@ -20,54 +20,56 @@
-->
# Cluster Deployment
-This section describes how to manually deploy an instance that includes 3 ConfigNodes and 3 DataNodes, commonly known as a 3C3D cluster.
+This guide describes how to manually deploy a cluster instance consisting of 3 ConfigNodes and 3 DataNodes (commonly referred to as a 3C3D cluster).
-## Note
+## 1 Prerequisites
-1. Before installation, ensure that the system is complete by referring to [System Requirements](./Environment-Requirements.md)
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
-2. It is recommended to prioritize using `hostname` for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure /etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure the `cn_internal_address` and `dn_internal_address` of IoTDB using the host name.
+2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
- ``` shell
- echo "192.168.1.3 iotdb-1" >> /etc/hosts
- ```
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
+
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
-3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings.
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#33-parameters-configuration) section.
-4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions.
-5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can:
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
+5. **User Permissions**: Choose one of the following permissions during installation and deployment:
+ - **Root User (Recommended)**: This avoids permission-related issues.
+ - **Non-Root User**:
+ - Use the same user for all operations, including starting, activating, and stopping services.
+ - Avoid using `sudo`, which can cause permission conflicts.
-- Using root user (recommended): Using root user can avoid issues such as permissions.
-- Using a fixed non root user:
- - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users.
- - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues.
+6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the [Monitoring Board Install and Deploy](../Deployment-and-Maintenance/Monitoring-panel-deployment.md).
-6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department,The steps for deploying a monitoring panel can refer to:[Monitoring Panel Deployment](./Monitoring-panel-deployment.md)
+## 2 Preparation
-## Preparation Steps
+1. Obtain the TimechoDB installation package `timechodb-{version}-bin.zip` as described in [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)
-1. Prepare the IoTDB database installation package: timechodb-{version}-bin.zip(The installation package can be obtained from:[IoTDB-Package](./IoTDB-Package_timecho.md))
-2. Configure the operating system environment according to environmental requirements(The system environment configuration can be found in:[Environment Requirement](./Environment-Requirements.md))
+2. Configure the operating system environment according to the [Environment Requirements](../Deployment-and-Maintenance/Environment-Requirements.md)
-## Installation Steps
+## 3 Installation Steps
-Assuming there are three Linux servers now, the IP addresses and service roles are assigned as follows:
+Take a cluster of three Linux servers with the following configuration as an example:
-| Node IP | Host Name | Service |
-| ------------- | --------- | -------------------- |
-| 11.101.17.224 | iotdb-1 | ConfigNode、DataNode |
-| 11.101.17.225 | iotdb-2 | ConfigNode、DataNode |
-| 11.101.17.226 | iotdb-3 | ConfigNode、DataNode |
+| Node IP | Hostname | Services |
+| ------------- | -------- | -------------------- |
+| 11.101.17.224 | iotdb-1 | ConfigNode, DataNode |
+| 11.101.17.225 | iotdb-2 | ConfigNode, DataNode |
+| 11.101.17.226 | iotdb-3 | ConfigNode, DataNode |
-### Set Host Name
+### 3.1 Configure Hostnames
-On three machines, configure the host names separately. To set the host names, configure `/etc/hosts` on the target server. Use the following command:
+On all three servers, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
```Bash
echo "11.101.17.224 iotdb-1" >> /etc/hosts
@@ -75,186 +77,182 @@ echo "11.101.17.225 iotdb-2" >> /etc/hosts
echo "11.101.17.226 iotdb-3" >> /etc/hosts
```
-### Configuration
+### 3.2 Extract Installation Package
-Unzip the installation package and enter the installation directory
+Unzip the installation package and navigate to the directory:
```Plain
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+unzip timechodb-{version}-bin.zip
+cd timechodb-{version}-bin
```
-#### Environment script configuration
-
-- `./conf/confignode-env.sh` configuration
-
- | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
- | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- |
- | MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+### 3.3 Parameters Configuration
-- `./conf/datanode-env.sh` configuration
+#### 3.3.1 Memory Configuration
- | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
- | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- |
- | MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+Edit the following files for memory allocation:
-#### General Configuration(./conf/iotdb-system.properties)
+- **ConfigNode**: `./conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `./conf/datanode-env.sh` (or `.bat` for Windows)
-- Cluster Configuration
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :--------------------------------- | :---------- | :-------------- | :-------------------------------------- |
+| MEMORY_SIZE | Total memory allocated to the node | Empty | As needed | Effective after restarting the service. |
- | **Configuration** | **Description** | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 |
- | ------------------------- | ------------------------------------------------------------ | -------------- | -------------- | -------------- |
- | cluster_name | Cluster Name | defaultCluster | defaultCluster | defaultCluster |
- | schema_replication_factor | The number of metadata replicas, the number of DataNodes should not be less than this number | 3 | 3 | 3 |
- | data_replication_factor | The number of data replicas should not be less than this number of DataNodes | 2 | 2 | 2 |
+#### 3.3.2 General Configuration
-#### ConfigNode Configuration
+Set the following parameters in `./conf/iotdb-system.properties`. Refer to `./conf/iotdb-system.properties.template` for a complete list.
-| **Configuration** | **Description** | **Default** | **Recommended value** | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | Note |
-| ------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- |
-| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup |
-| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | Cannot be modified after initial startup |
-| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | 10720 | 10720 | 10720 | Cannot be modified after initial startup |
-| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, `cn_internal_address:cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's `cn_internal-address: cn_internal_port` | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup |
+**Cluster-Level Parameters**:
-#### Datanode Configuration
+| **Parameter** | **Description** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** |
+| :------------------------ | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- |
+| cluster_name | Name of the cluster | defaultCluster | defaultCluster | defaultCluster |
+| schema_replication_factor | Metadata replication factor; the number of DataNodes must be at least this value | 3                 | 3                 | 3                 |
+| data_replication_factor   | Data replication factor; the number of DataNodes must be at least this value | 2                 | 2                 | 2                 |
-| **Configuration** | **Description** | **Default** | **Recommended value** | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | Note |
-| ------------------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- |
-| dn_rpc_address | The address of the client RPC service | 127.0.0.1 | Recommend using the **IPV4 address or hostname** of the server where it is located | iotdb-1 | iotdb-2 | iotdb-3 | Restarting the service takes effect |
-| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Restarting the service takes effect |
-| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup |
-| dn_internal_port | The port used by DataNode for communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | Cannot be modified after initial startup |
-| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | 10740 | 10740 | 10740 | Cannot be modified after initial startup |
-| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | Cannot be modified after initial startup |
-| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | Cannot be modified after initial startup |
-| dn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, i.e. `cn_internal-address: cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's cn_internal-address: cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup |
+**ConfigNode Parameters**:
-> ❗️Attention: Editors such as VSCode Remote do not have automatic configuration saving function. Please ensure that the modified files are saved persistently, otherwise the configuration items will not take effect
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | 10720 | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode used when registering to join the cluster (format: `cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the seed (first) ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
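+
+For example, the ConfigNode entries on `iotdb-1` would look like the sketch below (same file-name assumption as above; `iotdb-2` and `iotdb-3` differ only in `cn_internal_address`):
+
+```Plain
+cn_internal_address=iotdb-1
+cn_internal_port=10710
+cn_consensus_port=10720
+cn_seed_config_node=iotdb-1:10710
+```
+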
-### Start ConfigNode
+**DataNode Parameters**:
-Start the first confignode of IoTDB-1 first, ensuring that the seed confignode node starts first, and then start the second and third confignode nodes in sequence
-
-```Bash
-cd sbin
-
-./start-confignode.sh -d #"- d" parameter will start in the background
-```
+| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | 10740 | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode used when registering to join the cluster (format: `cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the first ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. |
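+
+Likewise, a sketch of the DataNode entries on `iotdb-1` (only `dn_internal_address` changes on the other nodes):
+
+```Plain
+dn_rpc_address=0.0.0.0
+dn_rpc_port=6667
+dn_internal_address=iotdb-1
+dn_internal_port=10730
+dn_mpp_data_exchange_port=10740
+dn_data_region_consensus_port=10750
+dn_schema_region_consensus_port=10760
+dn_seed_config_node=iotdb-1:10710
+```
+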
-If the startup fails, please refer to [Common Questions](#common-questions).
+**Note:** Ensure files are saved after editing. Tools like VSCode Remote do not save changes automatically.
-### Start DataNode
+### 3.4 Start ConfigNode Instances
- Enter the `sbin` directory of iotdb and start three datanode nodes in sequence:
+1. Start the first ConfigNode (`iotdb-1`) as the seed node:
-```Go
-cd sbin
-
-./start-datanode.sh -d #"- d" parameter will start in the background
-```
-
-### Activate Database
-
-#### Method 1: Activate file copy activation
-
-- After starting three Confignode Datanode nodes in sequence, copy the `activation` folder of each machine and the `system_info` file of each machine to the Timecho staff;
-
-- The staff will return the license files for each ConfigNode Datanode node, where 3 license files will be returned;
+```Bash
+ cd sbin
+ ./start-confignode.sh -d # The "-d" flag starts the process in the background.
+ ```
-- Put the three license files into the `activation` folder of the corresponding ConfigNode node;
+2. Start the remaining ConfigNodes (`iotdb-2` and `iotdb-3`) in sequence.
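+
+The command is the same as for the seed node; for example, on `iotdb-2` (and then on `iotdb-3`):
+
+```Bash
+cd sbin
+./start-confignode.sh -d # The "-d" flag starts the process in the background.
+```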
-#### Method 2: Activate Script Activation
+If the startup fails, refer to the [Common Issues](#5-common-issues) section below for troubleshooting.
-- Retrieve the machine codes of 3 machines in sequence and enter IoTDB CLI
+### 3.5 Start DataNode Instances
- - Table Model CLI Enter Command:
+On each server, navigate to the `sbin` directory and start the DataNode:
- ```SQL
- # Linux or MACOS
- ./start-cli.sh -sql_dialect table
-
- # windows
- ./start-cli.bat -sql_dialect table
+```Bash
+ cd sbin
+ ./start-datanode.sh -d # The "-d" flag starts the process in the background.
```
- - Enter the tree model CLI command:
+### 3.6 Activate the Database
- ```SQL
- # Linux or MACOS
- ./start-cli.sh
-
- # windows
- ./start-cli.bat
- ```
+#### Option 1: File-Based Activation
- - Execute the following to obtain the machine code required for activation:
- - Note: Currently, activation is only supported in tree models
+1. Start all ConfigNodes and DataNodes.
+2. Copy the `system_info` file from the `activation` directory on each server and send the files to the Timecho team.
+3. Place the license files provided by the Timecho team into the corresponding `activation` folder for each node.
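+
+For example, assuming the installation directory is `/data/iotdb` (as used elsewhere in this guide) and a hypothetical license file name, placing a license on one node might look like:
+
+```Bash
+# "license-iotdb-1" is a placeholder; use the file returned for this node
+cp license-iotdb-1 /data/iotdb/activation/
+```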
+#### Option 2: Command-Based Activation
- ```Bash
- show system info
- ```
+1. Enter the IoTDB CLI for each node:
+ - **For Table Model**:
+ ```SQL
+ # For Linux or macOS
+ ./start-cli.sh -sql_dialect table
+
+ # For Windows
+ ./start-cli.bat -sql_dialect table
+ ```
- - The following information is displayed, which shows the machine code of one machine:
+ - **For Tree Model**:
+ ```SQL
+ # For Linux or macOS
+ ./start-cli.sh
+
+ # For Windows
+ ./start-cli.bat
+ ```
+2. Run the following command to retrieve the machine code required for activation:
- ```Bash
- +--------------------------------------------------------------+
- | SystemInfo|
- +--------------------------------------------------------------+
- |01-TE5NLES4-UDDWCMYE,01-GG5NLES4-XXDWCMYE,01-FF5NLES4-WWWWCMYE|
- +--------------------------------------------------------------+
- Total line number = 1
- It costs 0.030s
- ```
+ ```Bash
+ show system info
+ ```
-- The other two nodes enter the CLI of the IoTDB tree model in sequence, execute the statement, and copy the machine codes of the three machines obtained to the Timecho staff
+**Note**: Activation is currently supported only in the Tree Model.
-- The staff will return three activation codes, which normally correspond to the order of the three machine codes provided. Please paste each activation code into the CLI separately, as prompted below:
+3. Copy the returned machine code of each server (displayed as a green string) and send it to the Timecho team:
+
+```Bash
++--------------------------------------------------------------+
+| SystemInfo|
++--------------------------------------------------------------+
+|01-TE5NLES4-UDDWCMYE,01-GG5NLES4-XXDWCMYE,01-FF5NLES4-WWWWCMYE|
++--------------------------------------------------------------+
+Total line number = 1
+It costs 0.030s
+```
- - Note: The activation code needs to be marked with a `'`symbol before and after, as shown in
+4. Enter the activation codes provided by the Timecho team into the CLI one by one, using the following format. Wrap each activation code in single quotes (`'`):
- ```Bash
- IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA==='
- ```
+```Bash
+IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA==='
+```
-### Verify Activation
+### 3.7 Verify Activation
-When the status of the 'Result' field is displayed as' success', it indicates successful activation
+Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated.
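+
+For example, from the CLI (a minimal check; the exact column layout may vary by version):
+
+```Bash
+./start-cli.sh -h iotdb-1 # Log in to the database
+IoTDB> show cluster # Check that ClusterActivationStatus reads ACTIVATED
+```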

-## Node Maintenance Steps
+## 4 Maintenance
-### ConfigNode Node Maintenance
+### 4.1 ConfigNode Maintenance
-ConfigNode node maintenance is divided into two types of operations: adding and removing ConfigNodes, with two common use cases:
+ConfigNode maintenance includes adding and removing ConfigNodes. Common use cases include:
-- Cluster expansion: For example, when there is only one ConfigNode in the cluster, and you want to increase the high availability of ConfigNode nodes, you can add two ConfigNodes, making a total of three ConfigNodes in the cluster.
+- **Cluster Expansion:** If the cluster contains only 1 ConfigNode, adding 2 more ConfigNodes enhances high availability, resulting in a total of 3 ConfigNodes.
+- **Cluster Fault Recovery:** If a ConfigNode's machine fails and it cannot function normally, remove the faulty ConfigNode and add a new one to the cluster.
-- Cluster failure recovery: When the machine where a ConfigNode is located fails, making the ConfigNode unable to run normally, you can remove this ConfigNode and then add a new ConfigNode to the cluster.
+**Note:** After completing ConfigNode maintenance, ensure that the cluster contains either 1 or 3 active ConfigNodes. Two ConfigNodes do not provide high availability, and more than three ConfigNodes can degrade performance.
-> ❗️Note, after completing ConfigNode node maintenance, you need to ensure that there are 1 or 3 ConfigNodes running normally in the cluster. Two ConfigNodes do not have high availability, and more than three ConfigNodes will lead to performance loss.
+#### 4.1.1 Adding a ConfigNode
-#### Adding ConfigNode Nodes
+**Linux / macOS:**
-Script command:
-
-```shell
-# Linux / MacOS
-# First switch to the IoTDB root directory
+```Bash
sbin/start-confignode.sh
+```
+
+**Windows:**
-# Windows
-# First switch to the IoTDB root directory
+```Bash
sbin/start-confignode.bat
```
-#### Removing ConfigNode Nodes
+#### 4.1.2 Removing a ConfigNode
-First connect to the cluster through the CLI and confirm the internal address and port number of the ConfigNode you want to remove by using `show confignodes`:
+1. Connect to the cluster using the CLI and confirm the internal address and port of the ConfigNode to be removed:
-```Bash
+```SQL
+show confignodes;
+```
+
+Example output:
+
+```Plain
IoTDB> show confignodes
+------+-------+---------------+------------+--------+
|NodeID| Status|InternalAddress|InternalPort| Role|
@@ -267,48 +265,60 @@ Total line number = 3
It costs 0.030s
```
-Then use the script to remove the DataNode. Script command:
+2. Remove the ConfigNode using the script:
+
+**Linux / macOS:**
```Bash
-# Linux / MacOS
sbin/remove-confignode.sh [confignode_id]
+# Or:
+sbin/remove-confignode.sh [cn_internal_address:cn_internal_port]
+```
-#Windows
-sbin/remove-confignode.bat [confignode_id]
+**Windows:**
+```Bash
+sbin/remove-confignode.bat [confignode_id]
+# Or:
+sbin/remove-confignode.bat [cn_internal_address:cn_internal_port]
```
-### DataNode Node Maintenance
-
-There are two common scenarios for DataNode node maintenance:
+### 4.2 DataNode Maintenance
-- Cluster expansion: For the purpose of expanding cluster capabilities, add new DataNodes to the cluster
+DataNode maintenance includes adding and removing DataNodes. Common use cases include:
-- Cluster failure recovery: When a machine where a DataNode is located fails, making the DataNode unable to run normally, you can remove this DataNode and add a new DataNode to the cluster
+- **Cluster Expansion:** Add new DataNodes to increase cluster capacity.
+- **Cluster Fault Recovery:** If a DataNode's machine fails and it cannot function normally, remove the faulty DataNode and add a new one to the cluster.
-> ❗️Note, in order for the cluster to work normally, during the process of DataNode node maintenance and after the maintenance is completed, the total number of DataNodes running normally should not be less than the number of data replicas (usually 2), nor less than the number of metadata replicas (usually 3).
+**Note:** During and after DataNode maintenance, ensure that the number of active DataNodes is not fewer than the data replication factor (usually 2) or the schema replication factor (usually 3).
-#### Adding DataNode Nodes
+#### 4.2.1 Adding a DataNode
-Script command:
+**Linux / macOS:**
-```Bash
-# Linux / MacOS
-# First switch to the IoTDB root directory
+```Bash
sbin/start-datanode.sh
+```
+
+**Windows:**
-# Windows
-# First switch to the IoTDB root directory
+```Bash
sbin/start-datanode.bat
```
-Note: After adding a DataNode, as new writes arrive (and old data expires, if TTL is set), the cluster load will gradually balance towards the new DataNode, eventually achieving a balance of storage and computation resources on all nodes.
+**Note:** After adding a DataNode, the cluster load will gradually balance across all nodes as new writes arrive and old data expires (if TTL is set).
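+
+If you want to observe the rebalancing, region distribution can be inspected from the CLI (an optional check; output columns vary by version):
+
+```Bash
+IoTDB> show regions # DataRegion counts gradually even out across DataNodes
+```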
-#### Removing DataNode Nodes
+#### 4.2.2 Removing a DataNode
-First connect to the cluster through the CLI and confirm the RPC address and port number of the DataNode you want to remove with `show datanodes`:
+1. Connect to the cluster using the CLI and confirm the RPC address and port of the DataNode to be removed:
-```Bash
+```SQL
+show datanodes;
+```
+
+Example output:
+
+```Plain
IoTDB> show datanodes
+------+-------+----------+-------+-------------+---------------+
|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum|
@@ -321,80 +331,79 @@ Total line number = 3
It costs 0.110s
```
-Then use the script to remove the DataNode. Script command:
+2. Remove the DataNode using the script:
-```Bash
-# Linux / MacOS
-sbin/remove-datanode.sh [datanode_id]
+**Linux / macOS:**
-#Windows
-sbin/remove-datanode.bat [datanode_id]
+```Bash
+sbin/remove-datanode.sh [datanode_id]
+# Or:
+sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port]
```
-## Common Questions
-
-1. Multiple prompts indicating activation failure during deployment process
+**Windows:**
- - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user.
-
- - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user.
-
-2. Confignode failed to start
-
- Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified.
-
- Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions.
-
- Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart.
+```Bash
+sbin/remove-datanode.bat [datanode_id]
+# Or:
+sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port]
+```
- Step 4: Clean up the environment:
+## 5 Common Issues
- a. Terminate all ConfigNode Node and DataNode processes.
+1. Activation Fails Repeatedly
+ - Use the `ls -al` command to verify that the ownership of the installation directory matches the current user.
+ - Check the ownership of all files in the `./activation` directory to ensure they belong to the current user.
- ```Bash
- # 1. Stop the ConfigNode and DataNode services
- sbin/stop-standalone.sh
-
- # 2. Check for any remaining processes
- jps
- # Or
- ps -ef|gerp iotdb
+2. ConfigNode Fails to Start
+ 1. Review the startup logs to check whether any parameters that cannot be modified after the first startup were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
- # 3. If there are any remaining processes, manually kill the
- kill -9
- # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes
- ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9
- ```
-
- b. Delete the data and logs directories.
-
- Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory.
-
- ```Bash
- cd /data/iotdb
- rm -rf data logs
- ```
-
-## Appendix
-
-### Introduction to Configuration Node Parameters
-
-| Parameter | Description | Is it required |
-| :-------- | :---------------------------------------------- | :------------- |
-| -d | Start in daemon mode, running in the background | No |
-
-### Introduction to Datanode Node Parameters
-
-| Abbreviation | Description | Is it required |
-| :----------- | :----------------------------------------------------------- | :------------- |
-| -v | Show version information | No |
-| -f | Run the script in the foreground, do not put it in the background | No |
-| -d | Start in daemon mode, i.e. run in the background | No |
-| -p | Specify a file to store the process ID for process management | No |
-| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No |
-| -g | Print detailed garbage collection (GC) information | No |
-| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No |
-| -E | Specify the path of the JVM error log file | No |
-| -D | Define system properties, in the format key=value | No |
-| -X | Pass -XX parameters directly to the JVM | No |
-| -h | Help instruction | No |
\ No newline at end of file
+ **Clean the Environment**
+
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
+
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+
+ 3. If processes remain, terminate them manually:
+ ```Bash
+ kill -9 <pid>
+
+ # For systems with a single IoTDB instance, you can clean up residual processes with:
+ ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+ 4. Delete the `data` and `logs` directories (deleting `data` is required; deleting `logs` is optional, to keep logs clean):
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 6 Appendix
+
+### 6.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 6.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources.md
index 374b03e2f..2ca497e6e 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources.md
@@ -19,7 +19,7 @@
-->
# Database Resources
-## CPU
+## 1 CPU
@@ -28,41 +28,41 @@
| Number of nodes |
- | standalone mode |
- Double active |
+ Standalone |
+ Dual-Active |
Distributed |
| Within 100000 |
- 2core-4core |
+ 2-4 cores |
1 |
2 |
3 |
| Within 300000 |
- 4core-8core |
+ 4-8 cores |
1 |
2 |
3 |
| Within 500000 |
- 8core-26core |
+ 8-16 cores |
1 |
2 |
3 |
| Within 1000000 |
- 16core-32core |
+ 16-32 cores |
1 |
2 |
3 |
| Within 2000000 |
- 32core-48core |
+ 32-48 cores |
1 |
2 |
3 |
@@ -81,7 +81,7 @@
-## Memory
+## 2 Memory
@@ -90,41 +90,41 @@
| Number of nodes |
- | standalone mode |
- Double active |
+ Standalone |
+ Dual-Active |
Distributed |
| Within 100000 |
- 4G-8G |
+ 4-8 GB |
1 |
2 |
3 |
| Within 300000 |
- 12G-32G |
+ 12-32 GB |
1 |
2 |
3 |
| Within 500000 |
- 24G-48G |
+ 24-48 GB |
1 |
2 |
3 |
| Within 1000000 |
- 32G-96G |
+ 32-96 GB |
1 |
2 |
3 |
| Within 2000000 |
- 64G-128G |
+ 64-128 GB |
1 |
2 |
3 |
@@ -143,19 +143,23 @@
-## Storage (Disk)
-### Storage space
-Calculation formula: Number of measurement points * Sampling frequency (Hz) * Size of each data point (Byte, different data types may vary, see table below) * Storage time (seconds) * Number of copies (usually 1 copy for a single node and 2 copies for a cluster) ÷ Compression ratio (can be estimated at 5-10 times, but may be higher in actual situations)
+## 3 Storage (Disk)
+### 3.1 Storage space
+Calculation Formula:
+
+```Plain
+Storage Space = Number of Measurement Points * Sampling Frequency (Hz) * Size of Each Data Point (Bytes, see the table below) * Storage Duration (seconds) * Replication Factor (usually 1 for standalone, 2 for a cluster) / Compression Ratio (roughly 5-10x in practice)
+```
+
+Data Point Size Calculation Table:
+
- | Data point size calculation |
-
-
- | data type |
+ Data Type |
Timestamp (Bytes) |
Value (Bytes) |
- Total size of data points (in bytes)
+ | Total Data Point Size (Bytes)
|
@@ -165,36 +169,48 @@ Calculation formula: Number of measurement points * Sampling frequency (Hz) * Si
| 9 |
- | INT32/FLOAT |
+ INT32 / FLOAT (Single Precision) |
8 |
4 |
12 |
- | INT64/DOUBLE |
+ INT64 / DOUBLE (Double Precision) |
8 |
8 |
16 |
- | TEXT |
+ TEXT (String) |
8 |
- The average is a |
+ Average = a |
8+a |
+Example:
+
+- Scenario: 1,000 devices, 100 measurement points per device, i.e. 100,000 sequences in total. Data type is INT32. Sampling frequency is 1Hz (once per second). Storage duration is 1 year. Replication factor is 3.
+- Full Calculation:
+ ```Plain
+ 1,000 devices * 100 measurement points * 12 bytes per data point * 86,400 seconds per day * 365 days per year * 3 replicas / 10 (compression ratio) / 1024 / 1024 / 1024 / 1024 ≈ 11 TB
+ ```
+- Simplified Calculation:
+ ```Plain
+ 1,000 * 100 * 12 * 86,400 * 365 * 3 / 10 / 1024 / 1024 / 1024 / 1024 ≈ 11 TB
+ ```
+### 3.2 Storage Configuration
+
+- For systems with > 10 million measurement points or high query loads, SSD is recommended.
+
+## 4 Network (NIC)
+A gigabit (1 Gbps) network card is sufficient when the write throughput is below 10 million points per second; at or above 10 million points per second, a 10-gigabit (10 Gbps) network card is required.
+
+| **Write** **Throughput** **(Data Points/Second)** | **NIC** **Speed** |
+| ------------------------------------------------- | -------------------- |
+| < 10 million | 1 Gbps (Gigabit) |
+| ≥ 10 million | 10 Gbps (10 Gigabit) |
+
+## 5 Additional Notes
-Example: 1000 devices, each with 100 measurement points, a total of 100000 sequences, INT32 type. Sampling frequency 1Hz (once per second), storage for 1 year, 3 copies.
-- Complete calculation formula: 1000 devices * 100 measurement points * 12 bytes per data point * 86400 seconds per day * 365 days per year * 3 copies / 10 compression ratio / 1024 / 1024 / 1024 / 1024 =11T
-- Simplified calculation formula: 1000 * 100 * 12 * 86400 * 365 * 3 / 10 / 1024 / 1024 / 1024 / 1024 =11T
-### Storage Configuration
-If the number of nodes is over 10000000 or the query load is high, it is recommended to configure SSD
-## Network (Network card)
-If the write throughput does not exceed 10 million points/second, configure 1Gbps network card. When the write throughput exceeds 10 million points per second, a 10Gbps network card needs to be configured.
-| **Write throughput (data points per second)** | **NIC rate** |
-| ------------------- | ------------- |
-| <10 million | 1Gbps |
-| >=10 million | 10Gbps |
-## Other instructions
-IoTDB has the ability to scale up clusters in seconds, and expanding node data does not require migration. Therefore, you do not need to worry about the limited cluster capacity estimated based on existing data. In the future, you can add new nodes to the cluster when you need to scale up.
\ No newline at end of file
+- IoTDB supports cluster scaling within seconds, and adding new nodes does not require data migration. There is no need to worry that cluster capacity estimated from current data will become a limit; simply add nodes to the cluster when you need to scale in the future.
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_apache.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_apache.md
new file mode 100644
index 000000000..29ee83861
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_apache.md
@@ -0,0 +1,52 @@
+
+# Deployment Mode
+
+IoTDB has two operation modes: standalone mode and cluster mode.
+
+## 1 Standalone Mode
+
+An IoTDB standalone instance includes 1 ConfigNode and 1 DataNode, referred to as 1C1D.
+
+- **Features**: Easy for developers to install and deploy, with lower deployment and maintenance costs, and convenient operation.
+- **Applicable scenarios**: Situations with limited resources or where high availability is not a critical requirement, such as edge servers.
+- **Deployment method**:[Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md)
+
+
+## 2 Cluster Mode
+
+An IoTDB cluster instance consists of 3 ConfigNodes and at least 3 DataNodes (typically 3), referred to as 3C3D. If some nodes fail, the remaining nodes can still provide services, ensuring high availability of the database, and performance can be improved by adding DataNodes.
+
+- **Features**: High availability and scalability, with the ability to enhance system performance by adding DataNodes.
+- **Applicable scenarios**: Enterprise-level application scenarios that require high availability and reliability.
+- **Deployment method**: [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_apache.md)
+
+## 3 Summary of Features
+
+| **Dimension** | **Stand-Alone Mode** | **Cluster Mode** |
+| :-------------------------- | :----------------------------------------------------- | :----------------------------------------------------------- |
+| Applicable Scenario | Edge deployment, low requirement for high availability | High-availability business, disaster recovery scenarios, etc. |
+| Number of Machines Required | 1 | ≥3 |
+| Security and Reliability | Cannot tolerate single-point failure | High, can tolerate single-point failure |
+| Scalability | Scalable by adding DataNodes to improve performance | Scalable by adding DataNodes to improve performance |
+| Performance | Scalable with the number of DataNodes | Scalable with the number of DataNodes |
+
+- The deployment steps for standalone mode and cluster mode are similar (adding ConfigNodes and DataNodes one by one), with the only differences being the number of replicas and the minimum number of nodes required to provide services.
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_timecho.md
new file mode 100644
index 000000000..90230e5c5
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Deployment-form_timecho.md
@@ -0,0 +1,63 @@
+
+# Deployment Mode
+
+IoTDB has two operation modes: standalone mode and cluster mode.
+
+## 1 Standalone Mode
+
+An IoTDB standalone instance includes 1 ConfigNode and 1 DataNode, i.e., 1C1D.
+
+- **Features**: Easy for developers to install and deploy, with low deployment and maintenance costs and convenient operations.
+- **Use Cases**: Scenarios with limited resources or low high-availability requirements, such as edge servers.
+- **Deployment Method**: [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md)
+
+## 2 Dual-Active Mode
+
+Dual-Active Deployment is a feature of TimechoDB, where two independent instances synchronize bidirectionally and can provide services simultaneously. If one instance stops and restarts, the other instance will resume data transfer from the breakpoint.
+
+> An IoTDB Dual-Active instance typically consists of 2 standalone nodes, i.e., 2 sets of 1C1D. Each instance can also be a cluster.
+
+- **Features**: The high-availability solution with the lowest resource consumption.
+- **Use Cases**: Scenarios with limited resources (only two servers) but requiring high availability.
+- **Deployment Method**: [Dual-Active Deployment](../Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md)
+
+## 3 Cluster Mode
+
+An IoTDB cluster instance consists of 3 ConfigNodes and no fewer than 3 DataNodes, typically 3 DataNodes, i.e., 3C3D. If some nodes fail, the remaining nodes can still provide services, ensuring high availability of the database. Performance can be improved by adding DataNodes.
+
+- **Features**: High availability, high scalability, and improved system performance by adding DataNodes.
+- **Use Cases**: Enterprise-level application scenarios requiring high availability and reliability.
+- **Deployment Method**: [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md)
+
+
+
+## 4 Feature Summary
+
+| **Dimension** | **Stand-Alone Mode** | **Dual-Active Mode** | **Cluster Mode** |
+| :-------------------------- | :------------------------------------------------------- | :------------------------------------------------------ | :------------------------------------------------------ |
+| Use Cases | Edge-side deployment, low high-availability requirements | High-availability services, disaster recovery scenarios | High-availability services, disaster recovery scenarios |
+| Number of Machines Required | 1 | 2 | ≥3 |
+| Security and Reliability | Cannot tolerate single-point failure | High, can tolerate single-point failure | High, can tolerate single-point failure |
+| Scalability | Can expand DataNodes to improve performance | Each instance can be scaled as needed | Can expand DataNodes to improve performance |
+| Performance | Can scale with the number of DataNodes | Same as one of the instances | Can scale with the number of DataNodes |
+
+- The deployment steps for Stand-Alone Mode and Cluster Mode are similar (adding ConfigNodes and DataNodes one by one), with differences only in the number of replicas and the minimum number of nodes required to provide services.
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md
index 048c3e0d8..176db37f8 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md
@@ -20,43 +20,44 @@
-->
# Docker Deployment
-## Environmental Preparation
+## 1 Environment Preparation
-### Docker Installation
+### 1.1 Install Docker
-```SQL
-#Taking Ubuntu as an example, other operating systems can search for installation methods themselves
-#step1: Install some necessary system tools
+```Bash
+#Taking Ubuntu as an example. For other operating systems, you can search for installation methods on your own.
+#step1: Install necessary system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
#step2: Install GPG certificate
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
-#step3: Write software source information
+#step3: Add the software source
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
#step4: Update and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
-#step5: Set Docker to start automatically upon startup
+#step5: Set Docker to start automatically on boot
sudo systemctl enable docker
-#step6: Verify if Docker installation is successful
-docker --version #Display version information, indicating successful installation
+#step6: Verify if Docker is installed successfully
+docker --version #Display version information, indicating successful installation.
```
-### Docker-compose Installation
+### 1.2 Install Docker Compose
-```SQL
+```Bash
#Installation command
curl -L "https://github.com/docker/compose/releases/download/v2.20.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
-#Verify if the installation was successful
-docker-compose --version #Displaying version information indicates successful installation
+#Verify the installation
+docker-compose --version #Display version information, indicating successful installation.
```
-## Stand-Alone Deployment
+## 2 Stand-Alone Deployment
This section demonstrates how to deploy a standalone Docker version of 1C1D.
+
-### Pull Image File
-The Docker image of Apache IoTDB has been uploaded tohttps://hub.docker.com/r/apache/iotdb。
+### 2.1 Pull the Image
+The Docker image of Apache IoTDB has been uploaded to https://hub.docker.com/r/apache/iotdb.
@@ -75,31 +76,29 @@ docker images

-### Create Docker Bridge Network
+### 2.2 Create a Docker Bridge Network
```Bash
docker network create --driver=bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 iotdb
```
-### Write The Yml File For Docker-Compose
+### 2.3 Write the Docker-Compose YML File
-Here we take the example of consolidating the IoTDB installation directory and yml files in the/docker iotdb folder:
+Assume the IoTDB installation directory and the YML file are placed under the `/docker-iotdb` folder. The directory structure is as follows: `/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml`
-The file directory structure is:`/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml `
-
-```bash
+```Bash
docker-iotdb:
├── iotdb #Iotdb installation directory
│── docker-compose-standalone.yml #YML file for standalone Docker Composer
```
-The complete docker-compose-standalone.yml content is as follows:
+The complete content of `docker-compose-standalone.yml` is as follows:
-```bash
+```Bash
version: "3"
services:
iotdb-service:
 image: apache/iotdb:1.3.2-standalone #The image used
hostname: iotdb
container_name: iotdb
restart: always
@@ -120,8 +119,11 @@ services:
- dn_seed_config_node=iotdb:10710
privileged: true
volumes:
- ./iotdb/data:/iotdb/data
- ./iotdb/logs:/iotdb/logs
networks:
iotdb:
ipv4_address: 172.18.0.6
@@ -130,16 +132,17 @@ networks:
external: true
```
-### Start IoTDB
+### 2.4 Start IoTDB
Use the following command to start:
-```bash
+```Bash
cd /docker-iotdb
-docker-compose -f docker-compose-standalone.yml up -d #Background startup
+docker-compose -f docker-compose-standalone.yml up -d #Background startup
```
-### Validate Deployment
+
+### 2.5 Verify the Deployment
- Viewing the log, the following words indicate successful startup
@@ -172,17 +175,18 @@ You can see that all services are running and the activation status shows as act

-### Map/conf Directory (optional)
+### 2.6 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
-Step 1: Copy the /conf directory from the container to `/docker-iotdb/iotdb/conf`
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-```bash
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf`:
+
+```Bash
docker cp iotdb:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add mappings in docker-compose-standalone.yml
+**Step 2**: Add the mapping in `docker-compose-standalone.yml`:
```bash
volumes:
@@ -191,35 +195,35 @@ Step 2: Add mappings in docker-compose-standalone.yml
- ./iotdb/logs:/iotdb/logs
```
-Step 3: Restart IoTDB
+**Step 3**: Restart IoTDB:
-```bash
+```Bash
docker-compose -f docker-compose-standalone.yml up -d
```
-## Cluster Deployment
+## 3 Cluster Deployment
-This section describes how to manually deploy an instance that includes 3 Config Nodes and 3 Data Nodes, commonly known as a 3C3D cluster.
+This section describes how to manually deploy a cluster consisting of 3 ConfigNodes and 3 DataNodes, commonly referred to as a 3C3D cluster.
-**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
+**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
-Taking the host network as an example, we will demonstrate how to deploy a 3C3D cluster.
+Below, we demonstrate how to deploy a 3C3D cluster using the host network as an example.
-### Set Host Name
+### 3.1 Set Hostnames
-Assuming there are currently three Linux servers, the IP addresses and service role assignments are as follows:
+Assume there are 3 Linux servers with the following IP addresses and service roles:
-| Node IP | Host Name | Service |
-| ----------- | --------- | -------------------- |
-| 192.168.1.3 | iotdb-1 | ConfigNode、DataNode |
-| 192.168.1.4 | iotdb-2 | ConfigNode、DataNode |
-| 192.168.1.5 | iotdb-3 | ConfigNode、DataNode |
+| Node IP | Hostname | Services |
+| :---------- | :------- | :------------------- |
+| 192.168.1.3 | iotdb-1 | ConfigNode, DataNode |
+| 192.168.1.4 | iotdb-2 | ConfigNode, DataNode |
+| 192.168.1.5 | iotdb-3 | ConfigNode, DataNode |
-Configure the host names on three machines separately. To set the host names, configure `/etc/hosts` on the target server using the following command:
+On each of the 3 machines, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
```Bash
echo "192.168.1.3 iotdb-1" >> /etc/hosts
@@ -227,7 +231,7 @@ echo "192.168.1.4 iotdb-2" >> /etc/hosts
echo "192.168.1.5 iotdb-3" >> /etc/hosts
```
-### Pull Image File
+### 3.2 Pull the Image
-The Docker image of Apache IoTDB has been uploaded tohttps://hub.docker.com/r/apache/iotdb。
+The Docker image of Apache IoTDB has been uploaded to https://hub.docker.com/r/apache/iotdb.
@@ -245,24 +249,22 @@ docker images

-### Write The Yml File For Docker Compose
+### 3.3 Write the Docker-Compose YML Files
-Here we take the example of consolidating the IoTDB installation directory and yml files in the `/docker-iotdb` folder:
+Here, we assume the IoTDB installation directory and YML files are placed under the `/docker-iotdb` folder. The directory structure is as follows:
-The file directory structure is :`/docker-iotdb/iotdb`, `/docker-iotdb/confignode.yml`,`/docker-iotdb/datanode.yml`
-
-```SQL
+```Bash
docker-iotdb:
-├── confignode.yml #Yml file of confignode
-├── datanode.yml #Yml file of datanode
-└── iotdb #IoTDB installation directory
+├── confignode.yml #ConfigNode YML file
+├── datanode.yml #DataNode YML file
+└── iotdb #IoTDB installation directory
```
-On each server, two yml files need to be written, namely confignnode. yml and datanode. yml. The example of yml is as follows:
+On each server, create two YML files: `confignode.yml` and `datanode.yml`. Examples are provided below:
**confignode.yml:**
-```bash
+```Bash
#confignode.yml
version: "3"
services:
@@ -291,7 +293,7 @@ services:
**datanode.yml:**
-```bash
+```Bash
#datanode.yml
version: "3"
services:
@@ -324,29 +326,31 @@ services:
network_mode: "host" #Using the host network
```
-### Starting Confignode For The First Time
+### 3.4 Start ConfigNode for the First Time
-First, start configNodes on each of the three servers to obtain the machine code. Pay attention to the startup order, start the first iotdb-1 first, then start iotdb-2 and iotdb-3.
+Start the ConfigNode on all 3 servers. **Note the startup order**: Start `iotdb-1` first, followed by `iotdb-2` and `iotdb-3`.
-```bash
+Run the following command on each server:
+
+```Bash
cd /docker-iotdb
docker-compose -f confignode.yml up -d #Background startup
```
-### Start Datanode
+### 3.5 Start DataNode
-Start datanodes on 3 servers separately
+Start the DataNode on all 3 servers:
-```SQL
+```Bash
cd /docker-iotdb
docker-compose -f datanode.yml up -d #Background startup
```

-### Validate Deployment
+### 3.6 Verify Deployment
-- Viewing the logs, the following words indicate that the datanode has successfully started
+- Check the logs: If you see the following message, the DataNode has started successfully.
```SQL
docker logs -f iotdb-datanode #View log command
@@ -355,7 +359,7 @@ docker-compose -f datanode.yml up -d #Background startup

-- Enter any container to view the service running status and activation information
+- Enter the container and check the service status:
View the launched container
@@ -365,7 +369,7 @@ docker-compose -f datanode.yml up -d #Background startup

- Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status
+ Enter any container, log in to the database via CLI, and use the `show cluster` command to check the service status:
```SQL
docker exec -it iotdb-datanode /bin/bash #Entering the container
@@ -373,23 +377,23 @@ docker-compose -f datanode.yml up -d #Background startup
IoTDB> show cluster #View status
```
- You can see that all services are running and the activation status shows as activated.
+ If all services are in the `running` state, the IoTDB deployment is successful.

-### Map/conf Directory (optional)
+### 3.7 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-Step 1: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on each of the three servers
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on all 3 servers:
-```bash
+```Bash
docker cp iotdb-confignode:/iotdb/conf /docker-iotdb/iotdb/conf
or
docker cp iotdb-datanode:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on 3 servers
+**Step 2**: Add the `/conf` directory mapping in both `confignode.yml` and `datanode.yml` on all 3 servers:
```bash
#confignode.yml
@@ -407,7 +411,7 @@ Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on
- /dev/mem:/dev/mem:ro
```
-Step 3: Restart IoTDB on 3 servers
+**Step 3**: Restart IoTDB on all 3 servers:
```bash
cd /docker-iotdb
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md
index 4aec6d8ee..608bc363c 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md
@@ -20,68 +20,66 @@
-->
# Docker Deployment
-## Environmental Preparation
+## 1 Environment Preparation
-### Docker Installation
+### 1.1 Install Docker
```Bash
-#Taking Ubuntu as an example, other operating systems can search for installation methods themselves
-#step1: Install some necessary system tools
+#Taking Ubuntu as an example. For other operating systems, you can search for installation methods on your own.
+#step1: Install necessary system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
#step2: Install GPG certificate
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
-#step3: Write software source information
+#step3: Add the software source
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
#step4: Update and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
-#step5: Set Docker to start automatically upon startup
+#step5: Set Docker to start automatically on boot
sudo systemctl enable docker
-#step6: Verify if Docker installation is successful
-docker --version #Display version information, indicating successful installation
+#step6: Verify if Docker is installed successfully
+docker --version #Display version information, indicating successful installation.
```
-### Docker-compose Installation
+### 1.2 Install Docker Compose
```Bash
#Installation command
curl -L "https://github.com/docker/compose/releases/download/v2.20.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
-#Verify if the installation was successful
-docker-compose --version #Displaying version information indicates successful installation
+#Verify the installation
+docker-compose --version #Display version information, indicating successful installation.
```
-### Install The Dmidecode Plugin
+### 1.3 Install dmidecode
-By default, Linux servers should already be installed. If not, you can use the following command to install them.
+By default, Linux servers should already have dmidecode. If not, you can use the following command to install it.
```Bash
sudo apt-get install dmidecode
```
-After installing dmidecode, search for the installation path: `wherever dmidecode`. Assuming the result is `/usr/sbin/dmidecode`, remember this path as it will be used in the later docker compose yml file.
+After installing `dmidecode`, locate its installation path by running `whereis dmidecode`. Assuming the result is `/usr/sbin/dmidecode`, note this path down, as it will be used later in the Docker Compose YML file.
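+
+For example (output varies by distribution):
+
+```Bash
+whereis dmidecode
+# dmidecode: /usr/sbin/dmidecode /usr/share/man/man8/dmidecode.8.gz
+```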
-### Get Container Image Of IoTDB
+### 1.4 Obtain the Container Image
-You can contact business or technical support to obtain container images for IoTDB Enterprise Edition.
+You can contact the Timecho team to obtain the TimechoDB container image.
-## Stand-Alone Deployment
+## 2 Stand-Alone Deployment
This section demonstrates how to deploy a standalone Docker version of 1C1D.
-### Load Image File
+### 2.1 Load the Image File
-For example, the container image file name of IoTDB obtained here is: `iotdb-enterprise-1.3.2-3-standalone-docker.tar.gz`
-
-Load image:
+For example, if the IoTDB container image file you obtained is named `iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz`, use the following command to load the image:
```Bash
docker load -i iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz
```
-View image:
+To view the loaded image, use the following command:
```Bash
docker images
@@ -89,17 +87,15 @@ docker images

-### Create Docker Bridge Network
+### 2.2 Create a Docker Bridge Network
```Bash
docker network create --driver=bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 iotdb
```
-### Write The Yml File For docker-compose
-
-Here we take the example of consolidating the IoTDB installation directory and yml files in the/docker iotdb folder:
+### 2.3 Write the Docker-Compose YML File
-The file directory structure is:`/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml `
+Assume the IoTDB installation directory and the YML file are placed under the `/docker-iotdb` folder. The directory structure is as follows: `/docker-iotdb/iotdb`, `/docker-iotdb/docker-compose-standalone.yml`
```Bash
docker-iotdb:
@@ -107,7 +103,7 @@ docker-iotdb:
│── docker-compose-standalone.yml #YML file for standalone Docker Composer
```
-The complete docker-compose-standalone.yml content is as follows:
+The complete content of `docker-compose-standalone.yml` is as follows:
```Bash
version: "3"
@@ -147,7 +143,7 @@ networks:
external: true
```
-### First Launch
+### 2.4 First Startup
Use the following command to start:
@@ -156,21 +152,21 @@ cd /docker-iotdb
docker-compose -f docker-compose-standalone.yml up
```
-Due to lack of activation, it is normal to exit directly upon initial startup. The initial startup is to obtain the machine code file for the subsequent activation process.
+Since the system is not activated yet, it will exit immediately after the first startup, which is normal. The purpose of the first startup is to generate the machine code file for the activation process.

-### Apply For Activation
+### 2.5 Apply for Activation
-- After the first startup, a system_info file will be generated in the physical machine directory `/docker-iotdb/iotdb/activation`, and this file will be copied to the Timecho staff.
+- After the first startup, a `system_info` file will be generated in the physical machine directory `/docker-iotdb/iotdb/activation`. Copy this file and send it to the Timecho team.

-- Received the license file returned by the staff, copy the license file to the `/docker iotdb/iotdb/activation` folder.
+- Once you receive the `license` file, copy it to the `/docker-iotdb/iotdb/activation` folder.

-### Restart IoTDB
+### 2.6 Start IoTDB Again
```Bash
docker-compose -f docker-compose-standalone.yml up -d
@@ -178,9 +174,9 @@ docker-compose -f docker-compose-standalone.yml up -d

-### Validate Deployment
+### 2.7 Verify the Deployment
-- Viewing the log, the following words indicate successful startup
+- Check the logs: If you see the following message, the startup is successful.
```Bash
docker logs -f iotdb-datanode #View log command
@@ -189,7 +185,7 @@ docker-compose -f docker-compose-standalone.yml up -d

-- Enter the container to view the service running status and activation information
+- Enter the container and check the service status:
View the launched container
@@ -199,69 +195,67 @@ docker-compose -f docker-compose-standalone.yml up -d

- Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status
+ Enter the container, log in to the database through the CLI, and use the `show cluster` command to view the service status and activation status:
```Bash
- docker exec -it iotdb /bin/bash #Entering the container
+ docker exec -it iotdb /bin/bash #Enter the container
./start-cli.sh -h iotdb #Log in to the database
- IoTDB> show cluster #View status
+ IoTDB> show cluster #Check the service status
```
- You can see that all services are running and the activation status shows as activated.
+ If all services are in the `running` state, the IoTDB deployment is successful.

-### Map/conf Directory (optional)
+### 2.8 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-Step 1: Copy the/conf directory from the container to/docker-iotdb/iotdb/conf
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf`:
```Bash
docker cp iotdb:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add mappings in docker-compose-standalone.yml
+**Step 2**: Add the mapping in `docker-compose-standalone.yml`:
```Bash
volumes:
- - ./iotdb/conf:/iotdb/conf #Add mapping for this/conf folder
+ - ./iotdb/conf:/iotdb/conf # Add this mapping for the /conf folder
 - ./iotdb/activation:/iotdb/activation
- ./iotdb/data:/iotdb/data
- ./iotdb/logs:/iotdb/logs
 - /usr/sbin/dmidecode:/usr/sbin/dmidecode:ro
- /dev/mem:/dev/mem:ro
```
-Step 3: Restart IoTDB
+**Step 3**: Restart IoTDB:
```Bash
docker-compose -f docker-compose-standalone.yml up -d
```
-## Cluster Deployment
+## 3 Cluster Deployment
-This section describes how to manually deploy an instance that includes 3 Config Nodes and 3 Data Nodes, commonly known as a 3C3D cluster.
+This section describes how to manually deploy a cluster consisting of 3 ConfigNodes and 3 DataNodes, commonly referred to as a 3C3D cluster.
-**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
+**Note: The cluster version currently only supports host and overlay networks, and does not support bridge networks.**
-Taking the host network as an example, we will demonstrate how to deploy a 3C3D cluster.
+Below, we demonstrate how to deploy a 3C3D cluster using the host network as an example.
-### Set Host Name
+### 3.1 Set Hostnames
-Assuming there are currently three Linux servers, the IP addresses and service role assignments are as follows:
+Assume there are 3 Linux servers with the following IP addresses and service roles:
-| Node IP | Host Name | Service |
-| ----------- | --------- | -------------------- |
-| 192.168.1.3 | iotdb-1 | ConfigNode、DataNode |
-| 192.168.1.4 | iotdb-2 | ConfigNode、DataNode |
-| 192.168.1.5 | iotdb-3 | ConfigNode、DataNode |
+| Node IP | Hostname | Services |
+| :---------- | :------- | :------------------- |
+| 192.168.1.3 | iotdb-1 | ConfigNode, DataNode |
+| 192.168.1.4 | iotdb-2 | ConfigNode, DataNode |
+| 192.168.1.5 | iotdb-3 | ConfigNode, DataNode |
-Configure the host names on three machines separately. To set the host names, configure `/etc/hosts` on the target server using the following command:
+On each of the 3 machines, configure the hostnames by editing the `/etc/hosts` file. Use the following commands:
```Bash
echo "192.168.1.3 iotdb-1" >> /etc/hosts
@@ -269,17 +263,15 @@ echo "192.168.1.4 iotdb-2" >> /etc/hosts
echo "192.168.1.5 iotdb-3" >> /etc/hosts
```
-### Load Image File
+### 3.2 Load the Image File
-For example, the container image file name obtained for IoTDB is: `iotdb-enterprise-1.3.23-standalone-docker.tar.gz`
-
-Execute the load image command on three servers separately:
+For example, if the IoTDB container image file is named `iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz`, execute the following command on all 3 servers to load the image:
```Bash
docker load -i iotdb-enterprise-1.3.2.3-standalone-docker.tar.gz
```
-View image:
+To view the loaded images, run:
```Bash
docker images
@@ -287,20 +279,18 @@ docker images

-### Write The Yml File For Docker Compose
-
-Here we take the example of consolidating the IoTDB installation directory and yml files in the /docker-iotdb folder:
+### 3.3 Write the Docker-Compose YML Files
-The file directory structure is:/docker-iotdb/iotdb, /docker-iotdb/confignode.yml,/docker-iotdb/datanode.yml
+Here, we assume the IoTDB installation directory and YML files are placed under the `/docker-iotdb` folder. The directory structure is as follows:
```Bash
docker-iotdb:
-├── confignode.yml #Yml file of confignode
-├── datanode.yml #Yml file of datanode
-└── iotdb #IoTDB installation directory
+├── confignode.yml #ConfigNode YML file
+├── datanode.yml #DataNode YML file
+└── iotdb #IoTDB installation directory
```
-On each server, two yml files need to be written, namely confignnode. yml and datanode. yml. The example of yml is as follows:
+On each server, create two YML files: `confignode.yml` and `datanode.yml`. Examples are provided below:
**confignode.yml:**
@@ -366,30 +356,32 @@ services:
network_mode: "host" #Using the host network
```
-### Starting Confignode For The First Time
+### 3.4 Start ConfigNode for the First Time
+
+Start the ConfigNode on all 3 servers. **Note the startup order**: Start `iotdb-1` first, followed by `iotdb-2` and `iotdb-3`.
-First, start configNodes on each of the three servers to obtain the machine code. Pay attention to the startup order, start the first iotdb-1 first, then start iotdb-2 and iotdb-3.
+Run the following command on each server:
```Bash
cd /docker-iotdb
docker-compose -f confignode.yml up -d #Background startup
```
-### Apply For Activation
+### 3.5 Apply for Activation
-- After starting three confignodes for the first time, a system_info file will be generated in each physical machine directory `/docker-iotdb/iotdb/activation`, and the system_info files of the three servers will be copied to the Timecho staff;
+- After starting the 3 ConfigNodes for the first time, a `system_info` file will be generated in the `/docker-iotdb/iotdb/activation` directory on each physical machine. Copy the `system_info` files from all 3 servers and send them to the Timecho team.

-- Put the three license files into the `/docker iotdb/iotdb/activation` folder of the corresponding Configurable Node node;
+- Place the 3 `license` files into the corresponding `/docker-iotdb/iotdb/activation` folders on each ConfigNode server.

-- After the license is placed in the corresponding activation folder, confignode will be automatically activated without restarting confignode
+- Once the `license` files are placed in the `activation` folders, the ConfigNodes will automatically activate. **No restart is required for the ConfigNodes.**
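+
+For reference, collecting the `system_info` files onto one operations host could look like the following sketch (the `root` user and destination file names are illustrative assumptions):
+
+```Bash
+scp root@iotdb-1:/docker-iotdb/iotdb/activation/system_info ./system_info_iotdb-1
+scp root@iotdb-2:/docker-iotdb/iotdb/activation/system_info ./system_info_iotdb-2
+scp root@iotdb-3:/docker-iotdb/iotdb/activation/system_info ./system_info_iotdb-3
+```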
-### Start Datanode
+### 3.6 Start DataNode
-Start datanodes on 3 servers separately
+Start the DataNode on all 3 servers:
```Bash
cd /docker-iotdb
@@ -398,9 +390,9 @@ docker-compose -f datanode.yml up -d #Background startup

-### Validate Deployment
+### 3.7 Verify Deployment
-- Viewing the logs, the following words indicate that the datanode has successfully started
+- Check the logs: If you see the following message, the DataNode has started successfully.
```Bash
docker logs -f iotdb-datanode #View log command
@@ -409,7 +401,7 @@ docker-compose -f datanode.yml up -d #Background startup

-- Enter any container to view the service running status and activation information
+- Enter the container and check the service status:
View the launched container
@@ -419,23 +411,23 @@ docker-compose -f datanode.yml up -d #Background startup

- Enter the container, log in to the database through CLI, and use the `show cluster` command to view the service status and activation status
+ Enter any container, log in to the database via CLI, and use the `show cluster` command to check the service status:
- ```Bash
- docker exec -it iotdb-datanode /bin/bash #Entering the container
- ./start-cli.sh -h iotdb-1 #Log in to the database
- IoTDB> show cluster #View status
- ```
+```Bash
+docker exec -it iotdb-datanode /bin/bash #Entering the container
+./start-cli.sh -h iotdb-1 #Log in to the database
+IoTDB> show cluster #View status
+```
- You can see that all services are running and the activation status shows as activated.
+If all services are in the `running` state, the IoTDB deployment is successful.
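+
+For reference, the output of `show cluster` has roughly the following shape (illustrative only; node IDs, addresses, and versions will differ):
+
+```Bash
+IoTDB> show cluster
+# NodeID|  NodeType| Status|InternalAddress|InternalPort|...
+#      0|ConfigNode|Running|iotdb-1        |       10710|...
+#      3|  DataNode|Running|iotdb-1        |        6667|...
+```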

-### Map/conf Directory (optional)
+### 3.8 Map the `/conf` Directory (Optional)
-If you want to directly modify the configuration file in the physical machine in the future, you can map the/conf folder in the container in three steps:
+If you want to modify configuration files directly on the physical machine, you can map the `/conf` folder from the container. Follow these steps:
-Step 1: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on each of the three servers
+**Step 1**: Copy the `/conf` directory from the container to `/docker-iotdb/iotdb/conf` on all 3 servers:
```Bash
docker cp iotdb-confignode:/iotdb/conf /docker-iotdb/iotdb/conf
@@ -443,7 +435,7 @@ or
docker cp iotdb-datanode:/iotdb/conf /docker-iotdb/iotdb/conf
```
-Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on 3 servers
+**Step 2**: Add the `/conf` directory mapping in both `confignode.yml` and `datanode.yml` on all 3 servers:
```Bash
#confignode.yml
@@ -465,11 +457,10 @@ Step 2: Add `/conf` directory mapping in `confignode.yml` and `datanode. yml` on
- /dev/mem:/dev/mem:ro
```
-Step 3: Restart IoTDB on 3 servers
+**Step 3**: Restart IoTDB on all 3 servers:
```Bash
cd /docker-iotdb
docker-compose -f confignode.yml up -d
docker-compose -f datanode.yml up -d
-```
-
+```
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md
index 1936905cf..e6cb33081 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md
@@ -22,95 +22,100 @@
## What is a double active version?
-Dual active usually refers to two independent machines (or clusters) that perform real-time mirror synchronization. Their configurations are completely independent and can simultaneously receive external writes. Each independent machine (or cluster) can synchronize the data written to itself to another machine (or cluster), and the data of the two machines (or clusters) can achieve final consistency.
+Dual-active mode refers to two independent instances (either standalone or clusters) with completely independent configurations. These instances can simultaneously handle external read and write operations, with real-time bi-directional synchronization and breakpoint recovery capabilities.
-- Two standalone machines (or clusters) can form a high availability group: when one of the standalone machines (or clusters) stops serving, the other standalone machine (or cluster) will not be affected. When the single machine (or cluster) that stopped the service is restarted, another single machine (or cluster) will synchronize the newly written data. Business can be bound to two standalone machines (or clusters) for read and write operations, thereby achieving high availability.
-- The dual active deployment scheme allows for high availability with fewer than 3 physical nodes and has certain advantages in deployment costs. At the same time, the physical supply isolation of two sets of single machines (or clusters) can be achieved through the dual ring network of power and network, ensuring the stability of operation.
-- At present, the dual active capability is a feature of the enterprise version.
+Key features include:
+
+- **Mutual Backup of Instances**: If one instance stops service, the other remains unaffected. When the stopped instance resumes, the other instance will synchronize newly written data. Businesses can bind both instances for read and write operations, achieving high availability.
+- **Cost-Effective Deployment**: The dual-active deployment solution achieves high availability with only two physical nodes, offering cost advantages. Additionally, physical resource isolation for the two instances can be ensured by leveraging dual-ring power and network designs, enhancing operational stability.
+
+**Note:** The dual-active functionality is exclusively available in enterprise-grade TimechoDB.

-## Note
+## 2 Prerequisites
-1. It is recommended to prioritize using `hostname` for IP configuration during deployment to avoid the problem of database failure caused by modifying the host IP in the later stage. To set the hostname, you need to configure `/etc/hosts` on the target server. If the local IP is 192.168.1.3 and the hostname is iotdb-1, you can use the following command to set the server's hostname and configure IoTDB's `cn_internal-address` and` dn_internal-address` using the hostname.
+1. **Hostname Configuration**: It is recommended to prioritize hostname over IP during deployment to avoid issues where the database cannot start due to later changes in the host IP. For instance, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, configure it in `/etc/hosts` using:
- ```Bash
- echo "192.168.1.3 iotdb-1" >> /etc/hosts
- ```
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
-2. Some parameters cannot be modified after the first startup, please refer to the "Installation Steps" section below to set them.
+Use the hostname to configure IoTDB’s `cn_internal_address` and `dn_internal_address`.
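+
+For example, on host `iotdb-1` the corresponding entries in the configuration file would look like this (a sketch; the exact configuration file path depends on your installation):
+
+```Bash
+# Use the hostname rather than the raw IP
+cn_internal_address=iotdb-1
+dn_internal_address=iotdb-1
+```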
-3. Recommend deploying a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department. The steps for deploying the monitoring panel can be referred to [Monitoring Panel Deployment](../Deployment-and-Maintenance/Monitoring-panel-deployment.md)
+2. **Immutable Parameters**: Some parameters cannot be changed after the initial startup. Follow the steps in the "Installation Steps" section to configure them correctly.
-## Installation Steps
+3. **Monitoring Panel**: Deploying a monitoring panel is recommended to monitor key performance indicators and stay informed about the database’s operational status. Contact the Timecho team to obtain the monitoring panel and refer to the corresponding [Monitoring Panel Deployment](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) for deployment steps.
-Taking the dual active version IoTDB built by two single machines A and B as an example, the IP addresses of A and B are 192.168.1.3 and 192.168.1.4, respectively. Here, we use hostname to represent different hosts. The plan is as follows:
+## 3 Installation Steps
-| Machine | Machine IP | Host Name |
-| ------- | ----------- | --------- |
-| A | 192.168.1.3 | iotdb-1 |
-| B | 192.168.1.4 | iotdb-2 |
+This guide uses two standalone nodes, A and B, to deploy the dual-active version of TimechoDB. The IP addresses and hostnames for the nodes are as follows:
-### Step1:Install Two Independent IoTDBs Separately
+| Machine | IP Address | Hostname |
+| ------- | ----------- | -------- |
+| A | 192.168.1.3 | iotdb-1 |
+| B | 192.168.1.4 | iotdb-2 |
-Install IoTDB on two machines separately, and refer to the deployment documentation for the standalone version [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md),The deployment document for the cluster version can be referred to [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md)。**It is recommended that the configurations of clusters A and B remain consistent to achieve the best dual active effect**
+### Step 1: Install Two Independent TimechoDB Instances
-### Step2:Create A Aata Synchronization Task On Machine A To Machine B
+Install TimechoDB on both machines (A and B) independently. For detailed instructions, refer to the standalone [Stand-Alone Deployment](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md) or cluster [Cluster Deployment](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md) deployment guides.
-- Create a data synchronization process on machine A, where the data on machine A is automatically synchronized to machine B. Use the cli tool in the sbin directory to connect to the IoTDB database on machine A:
+Ensure that configurations for A and B are consistent for optimal dual-active performance.
- ```Bash
- ./sbin/start-cli.sh -h iotdb-1
- ```
+### Step 2: Configure Data Synchronization from Machine A to Machine B
-- Create and start the data synchronization command with the following SQL:
+- Connect to the database on Machine A using the CLI tool from the `sbin` directory:
- ```Bash
- create pipe AB
- with source (
- 'source.mode.double-living' ='true'
- )
- with sink (
- 'sink'='iotdb-thrift-sink',
- 'sink.ip'='iotdb-2',
- 'sink.port'='6667'
- )
- ```
+```Bash
+./sbin/start-cli.sh -h iotdb-1
+```
-- Note: To avoid infinite data loops, it is necessary to set the parameter `source.mode.double-living` on both A and B to `true`, indicating that data transmitted from another pipe will not be forwarded.
+- Then create and start a data synchronization process. Use the following SQL command:
-### Step3:Create A Data Synchronization Task On Machine B To Machine A
+```Bash
+create pipe AB
+with source (
+  'source.mode.double-living' = 'true'
+)
+with sink (
+  'sink'='iotdb-thrift-sink',
+  'sink.ip'='iotdb-2',
+  'sink.port'='6667'
+)
+```
+
+- **Note:** To avoid infinite data loops, ensure the parameter `source.mode.double-living` is set to `true` on both A and B. This prevents retransmission of data received through the other instance's pipe.
-- Create a data synchronization process on machine B, where the data on machine B is automatically synchronized to machine A. Use the cli tool in the sbin directory to connect to the IoTDB database on machine B
+### Step 3: Configure Data Synchronization from Machine B to Machine A
- ```Bash
- ./sbin/start-cli.sh -h iotdb-2
- ```
+- Connect to the database on Machine B:
+
+```Bash
+./sbin/start-cli.sh -h iotdb-2
+```
- Create and start the pipe with the following SQL:
+- Then create and start the synchronization process with the following SQL command:
- ```Bash
- create pipe BA
- with source (
- 'source.mode.double-living' ='true'
- )
- with sink (
- 'sink'='iotdb-thrift-sink',
- 'sink.ip'='iotdb-1',
- 'sink.port'='6667'
- )
- ```
+```Bash
+create pipe BA
+with source (
+  'source.mode.double-living' = 'true'
+)
+with sink (
+  'sink'='iotdb-thrift-sink',
+  'sink.ip'='iotdb-1',
+  'sink.port'='6667'
+)
+```
-- Note: To avoid infinite data loops, it is necessary to set the parameter `source.mode.double-living` on both A and B to `true` , indicating that data transmitted from another pipe will not be forwarded.
+- **Note:** To avoid infinite data loops, ensure the parameter `source.mode.double-living` is set to `true` on both A and B. This prevents retransmission of data received through the other instance's pipe.
-### Step4:Validate Deployment
+### Step 4: Verify Deployment
-After the above data synchronization process is created, the dual active cluster can be started.
+#### Check Cluster Status
-#### Check the running status of the cluster
+Run the `show cluster` command on both nodes to verify the status of the TimechoDB services:
```Bash
-#Execute the show cluster command on two nodes respectively to check the status of IoTDB service
show cluster
```
@@ -122,43 +127,42 @@ show cluster

-Ensure that every Configurable Node and DataNode is in the Running state.
+Ensure all `ConfigNode` and `DataNode` processes are in the `Running` state.
-#### Check synchronization status
+#### Check Synchronization Status
-- Check the synchronization status on machine A
+Use the `show pipes` command on both nodes:
```Bash
show pipes
```
-
+Confirm that all pipes are in the `RUNNING` state:
-- Check the synchronization status on machine B
+On machine A:
-```Bash
-show pipes
-```
+
-
+On machine B:
-Ensure that every pipe is in the RUNNING state.
+
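+
+For reference, the `show pipes` output has roughly the following shape (illustrative only; columns abbreviated):
+
+```Bash
+IoTDB> show pipes
+#ID|CreationTime|  State|PipeSource|PipeProcessor|PipeSink|ExceptionMessage
+#AB|         ...|RUNNING|       ...|          ...|     ...|
+```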
-### Step5:Stop Dual Active Version IoTDB
+### Step 5: Stop the Dual-Active Instances
-- Execute the following command on machine A:
+To stop the dual-active instances:
- ```SQL
- ./sbin/start-cli.sh -h iotdb-1 #Log in to CLI
- IoTDB> stop pipe AB #Stop the data synchronization process
- ./sbin/stop-standalone.sh #Stop database service
- ```
+On machine A:
-- Execute the following command on machine B:
+```SQL
+./sbin/start-cli.sh -h iotdb-1 # Log in to CLI
+IoTDB> stop pipe AB # Stop data synchronization
+./sbin/stop-standalone.sh # Stop database service
+```
- ```SQL
- ./sbin/start-cli.sh -h iotdb-2 #Log in to CLI
- IoTDB> stop pipe BA #Stop the data synchronization process
- ./sbin/stop-standalone.sh #Stop database service
- ```
+On machine B:
+```SQL
+./sbin/start-cli.sh -h iotdb-2 # Log in to CLI
+IoTDB> stop pipe BA # Stop data synchronization
+./sbin/stop-standalone.sh # Stop database service
+```
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Environment-Requirements.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Environment-Requirements.md
index 3cd56b38d..a1a168b86 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Environment-Requirements.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Environment-Requirements.md
@@ -20,175 +20,182 @@
-->
# System Requirements
-## Disk Array
-
-### Configuration Suggestions
-
-IoTDB has no strict operation requirements on disk array configuration. It is recommended to use multiple disk arrays to store IoTDB data to achieve the goal of concurrent writing to multiple disk arrays. For configuration, refer to the following suggestions:
-
-1. Physical environment
- System disk: You are advised to use two disks as Raid1, considering only the space occupied by the operating system itself, and do not reserve system disk space for the IoTDB
- Data disk:
- Raid is recommended to protect data on disks
- It is recommended to provide multiple disks (1-6 disks) or disk groups for the IoTDB. (It is not recommended to create a disk array for all disks, as this will affect the maximum performance of the IoTDB.)
-2. Virtual environment
- You are advised to mount multiple hard disks (1-6 disks).
-3. When deploying IoTDB, it is recommended to avoid using network storage devices such as NAS.
-
-### Configuration Example
-
-- Example 1: Four 3.5-inch hard disks
-
-Only a few hard disks are installed on the server. Configure Raid5 directly.
-The recommended configurations are as follows:
-| **Use classification** | **Raid type** | **Disk number** | **Redundancy** | **Available capacity** |
-| ----------- | -------- | -------- | --------- | -------- |
-| system/data disk | RAID5 | 4 | 1 | 3 | is allowed to fail|
-
-- Example 2: Twelve 3.5-inch hard disks
-
-The server is configured with twelve 3.5-inch disks.
-Two disks are recommended as Raid1 system disks. The two data disks can be divided into two Raid5 groups. Each group of five disks can be used as four disks.
-The recommended configurations are as follows:
-| **Use classification** | **Raid type** | **Disk number** | **Redundancy** | **Available capacity** |
-| -------- | -------- | -------- | --------- | -------- |
-| system disk | RAID1 | 2 | 1 | 1 |
-| data disk | RAID5 | 5 | 1 | 4 |
-| data disk | RAID5 | 5 | 1 | 4 |
-- Example 3:24 2.5-inch disks
-
-The server is configured with 24 2.5-inch disks.
-Two disks are recommended as Raid1 system disks. The last two disks can be divided into three Raid5 groups. Each group of seven disks can be used as six disks. The remaining block can be idle or used to store pre-write logs.
-The recommended configurations are as follows:
-| **Use classification** | **Raid type** | **Disk number** | **Redundancy** | **Available capacity** |
-| -------- | -------- | -------- | --------- | -------- |
-| system disk | RAID1 | 2 | 1 | 1 |
-| data disk | RAID5 | 7 | 1 | 6 |
-| data disk | RAID5 | 7 | 1 | 6 |
-| data disk | RAID5 | 7 | 1 | 6 |
-| data disk | NoRaid | 1 | 0 | 1 |
-
-## Operating System
-
-### Version Requirements
-
-IoTDB supports operating systems such as Linux, Windows, and MacOS, while the enterprise version supports domestic CPUs such as Loongson, Phytium, and Kunpeng. It also supports domestic server operating systems such as Neokylin, KylinOS, UOS, and Linx.
-
-### Disk Partition
-
-- The default standard partition mode is recommended. LVM extension and hard disk encryption are not recommended.
-- The system disk needs only the space used by the operating system, and does not need to reserve space for the IoTDB.
-- Each disk group corresponds to only one partition. Data disks (with multiple disk groups, corresponding to raid) do not need additional partitions. All space is used by the IoTDB.
-The following table lists the recommended disk partitioning methods.
+## 1 Disk Array
+
+### 1.1 Configuration Suggestions
+
+IoTDB does not have strict operational requirements for disk array configurations. It is recommended to use multiple disk arrays to store IoTDB data to achieve concurrent writing across multiple disk arrays. The following configuration suggestions can be referenced:
+
+1. Physical Environment
+ - System Disk: It is recommended to use 2 disks for RAID1, considering only the space occupied by the operating system itself. No additional space needs to be reserved for IoTDB on the system disk.
+ - Data Disk:
+ - It is recommended to use RAID for data protection at the disk level.
+ - It is recommended to provide multiple disks (around 1-6) or disk groups for IoTDB (avoiding creating a single disk array with all disks, as it may affect IoTDB's performance ceiling).
+2. Virtual Environment
+ - It is recommended to mount multiple hard drives (around 1-6).
+3. When deploying IoTDB, it is recommended to avoid using network storage devices such as NAS.
+
+### 1.2 Configuration Examples
+
+- Example 1: 4 x 3.5-inch Hard Drives
+
+ - Since the server has fewer installed hard drives, RAID5 can be directly configured without additional settings.
+
+ - Recommended configuration:
+
+ | Classification | RAID Type | Number of Hard Drives | Redundancy | Usable Drives |
+ | :--------------- | :-------- | :-------------------- | :--------------------- | :------------ |
+ | System/Data Disk | RAID5 | 4 | 1 disk failure allowed | 3 |
+
+- Example 2: 12 x 3.5-inch Hard Drives
+
+ - The server is configured with 12 x 3.5-inch hard drives.
+
+ - The first 2 disks are recommended for RAID1 as the system disk. The data disks can be divided into 2 groups of RAID5, with 5 disks in each group (4 usable).
+
+ - Recommended configuration:
+
+ | Classification | RAID Type | Number of Hard Drives | Redundancy | Usable Drives |
+ | :------------- | :-------- | :-------------------- | :--------------------- | :------------ |
+ | System Disk | RAID1 | 2 | 1 disk failure allowed | 1 |
+ | Data Disk | RAID5 | 5 | 1 disk failure allowed | 4 |
+ | Data Disk | RAID5 | 5 | 1 disk failure allowed | 4 |
+
+- Example 3: 24 x 2.5-inch Hard Drives
+
+ - The server is configured with 24 x 2.5-inch hard drives.
+
+ - The first 2 disks are recommended for RAID1 as the system disk. The remaining disks can be divided into 3 groups of RAID5, with 7 disks in each group (6 usable). The last disk can be left idle or used for storing write-ahead logs.
+
+ - Recommended configuration:
+
+ | Usage Classification | RAID Type | Number of Hard Drives | Redundancy | Usable Drives |
+ | :------------------- | :-------- | :-------------------- | :--------------------- | :------------ |
+ | System Disk | RAID1 | 2 | 1 disk failure allowed | 1 |
+ | Data Disk | RAID5 | 7 | 1 disk failure allowed | 6 |
+ | Data Disk | RAID5 | 7 | 1 disk failure allowed | 6 |
+ | Data Disk | RAID5 | 7 | 1 disk failure allowed | 6 |
+ | Data Disk | No RAID | 1 | Data loss if damaged | 1 |
+
+## 2 Operating System
+
+### 2.1 Version Requirements
+
+IoTDB supports operating systems such as Linux, Windows, and MacOS. TimechoDB also supports Chinese CPUs like Loongson, Phytium, and Kunpeng, as well as Chinese operating systems like Kylin, UOS, and NingSi.
+
+### 2.2 Hard Disk Partitioning
+
+- It is recommended to use the default standard partitioning method. LVM expansion and hard disk encryption are not recommended.
+- The system disk only needs to meet the space requirements of the operating system. No additional space needs to be reserved for IoTDB.
+- Each disk group should correspond to a single partition. Data disks (with multiple disk groups corresponding to RAID) do not need additional partitioning, and all space should be allocated to IoTDB.
+
+Recommended disk partitioning is as follows:
+
+| Hard Disk Classification | Disk Group | Corresponding Drive Letter | Size | File System Type |
+| :----------------------- | :----------- | :------------------------- | :------------------------------ | :--------------- |
+| System Disk | Disk Group 0 | /boot | 1GB | Default |
+| System Disk | Disk Group 0 | / | Remaining space of disk group 0 | Default |
+| Data Disk | Disk Group 1 | /data1 | Entire space of disk group 1 | Default |
+| Data Disk | Disk Group 2 | /data2 | Entire space of disk group 2 | Default |
+| ...... | ...... | ...... | ...... | ...... |
+
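+As a minimal sketch, preparing one data disk group might look like the following (assuming the disk group is exposed as `/dev/sdb`; device names vary by environment):
+
+```Bash
+parted -s /dev/sdb mklabel gpt mkpart primary ext4 0% 100%  # One partition covering the whole disk group
+mkfs.ext4 /dev/sdb1                                         # Format the single partition
+mkdir -p /data1                                             # Mount point for disk group 1
+echo "/dev/sdb1 /data1 ext4 defaults 0 0" >> /etc/fstab     # Mount automatically at boot
+mount -a                                                    # Mount now; verify with: df -h /data1
+```
+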
-### Network Configuration
-
-1. Disable the firewall
-
-```Bash
-# View firewall
-systemctl status firewalld
-# Disable firewall
-systemctl stop firewalld
-# Disable firewall permanently
-systemctl disable firewalld
-```
-2. Ensure that the required port is not occupied
-
-(1) Check the ports occupied by the cluster: In the default cluster configuration, ConfigNode occupies ports 10710 and 10720, and DataNode occupies ports 6667, 10730, 10740, 10750, 10760, 9090, 9190, and 3000. Ensure that these ports are not occupied. Check methods are as follows:
-
-```Bash
-lsof -i:6667 or netstat -tunp | grep 6667
-lsof -i:10710 or netstat -tunp | grep 10710
-lsof -i:10720 or netstat -tunp | grep 10720
-# If the command outputs, the port is occupied.
-```
-
-(2) Checking the port occupied by the cluster deployment tool: When using the cluster management tool opskit to install and deploy the cluster, enable the SSH remote connection service configuration and open port 22.
-
-```Bash
-yum install openssh-server # Install the ssh service
-systemctl start sshd # Enable port 22
-```
-
-3. Ensure that servers are connected to each other
-
-### Other Configuration
-
-1. Disable the system swap memory
-
-```Bash
-echo "vm.swappiness = 0">> /etc/sysctl.conf
-# The swapoff -a and swapon -a commands are executed together to dump the data in swap back to memory and to empty the data in swap.
-# Do not omit the swappiness setting and just execute swapoff -a; Otherwise, swap automatically opens again after the restart, making the operation invalid.
-swapoff -a && swapon -a
-# Make the configuration take effect without restarting.
-sysctl -p
-# Check memory allocation, expecting swap to be 0
-free -m
-```
-2. Set the maximum number of open files to 65535 to avoid the error of "too many open files".
-
-```Bash
-# View current restrictions
-ulimit -n
-# Temporary changes
-ulimit -n 65535
-# Permanent modification
-echo "* soft nofile 65535" >> /etc/security/limits.conf
-echo "* hard nofile 65535" >> /etc/security/limits.conf
-# View after exiting the current terminal session, expect to display 65535
-ulimit -n
-```
-## Software Dependence
-
-Install the Java runtime environment (Java version >= 1.8). Ensure that jdk environment variables are set. (It is recommended to deploy JDK17 for V1.3.2.2 or later. In some scenarios, the performance of JDK of earlier versions is compromised, and Datanodes cannot be stopped.)
-
-```Bash
-# The following is an example of installing in centos7 using JDK-17:
-tar -zxvf JDk-17_linux-x64_bin.tar # Decompress the JDK file
-Vim ~/.bashrc # Configure the JDK environment
-{ export JAVA_HOME=/usr/lib/jvm/jdk-17.0.9
- export PATH=$JAVA_HOME/bin:$PATH
-} # Add JDK environment variables
-source ~/.bashrc # The configuration takes effect
-java -version # Check the JDK environment
-```
\ No newline at end of file
+
+### 2.3 Network Configuration
+
+1. **Disable Firewall**
+ ```Bash
+ # Check firewall status
+ systemctl status firewalld
+ # Stop firewall
+ systemctl stop firewalld
+ # Permanently disable firewall
+ systemctl disable firewalld
+ ```
+2. **Ensure Required Ports Are Not Occupied**
+ - Cluster Ports: By default, ConfigNode uses ports 10710 and 10720, while DataNode uses ports 6667, 10730, 10740, 10750, 10760, 9090, 9190, and 3000. Ensure these ports are not occupied. Check as follows:
+ ```Bash
+   lsof -i:6667    # or: netstat -tunp | grep 6667
+   lsof -i:10710   # or: netstat -tunp | grep 10710
+   lsof -i:10720   # or: netstat -tunp | grep 10720
+   # If a command produces output, the corresponding port is occupied.
+ ```
+ - Cluster Deployment Tool Ports: When using the cluster management tool `opskit` for installation and deployment, ensure the SSH remote connection service is configured and port 22 is open.
+ ```Bash
+ yum install openssh-server # Install SSH service
+ systemctl start sshd # Enable port 22
+ ```
+3. **Ensure Network Connectivity Between Servers** (see the quick check below)
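+   A minimal check, using the example hostnames from this guide:
+   ```Bash
+   ping -c 3 iotdb-2   # Run from iotdb-1; repeat from each server
+   ping -c 3 iotdb-3
+   ```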
+
+### 2.4 Other Configurations
+
+1. Disable System Swap Memory
+ ```Bash
+ echo "vm.swappiness = 0" >> /etc/sysctl.conf
+ # Execute both swapoff -a and swapon -a to transfer data from swap back to memory and clear swap data.
+ # Do not omit the swappiness setting and only execute swapoff -a; otherwise, swap will automatically reopen after reboot, rendering the operation ineffective.
+ swapoff -a && swapon -a
+ # Apply the configuration without rebooting.
+ sysctl -p
+ # Check memory allocation; swap should be 0.
+ free -m
+ ```
+2. Set System Maximum Open Files to 65535 to avoid "too many open files" errors.
+ ```Bash
+ # Check current limit
+ ulimit -n
+ # Temporarily modify
+ ulimit -n 65535
+ # Permanently modify
+ echo "* soft nofile 65535" >> /etc/security/limits.conf
+ echo "* hard nofile 65535" >> /etc/security/limits.conf
+ # After exiting the current terminal session, check; it should display 65535.
+ ulimit -n
+ ```
+
+## 3 Software Dependencies
+
+Install a Java Runtime Environment (Java version >= 1.8) and ensure the JDK environment variables are set. (For V1.3.2.2 and later, deploying JDK 17 directly is recommended; older JDK versions may have performance issues in some scenarios, and the DataNode may fail to stop.)
+
+ ```Bash
+ # Example of installing JDK-17 on CentOS 7:
+ tar -zxvf jdk-17_linux-x64_bin.tar.gz  # Extract the JDK archive
+ vim ~/.bashrc                          # Edit the environment configuration
+ # Add the following lines (adjust JAVA_HOME to the actual extraction path):
+ export JAVA_HOME=/usr/lib/jvm/jdk-17.0.9
+ export PATH=$JAVA_HOME/bin:$PATH
+ source ~/.bashrc                       # Apply the configuration
+ java -version                          # Verify the JDK installation
+ ```
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
new file mode 100644
index 000000000..45aeedd4e
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -0,0 +1,47 @@
+
+# Obtain IoTDB
+
+## 1 How to obtain IoTDB
+
+The installation package can be downloaded directly from the Apache IoTDB website: https://iotdb.apache.org/Download/
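+
+For example, downloading and unpacking a release from the command line might look like this (the version number is illustrative; use the actual release listed on the download page):
+
+```Bash
+wget https://dlcdn.apache.org/iotdb/1.3.2/apache-iotdb-1.3.2-all-bin.zip
+unzip apache-iotdb-1.3.2-all-bin.zip
+cd apache-iotdb-1.3.2-all-bin
+```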
+
+
+## 2 Installation Package Structure
+
+
+After unpacking the installation package (`apache-iotdb-{version}-all-bin.zip`), the directory structure is as follows:
+
+| **Directory** | **Type** | **Description** |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md
index 57cad838b..261c8a10f 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md
@@ -19,24 +19,28 @@
-->
# Obtain TimechoDB
-## How to obtain TimechoDB
-The enterprise version installation package can be obtained through product trial application or by directly contacting the business personnel who are in contact with you.
-## Installation Package Structure
-The directory structure after unpacking the installation package is as follows:
-| **catalogue** | **Type** | **Explanation** |
-| :--------------: | -------- | ------------------------------------------------------------ |
-| activation | folder | The directory where the activation file is located, including the generated machine code and the enterprise version activation code obtained from the business side (this directory will only be generated after starting ConfigNode to obtain the activation code) |
-| conf | folder | Configuration file directory, including configuration files such as ConfigNode, DataNode, JMX, and logback |
-| data | folder | The default data file directory contains data files for ConfigNode and DataNode. (The directory will only be generated after starting the program) |
-| lib | folder | IoTDB executable library file directory |
-| licenses | folder | Open source community certificate file directory |
-| logs | folder | The default log file directory, which includes log files for ConfigNode and DataNode (this directory will only be generated after starting the program) |
-| sbin | folder | Main script directory, including start, stop, and other scripts |
-| tools | folder | Directory of System Peripheral Tools |
-| ext | folder | Related files for pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | file | certificate |
-| NOTICE | file | Tip |
-| README_ZH\.md | file | Explanation of the Chinese version in Markdown format |
-| README\.md | file | Instructions for use |
-| RELEASE_NOTES\.md | file | Version Description |
+## 1 How to obtain TimechoDB
+
+The TimechoDB installation package can be obtained through product trial application or by directly contacting the Timecho team.
+
+## 2 Installation Package Structure
+
+After unpacking the installation package (`iotdb-enterprise-{version}-bin.zip`), the directory structure is as follows:
+
+| **Directory** | **Type** | **Description** |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| activation | Folder | Directory for activation files, including the generated machine code and the TimechoDB activation code obtained from Timecho staff. *(This directory is generated after starting the ConfigNode, enabling you to obtain the activation code.)* |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md
index 96b91f273..9bd72b9fc 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md
@@ -20,66 +20,72 @@
-->
# Monitoring Panel Deployment
-The IoTDB monitoring panel is one of the supporting tools for the IoTDB Enterprise Edition. It aims to solve the monitoring problems of IoTDB and its operating system, mainly including operating system resource monitoring, IoTDB performance monitoring, and hundreds of kernel monitoring indicators, in order to help users monitor the health status of the cluster, and perform cluster optimization and operation. This article will take common 3C3D clusters (3 Confignodes and 3 Datanodes) as examples to introduce how to enable the system monitoring module in an IoTDB instance and use Prometheus+Grafana to visualize the system monitoring indicators.
+The monitoring panel is one of the supporting tools for TimechoDB. It aims to solve the monitoring problems of TimechoDB and its operating system, mainly including operating system resource monitoring, TimechoDB performance monitoring, and hundreds of kernel monitoring metrics, in order to help users monitor cluster health, optimize performance, and perform maintenance. This guide demonstrates how to enable the system monitoring module in a TimechoDB instance and visualize monitoring metrics using Prometheus + Grafana, using a typical 3C3D cluster (3 ConfigNodes and 3 DataNodes) as an example.
## Installation Preparation
1. Installing IoTDB: You need to first install IoTDB V1.0 or above Enterprise Edition. You can contact business or technical support to obtain
2. Obtain the IoTDB monitoring panel installation package: Based on the enterprise version of IoTDB database monitoring panel, you can contact business or technical support to obtain
-## Installation Steps
+## 1 Installation Preparation
-### Step 1: IoTDB enables monitoring indicator collection
+1. Installing TimechoDB: Install TimechoDB V1.0 or above. Contact sales or technical support to obtain the installation package.
-1. Open the monitoring configuration item. The configuration items related to monitoring in IoTDB are disabled by default. Before deploying the monitoring panel, you need to open the relevant configuration items (note that the service needs to be restarted after enabling monitoring configuration).
+2. Obtain the monitoring panel installation package: The monitoring panel is exclusive to the enterprise-grade TimechoDB. Contact sales or technical support to obtain it.
-| **Configuration** | Located in the configuration file | **Description** |
-| :--------------------------------- | :-------------------------------- | :----------------------------------------------------------- |
-| cn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration item and set the value to PROMETHEUS |
-| cn_metric_level | conf/iotdb-system.properties | Uncomment the configuration item and set the value to IMPORTANT |
-| cn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration item to maintain the default setting of 9091. If other ports are set, they will not conflict with each other |
-| dn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration item and set the value to PROMETHEUS |
-| dn_metric_level | conf/iotdb-system.properties | Uncomment the configuration item and set the value to IMPORTANT |
-| dn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration item and set it to 9092 by default. If other ports are set, they will not conflict with each other |
+## 2 Installation Steps
+
+### Step 1: Enable Monitoring Metrics Collection in TimechoDB
+
+1. Enable related configuration options. The configuration options related to monitoring in TimechoDB are disabled by default. Before deploying the monitoring panel, you need to enable certain configuration options (note that the service needs to be restarted after enabling monitoring configuration).
+
+| **Configuration** | **Configuration File** | **Description** |
+| :--------------------------------- | :------------------------------- | :----------------------------------------------------------- |
+| cn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration option and set the value to PROMETHEUS |
+| cn_metric_level | conf/iotdb-system.properties | Uncomment the configuration option and set the value to IMPORTANT |
+| cn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration option and keep the default port `9091` or set another port (ensure no conflict) |
+| dn_metric_reporter_list | conf/iotdb-system.properties | Uncomment the configuration option and set the value to PROMETHEUS |
+| dn_metric_level | conf/iotdb-system.properties | Uncomment the configuration option and set the value to IMPORTANT |
+| dn_metric_prometheus_reporter_port | conf/iotdb-system.properties | Uncomment the configuration option and keep the default port `9092` or set another port (ensure no conflict) |
Taking the 3C3D cluster as an example, the monitoring configuration that needs to be modified is as follows:
-| Node IP | Host Name | Cluster Role | Configuration File Path | Configuration |
-| ----------- | --------- | ------------ | -------------------------------- | ------------------------------------------------------------ |
+| Node IP | Host Name | Cluster Role | Configuration File Path | Configuration |
+| ----------- | --------- | ------------ | ---------------------------- | ------------------------------------------------------------ |
| 192.168.1.3 | iotdb-1 | confignode | conf/iotdb-system.properties | cn_metric_reporter_list=PROMETHEUS cn_metric_level=IMPORTANT cn_metric_prometheus_reporter_port=9091 |
| 192.168.1.4 | iotdb-2 | confignode | conf/iotdb-system.properties | cn_metric_reporter_list=PROMETHEUS cn_metric_level=IMPORTANT cn_metric_prometheus_reporter_port=9091 |
| 192.168.1.5 | iotdb-3 | confignode | conf/iotdb-system.properties | cn_metric_reporter_list=PROMETHEUS cn_metric_level=IMPORTANT cn_metric_prometheus_reporter_port=9091 |
-| 192.168.1.3 | iotdb-1 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
-| 192.168.1.4 | iotdb-2 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
-| 192.168.1.5 | iotdb-3 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
+| 192.168.1.3 | iotdb-1 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
+| 192.168.1.4 | iotdb-2 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
+| 192.168.1.5 | iotdb-3 | datanode | conf/iotdb-system.properties | dn_metric_reporter_list=PROMETHEUS dn_metric_level=IMPORTANT dn_metric_prometheus_reporter_port=9092 |
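+
+For reference, after uncommenting, the relevant lines in each node's configuration file read as follows (ConfigNode and DataNode settings shown together):
+
+```Bash
+cn_metric_reporter_list=PROMETHEUS
+cn_metric_level=IMPORTANT
+cn_metric_prometheus_reporter_port=9091
+dn_metric_reporter_list=PROMETHEUS
+dn_metric_level=IMPORTANT
+dn_metric_prometheus_reporter_port=9092
+```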
-2. Restart all nodes. After modifying the monitoring indicator configuration of three nodes, the confignode and datanode services of all nodes can be restarted:
+2. Restart all nodes. After modifying the monitoring configurations on all 3 nodes, restart the ConfigNode and DataNode services:
```Bash
-./sbin/stop-standalone.sh #Stop confignode and datanode first
-./sbin/start-confignode.sh -d #Start confignode
-./sbin/start-datanode.sh -d #Start datanode
-```
+ ./sbin/stop-standalone.sh #Stop confignode and datanode first
+ ./sbin/start-confignode.sh -d #Start confignode
+ ./sbin/start-datanode.sh -d #Start datanode
+ ```
-3. After restarting, confirm the running status of each node through the client. If the status is Running, it indicates successful configuration:
+3. After restarting, confirm the running status of each node through the client. If all nodes are running, the configuration is successful.

-### Step 2: Install and configure Prometheus
+### Step 2: Install and Configure Prometheus
-> Taking Prometheus installed on server 192.168.1.3 as an example.
+> In this example, Prometheus is installed on server 192.168.1.3.
-1. Download the Prometheus installation package, which requires installation of V2.30.3 and above. You can go to the Prometheus official website to download it(https://prometheus.io/docs/introduction/first_steps/)
-2. Unzip the installation package and enter the unzipped folder:
+1. Download Prometheus (version 2.30.3 or later) from the Prometheus website (https://prometheus.io/docs/introduction/first_steps/).
+2. Unzip the installation package and enter the folder:
```Shell
-tar xvfz prometheus-*.tar.gz
-cd prometheus-*
-```
+ tar xvfz prometheus-*.tar.gz
+ cd prometheus-*
+ ```
-3. Modify the configuration. Modify the configuration file prometheus.yml as follows
- 1. Add configNode task to collect monitoring data for ConfigNode
- 2. Add a datanode task to collect monitoring data for DataNodes
+3. Modify the configuration file `prometheus.yml` as follows:
+ - Add a confignode job to collect monitoring data for ConfigNode
+ - Add a datanode job to collect monitoring data for DataNodes
```YAML
global:
@@ -102,45 +108,46 @@ scrape_configs:
4. Start Prometheus. The default expiration time for Prometheus monitoring data is 15 days. In production environments, it is recommended to adjust it to 180 days or more to track historical monitoring data for a longer period of time. The startup command is as follows:
```Shell
-./prometheus --config.file=prometheus.yml --storage.tsdb.retention.time=180d
-```
+ ./prometheus --config.file=prometheus.yml --storage.tsdb.retention.time=180d
+ ```
-5. Confirm successful startup. Enter in browser http://192.168.1.3:9090 Go to Prometheus and click on the Target interface under Status. When you see that all States are Up, it indicates successful configuration and connectivity.
+5. Confirm successful startup. Open a browser and navigate to http://192.168.1.3:9090. Then navigate to "Status" -> "Targets". If all targets show the state "Up", the configuration is successful.
-6. Clicking on the left link in Targets will redirect you to web monitoring and view the monitoring information of the corresponding node:
+6. Click the links in the `Targets` page to view monitoring information for the respective nodes.
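+
+Optionally, check a metrics endpoint directly from the command line (ports as configured in Step 1; `/metrics` is the standard endpoint exposed by the Prometheus reporter):
+
+```Bash
+curl http://192.168.1.3:9091/metrics | head   # ConfigNode metrics
+curl http://192.168.1.3:9092/metrics | head   # DataNode metrics
+```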

-### Step 3: Install Grafana and configure the data source
+### Step 3: Install Grafana and Configure the Data Source
+
+> In this example, Grafana is installed on server 192.168.1.3.
-> Taking Grafana installed on server 192.168.1.3 as an example.
+1. Download Grafana (version 8.4.2 or later) from the Grafana website (https://grafana.com/grafana/download).
-1. Download the Grafana installation package, which requires installing version 8.4.2 or higher. You can go to the Grafana official website to download it(https://grafana.com/grafana/download)
-2. Unzip and enter the corresponding folder
+2. Unzip the installation package and enter the folder:
```Shell
-tar -zxvf grafana-*.tar.gz
-cd grafana-*
-```
+ tar -zxvf grafana-*.tar.gz
+ cd grafana-*
+ ```
3. Start Grafana:
```Shell
-./bin/grafana-server web
-```
+ ./bin/grafana-server web
+ ```
-4. Log in to Grafana. Enter in browser http://192.168.1.3:3000 (or the modified port), enter Grafana, and the default initial username and password are both admin.
+4. Log in to Grafana. Open a browser and navigate to `http://192.168.1.3:3000` (or the modified port). The default initial username and password are both `admin`.
-5. Configure data sources. Find Data sources in Connections, add a new data source, and configure the Data Source to Prometheus
+5. Configure data sources. Navigate to "Connections" -> "Data sources", add a new data source, and select `Prometheus` as the data source type.

-When configuring the Data Source, pay attention to the URL where Prometheus is located. After configuring it, click on Save&Test and a Data Source is working prompt will appear, indicating successful configuration
+Ensure the URL for Prometheus is correct. Click "Save & Test". If the message "Data source is working" appears, the configuration is successful.

@@ -158,19 +165,19 @@ When configuring the Data Source, pay attention to the URL where Prometheus is l

-4. Select the JSON file of one of the panels in the IoTDB monitoring panel, using the Apache IoTDB ConfigNode Dashboard as an example (refer to the installation preparation section in this article for the monitoring panel installation package):
+4. Choose one of the JSON files (e.g., `Apache IoTDB ConfigNode Dashboard`).

-5. Select Prometheus as the data source and click Import
+5. Choose Prometheus as the data source and click "Import"

-6. Afterwards, you can see the imported Apache IoTDB ConfigNode Dashboard monitoring panel
+6. The imported `Apache IoTDB ConfigNode Dashboard` will now be displayed.

-7. Similarly, we can import the Apache IoTDB DataNode Dashboard Apache Performance Overview Dashboard、Apache System Overview Dashboard, You can see the following monitoring panel:
+7. Similarly, import other dashboards such as `Apache IoTDB DataNode Dashboard`, `Apache Performance Overview Dashboard`, and `Apache System Overview Dashboard`.

@@ -178,503 +185,502 @@ When configuring the Data Source, pay attention to the URL where Prometheus is l
-8. At this point, all IoTDB monitoring panels have been imported and monitoring information can now be viewed at any time.
+8. The IoTDB monitoring panel is now fully imported, and you can view monitoring information at any time.

-## Appendix, Detailed Explanation of Monitoring Indicators
-
-### System Dashboard
-
-This panel displays the current usage of system CPU, memory, disk, and network resources, as well as partial status of the JVM.
-
-#### CPU
-
-- CPU Core:CPU cores
-- CPU Load:
- - System CPU Load:The average CPU load and busyness of the entire system during the sampling time
- - Process CPU Load:The proportion of CPU occupied by the IoTDB process during sampling time
-- CPU Time Per Minute:The total CPU time of all processes in the system per minute
-
-#### Memory
-
-- System Memory:The current usage of system memory.
- - Commited vm size: The size of virtual memory allocated by the operating system to running processes.
- - Total physical memory:The total amount of available physical memory in the system.
- - Used physical memory:The total amount of memory already used by the system. Contains the actual amount of memory used by the process and the memory occupied by the operating system buffers/cache.
-- System Swap Memory:Swap Space memory usage.
-- Process Memory:The usage of memory by the IoTDB process.
- - Max Memory:The maximum amount of memory that an IoTDB process can request from the operating system. (Configure the allocated memory size in the datanode env/configure env configuration file)
- - Total Memory:The total amount of memory that the IoTDB process has currently requested from the operating system.
- - Used Memory:The total amount of memory currently used by the IoTDB process.
-
-#### Disk
-
-- Disk Space:
- - Total disk space:The maximum disk space that IoTDB can use.
- - Used disk space:The disk space already used by IoTDB.
-- Log Number Per Minute:The average number of logs at each level of IoTDB per minute during the sampling time.
-- File Count:Number of IoTDB related files
- - all:All file quantities
- - TsFile:Number of TsFiles
- - seq:Number of sequential TsFiles
- - unseq:Number of unsequence TsFiles
- - wal:Number of WAL files
- - cross-temp:Number of cross space merge temp files
- - inner-seq-temp:Number of merged temp files in sequential space
- - innser-unseq-temp:Number of merged temp files in unsequential space
- - mods:Number of tombstone files
-- Open File Count:Number of file handles opened by the system
-- File Size:The size of IoTDB related files. Each sub item corresponds to the size of the corresponding file.
-- Disk I/O Busy Rate:Equivalent to the% util indicator in iostat, it to some extent reflects the level of disk busyness. Each sub item is an indicator corresponding to the disk.
-- Disk I/O Throughput:The average I/O throughput of each disk in the system over a period of time. Each sub item is an indicator corresponding to the disk.
-- Disk I/O Ops:Equivalent to the four indicators of r/s, w/s, rrqm/s, and wrqm/s in iostat, it refers to the number of times a disk performs I/O per second. Read and write refer to the number of times a disk performs a single I/O. Due to the corresponding scheduling algorithm of block devices, in some cases, multiple adjacent I/Os can be merged into one. Merge read and merge write refer to the number of times multiple I/Os are merged into one I/O.
-- Disk I/O Avg Time:Equivalent to the await of iostat, which is the average latency of each I/O request. Separate recording of read and write requests.
-- Disk I/O Avg Size:Equivalent to the avgrq sz of iostat, it reflects the size of each I/O request. Separate recording of read and write requests.
-- Disk I/O Avg Queue Size:Equivalent to avgqu sz in iostat, which is the average length of the I/O request queue.
-- I/O System Call Rate:The frequency of process calls to read and write system calls, similar to IOPS.
-- I/O Throughput:The throughput of process I/O can be divided into two categories: actual-read/write and attemppt-read/write. Actual read and actual write refer to the number of bytes that a process actually causes block devices to perform I/O, excluding the parts processed by Page Cache.
-
-#### JVM
-
-- GC Time Percentage:The proportion of GC time spent by the node JVM in the past minute's time window
-- GC Allocated/Promoted Size Detail: The average size of objects promoted to the old era per minute by the node JVM, as well as the size of objects newly applied for by the new generation/old era and non generational new applications
-- GC Data Size Detail:The long-term surviving object size of the node JVM and the maximum intergenerational allowed value
-- Heap Memory:JVM heap memory usage.
- - Maximum heap memory:The maximum available heap memory size for the JVM.
- - Committed heap memory:The size of heap memory that has been committed by the JVM.
- - Used heap memory:The size of heap memory already used by the JVM.
- - PS Eden Space:The size of the PS Young area.
- - PS Old Space:The size of the PS Old area.
- - PS Survivor Space:The size of the PS survivor area.
- - ...(CMS/G1/ZGC, etc)
-- Off Heap Memory:Out of heap memory usage.
- - direct memory:Out of heap direct memory.
- - mapped memory:Out of heap mapped memory.
-- GC Number Per Minute:The average number of garbage collection attempts per minute by the node JVM, including YGC and FGC
-- GC Time Per Minute:The average time it takes for node JVM to perform garbage collection per minute, including YGC and FGC
-- GC Number Per Minute Detail:The average number of garbage collections per minute by node JVM due to different reasons, including YGC and FGC
-- GC Time Per Minute Detail:The average time spent by node JVM on garbage collection per minute due to different reasons, including YGC and FGC
-- Time Consumed Of Compilation Per Minute:The total time JVM spends compiling per minute
-- The Number of Class:
- - loaded:The number of classes currently loaded by the JVM
- - unloaded:The number of classes uninstalled by the JVM since system startup
-- The Number of Java Thread:The current number of surviving threads in IoTDB. Each sub item represents the number of threads in each state.
-
-#### Network
-
-Eno refers to the network card connected to the public network, while lo refers to the virtual network card.
-
-- Net Speed:The speed of network card sending and receiving data
-- Receive/Transmit Data Size:The size of data packets sent or received by the network card, calculated from system restart
-- Packet Speed:The speed at which the network card sends and receives packets, and one RPC request can correspond to one or more packets
-- Connection Num:The current number of socket connections for the selected process (IoTDB only has TCP)
-
-### Performance Overview Dashboard
-
-#### Cluster Overview
-
-- Total CPU Core:Total CPU cores of cluster machines
-- DataNode CPU Load:CPU usage of each DataNode node in the cluster
-- Disk
- - Total Disk Space: Total disk size of cluster machines
- - DataNode Disk Usage: The disk usage rate of each DataNode in the cluster
-- Total Timeseries: The total number of time series managed by the cluster (including replicas), the actual number of time series needs to be calculated in conjunction with the number of metadata replicas
-- Cluster: Number of ConfigNode and DataNode nodes in the cluster
-- Up Time: The duration of cluster startup until now
-- Total Write Point Per Second: The total number of writes per second in the cluster (including replicas), and the actual total number of writes needs to be analyzed in conjunction with the number of data replicas
-- Memory
- - Total System Memory: Total memory size of cluster machine system
- - Total Swap Memory: Total size of cluster machine swap memory
- - DataNode Process Memory Usage: Memory usage of each DataNode in the cluster
-- Total File Number:Total number of cluster management files
-- Cluster System Overview:Overview of cluster machines, including average DataNode node memory usage and average machine disk usage
-- Total DataBase: The total number of databases managed by the cluster (including replicas)
-- Total DataRegion: The total number of DataRegions managed by the cluster
-- Total SchemaRegion: The total number of SchemeRegions managed by the cluster
-
-#### Node Overview
-
-- CPU Core: The number of CPU cores in the machine where the node is located
-- Disk Space: The disk size of the machine where the node is located
-- Timeseries: Number of time series managed by the machine where the node is located (including replicas)
-- System Overview: System overview of the machine where the node is located, including CPU load, process memory usage ratio, and disk usage ratio
-- Write Point Per Second: The write speed per second of the machine where the node is located (including replicas)
-- System Memory: The system memory size of the machine where the node is located
-- Swap Memory:The swap memory size of the machine where the node is located
-- File Number: Number of files managed by nodes
-
-#### Performance
-
-- Session Idle Time:The total idle time and total busy time of the session connection of the node
-- Client Connection: The client connection status of the node, including the total number of connections and the number of active connections
-- Time Consumed Of Operation: The time consumption of various types of node operations, including average and P99
-- Average Time Consumed Of Interface: The average time consumption of each thrust interface of a node
-- P99 Time Consumed Of Interface: P99 time consumption of various thrust interfaces of nodes
-- Task Number: The number of system tasks for each node
-- Average Time Consumed of Task: The average time spent on various system tasks of a node
-- P99 Time Consumed of Task: P99 time consumption for various system tasks of nodes
-- Operation Per Second: The number of operations per second for a node
-- Mainstream Process
- - Operation Per Second Of Stage: The number of operations per second for each stage of the node's main process
- - Average Time Consumed Of Stage: The average time consumption of each stage in the main process of a node
- - P99 Time Consumed Of Stage: P99 time consumption for each stage of the node's main process
-- Schedule Stage
- - OPS Of Schedule: The number of operations per second in each sub stage of the node schedule stage
- - Average Time Consumed Of Schedule Stage:The average time consumption of each sub stage in the node schedule stage
- - P99 Time Consumed Of Schedule Stage: P99 time consumption for each sub stage of the schedule stage of the node
-- Local Schedule Sub Stages
- - OPS Of Local Schedule Stage: The number of operations per second in each sub stage of the local schedule node
- - Average Time Consumed Of Local Schedule Stage: The average time consumption of each sub stage in the local schedule stage of the node
- - P99 Time Consumed Of Local Schedule Stage: P99 time consumption for each sub stage of the local schedule stage of the node
-- Storage Stage
- - OPS Of Storage Stage: The number of operations per second in each sub stage of the node storage stage
- - Average Time Consumed Of Storage Stage: Average time consumption of each sub stage in the node storage stage
- - P99 Time Consumed Of Storage Stage: P99 time consumption for each sub stage of node storage stage
-- Engine Stage
- - OPS Of Engine Stage: The number of operations per second in each sub stage of the node engine stage
- - Average Time Consumed Of Engine Stage: The average time consumption of each sub stage in the engine stage of a node
- - P99 Time Consumed Of Engine Stage: P99 time consumption of each sub stage in the node engine stage
-
-#### System
-
-- CPU Load: CPU load of nodes
-- CPU Time Per Minute: The CPU time per minute of a node, with the maximum value related to the number of CPU cores
-- GC Time Per Minute:The average GC time per minute for nodes, including YGC and FGC
-- Heap Memory: Node's heap memory usage
-- Off Heap Memory: Non heap memory usage of nodes
-- The Number Of Java Thread: Number of Java threads on nodes
-- File Count:Number of files managed by nodes
-- File Size: Node management file size situation
-- Log Number Per Minute: Different types of logs per minute for nodes
-
-### ConfigNode Dashboard
-
-This panel displays the performance of all management nodes in the cluster, including partitioning, node information, and client connection statistics.
-
-#### Node Overview
-
-- Database Count: Number of databases for nodes
-- Region
- - DataRegion Count:Number of DataRegions for nodes
- - DataRegion Current Status: The state of the DataRegion of the node
- - SchemaRegion Count: Number of SchemeRegions for nodes
- - SchemaRegion Current Status: The state of the SchemeRegion of the node
-- System Memory: The system memory size of the node
-- Swap Memory: Node's swap memory size
-- ConfigNodes: The running status of the ConfigNode in the cluster where the node is located
-- DataNodes:The DataNode situation of the cluster where the node is located
-- System Overview: System overview of nodes, including system memory, disk usage, process memory, and CPU load
-
-#### NodeInfo
-
-- Node Count: The number of nodes in the cluster where the node is located, including ConfigNode and DataNode
-- ConfigNode Status: The status of the ConfigNode node in the cluster where the node is located
-- DataNode Status: The status of the DataNode node in the cluster where the node is located
-- SchemaRegion Distribution: The distribution of SchemaRegions in the cluster where the node is located
-- SchemaRegionGroup Leader Distribution: The distribution of leaders in the SchemaRegionGroup of the cluster where the node is located
-- DataRegion Distribution: The distribution of DataRegions in the cluster where the node is located
-- DataRegionGroup Leader Distribution:The distribution of leaders in the DataRegionGroup of the cluster where the node is located
-
-#### Protocol
-
-- Client Count
- - Active Client Num: The number of active clients in each thread pool of a node
- - Idle Client Num: The number of idle clients in each thread pool of a node
- - Borrowed Client Count: Number of borrowed clients in each thread pool of the node
- - Created Client Count: Number of created clients for each thread pool of the node
- - Destroyed Client Count: The number of destroyed clients in each thread pool of the node
-- Client time situation
- - Client Mean Active Time: The average active time of clients in each thread pool of a node
- - Client Mean Borrow Wait Time: The average borrowing waiting time of clients in each thread pool of a node
- - Client Mean Idle Time: The average idle time of clients in each thread pool of a node
-
-#### Partition Table
-
-- SchemaRegionGroup Count: The number of SchemaRegionGroups in the Database of the cluster where the node is located
-- DataRegionGroup Count: The number of DataRegionGroups in the Database of the cluster where the node is located
-- SeriesSlot Count: The number of SeriesSlots in the Database of the cluster where the node is located
-- TimeSlot Count: The number of TimeSlots in the Database of the cluster where the node is located
-- DataRegion Status: The DataRegion status of the cluster where the node is located
-- SchemaRegion Status: The status of the SchemeRegion of the cluster where the node is located
-
-#### Consensus
-
-- Ratis Stage Time: The time consumption of each stage of the node's Ratis
-- Write Log Entry: The time required to write a log for the Ratis of a node
-- Remote / Local Write Time: The time consumption of remote and local writes for the Ratis of nodes
-- Remote / Local Write QPS: Remote and local QPS written to node Ratis
-- RatisConsensus Memory: Memory usage of Node Ratis consensus protocol
-
-### DataNode Dashboard
-
-This panel displays the monitoring status of all data nodes in the cluster, including write time, query time, number of stored files, etc.
-
-#### Node Overview
-
-- The Number Of Entity: Entity situation of node management
-- Write Point Per Second: The write speed per second of the node
-- Memory Usage: The memory usage of the node, including the memory usage of various parts of IoT Consensus, the total memory usage of SchemaRegion, and the memory usage of various databases.
-
-#### Protocol
-
-- Node Operation Time Consumption
- - The Time Consumed Of Operation (avg): The average time spent on various operations of a node
- - The Time Consumed Of Operation (50%): The median time spent on various operations of a node
- - The Time Consumed Of Operation (99%): P99 time consumption for various operations of nodes
-- Thrift Statistics
- - The QPS Of Interface: QPS of various Thrift interfaces of nodes
- - The Avg Time Consumed Of Interface: The average time consumption of each Thrift interface of a node
- - Thrift Connection: The number of Thrfit connections of each type of node
- - Thrift Active Thread: The number of active Thrift connections for each type of node
-- Client Statistics
- - Active Client Num: The number of active clients in each thread pool of a node
- - Idle Client Num: The number of idle clients in each thread pool of a node
- - Borrowed Client Count:Number of borrowed clients for each thread pool of a node
- - Created Client Count: Number of created clients for each thread pool of the node
- - Destroyed Client Count: The number of destroyed clients in each thread pool of the node
- - Client Mean Active Time: The average active time of clients in each thread pool of a node
- - Client Mean Borrow Wait Time: The average borrowing waiting time of clients in each thread pool of a node
- - Client Mean Idle Time: The average idle time of clients in each thread pool of a node
-
-#### Storage Engine
-
-- File Count: Number of files of various types managed by nodes
-- File Size: Node management of various types of file sizes
-- TsFile
- - TsFile Total Size In Each Level: The total size of TsFile files at each level of node management
- - TsFile Count In Each Level: Number of TsFile files at each level of node management
- - Avg TsFile Size In Each Level: The average size of TsFile files at each level of node management
-- Task Number: Number of Tasks for Nodes
-- The Time Consumed of Task: The time consumption of tasks for nodes
-- Compaction
- - Compaction Read And Write Per Second: The merge read and write speed of nodes per second
- - Compaction Number Per Minute: The number of merged nodes per minute
- - Compaction Process Chunk Status: The number of Chunks in different states merged by nodes
- - Compacted Point Num Per Minute: The number of merged nodes per minute
-
-#### Write Performance
-
-- Write Cost(avg): Average node write time, including writing wal and memtable
-- Write Cost(50%): Median node write time, including writing wal and memtable
-- Write Cost(99%): P99 for node write time, including writing wal and memtable
-- WAL
- - WAL File Size: Total size of WAL files managed by nodes
- - WAL File Num:Number of WAL files managed by nodes
- - WAL Nodes Num: Number of WAL nodes managed by nodes
- - Make Checkpoint Costs: The time required to create various types of CheckPoints for nodes
- - WAL Serialize Total Cost: Total time spent on node WAL serialization
- - Data Region Mem Cost: Memory usage of different DataRegions of nodes, total memory usage of DataRegions of the current instance, and total memory usage of DataRegions of the current cluster
- - Serialize One WAL Info Entry Cost: Node serialization time for a WAL Info Entry
- - Oldest MemTable Ram Cost When Cause Snapshot: MemTable size when node WAL triggers oldest MemTable snapshot
- - Oldest MemTable Ram Cost When Cause Flush: MemTable size when node WAL triggers oldest MemTable flush
- - Effective Info Ratio Of WALNode: The effective information ratio of different WALNodes of nodes
+## 3 Appendix, Detailed Monitoring Metrics
+
+### 3.1 System Dashboard
+
+This dashboard displays the current system's **CPU, memory, disk, and network resources**, as well as some **JVM-related metrics**.
+
+#### 3.1.1 CPU
+
+- **CPU Core:** Number of CPU cores.
+- **CPU Load:**
+ - **System CPU Load:** The average CPU load and utilization of the entire system during the sampling period.
+ - **Process CPU Load:** The percentage of CPU resources occupied by the IoTDB process during the sampling period (see the cross-check after this list).
+- **CPU Time Per Minute:** The total CPU time consumed by all processes in the system per minute.
+
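+For a quick cross-check of the Process CPU Load panel, `pidstat` (from the `sysstat` package) can sample the same figure on the host; `<pid>` is a placeholder for the DataNode or ConfigNode process ID:
+
+```bash
+# Sample the CPU utilization of one process every 5 seconds;
+# %usr/%system roughly correspond to the Process CPU Load panel.
+pidstat -u -p <pid> 5
+```
+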
+#### 3.1.2 Memory
+
+- **System Memory:** Current system memory usage.
+ - **Committed VM Size:** Virtual memory size allocated by the operating system to running processes.
+ - **Total Physical Memory:** Total available physical memory in the system.
+ - **Used Physical Memory:** The total amount of memory currently in use, including memory actively used by processes and memory occupied by the operating system for buffers and caching.
+- **System Swap Memory:** The amount of swap space memory in use.
+- **Process Memory:** Memory usage of the IoTDB process.
+ - **Max Memory:** The maximum amount of memory that the IoTDB process can request from the OS (configured in the `datanode-env`/`confignode-env` configuration files; see the sketch after this list).
+ - **Total Memory:** The total amount of memory currently allocated by the IoTDB process from the OS.
+ - **Used Memory:** The total amount of memory currently in use by the IoTDB process.
+
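+Max Memory is a configured cap rather than a measurement. A minimal sketch of raising it in `datanode-env.sh`, assuming the `MEMORY_SIZE` variable used by recent releases (older versions set the JVM `-Xmx`/`-Xms` flags directly, so verify the variable name for your version):
+
+```bash
+# datanode-env.sh -- upper bound on the memory the DataNode process
+# may request from the OS (split internally into on-heap and off-heap).
+MEMORY_SIZE=16G
+```
+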
+#### 3.1.3 Disk
+
+- **Disk Space:**
+ - **Total Disk Space:** Maximum disk space available for IoTDB.
+ - **Used Disk Space:** Disk space currently occupied by IoTDB.
+- **Log Number Per Minute:** Average number of IoTDB logs generated per minute, categorized by log levels.
+- **File Count:** The number of files related to IoTDB.
+ - **All:** Total number of files.
+ - **TsFile:** Number of TsFiles.
+ - **Seq:** Number of sequential TsFiles.
+ - **Unseq:** Number of unordered TsFiles.
+ - **WAL:** Number of WAL (Write-Ahead Log) files.
+ - **Cross-Temp:** Number of temporary files generated during cross-space merge operations.
+ - **Inner-Seq-Temp:** Number of temporary files generated during sequential-space merge operations.
+ - **Inner-Unseq-Temp:** Number of temporary files generated during unordered-space merge operations.
+ - **Mods:** Number of tombstone files.
+- **Open File Count:** Number of open file handles in the system.
+- **File Size:** The size of IoTDB-related files, with each sub-item representing the size of a specific file type.
+- **Disk I/O Busy Rate:** Equivalent to the `%util` metric in `iostat`, indicating the level of disk utilization. Each sub-item corresponds to a specific disk.
+- **Disk I/O Throughput:** Average I/O throughput of system disks over a given period. Each sub-item corresponds to a specific disk.
+- **Disk I/O Ops:** Equivalent to `r/s`, `w/s`, `rrqm/s`, and `wrqm/s` in `iostat`, representing the number of I/O operations per second: read/write count individual I/Os, while merged read/merged write count adjacent I/Os combined into a single I/O by the block-device scheduler (see the `iostat` example after this list).
+- **Disk I/O Avg Time:** Equivalent to the `await` metric in `iostat`, representing the average latency of each I/O request, recorded separately for read and write operations.
+- **Disk I/O Avg Size:** Equivalent to the `avgrq-sz` metric in `iostat`, indicating the average size of each I/O request, recorded separately for read and write operations.
+- **Disk I/O Avg Queue Size:** Equivalent to `avgqu-sz` in `iostat`, representing the average length of the I/O request queue.
+- **I/O System Call Rate:** Frequency of read/write system calls invoked by the process, similar to IOPS.
+- **I/O Throughput:** I/O throughput of the process, divided into `actual_read/write` and `attempt_read/write`. `Actual read` and `actual write` refer to the number of bytes actually written to or read from the storage device, excluding those handled by the Page Cache.
+
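+Because several of the disk panels are defined in terms of `iostat` columns, the extended device report is a convenient host-side cross-check:
+
+```bash
+# Extended device statistics, refreshed every 5 seconds:
+#   %util  ~ Disk I/O Busy Rate     r/s, w/s  ~ Disk I/O Ops
+#   await  ~ Disk I/O Avg Time      avgqu-sz  ~ Disk I/O Avg Queue Size
+iostat -x 5
+```
+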
+#### 3.1.4 JVM
+
+- **GC Time Percentage:** Percentage of time spent on garbage collection (GC) by the JVM in the past minute.
+- **GC Allocated/Promoted Size Detail:** The average size of objects promoted to the old generation per minute, as well as newly allocated objects in the young/old generation and non-generational areas.
+- **GC Data Size Detail:** Size of long-lived objects in the JVM and the maximum allowed size for each generation.
+- **Heap Memory:** JVM heap memory usage.
+ - **Maximum Heap Memory:** Maximum available heap memory for the JVM.
+ - **Committed Heap Memory:** Committed heap memory size for the JVM.
+ - **Used Heap Memory:** The amount of heap memory currently in use.
+ - **PS Eden Space:** Size of the PS Young generation's Eden space.
+ - **PS Old Space:** Size of the PS Old generation.
+ - **PS Survivor Space:** Size of the PS Survivor space.
+ - ... (corresponding spaces for CMS/G1/ZGC, etc.)
+- **Off-Heap Memory:** Off-heap memory usage.
+ - **Direct Memory:** The amount of direct memory used.
+ - **Mapped Memory:** The amount of memory used for mapped files.
+- **GC Number Per Minute:** Average number of garbage collections (YGC and FGC) performed per minute.
+- **GC Time Per Minute:** Average time spent on garbage collection (YGC and FGC) per minute (see the `jstat` cross-check after this list).
+- **GC Number Per Minute Detail:** Average number of garbage collections performed per minute due to different causes.
+- **GC Time Per Minute Detail:** Average time spent on garbage collection per minute due to different causes.
+- **Time Consumed of Compilation Per Minute:** Total time spent on JVM compilation per minute.
+- **The Number of Class:**
+ - **Loaded:** Number of classes currently loaded by the JVM.
+ - **Unloaded:** Number of classes unloaded by the JVM since system startup.
+- **The Number of Java Thread:** The number of currently active threads in IoTDB. Each sub-item represents the number of threads in different states.
+
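+The GC counts and times can be sanity-checked against the JDK's own tooling; for example, with `<pid>` standing in for the IoTDB process ID:
+
+```bash
+# Heap-space utilization plus cumulative YGC/FGC counts and times
+# for the IoTDB JVM, refreshed every 10 seconds.
+jstat -gcutil <pid> 10000
+```
+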
+#### 3.1.5 Network
+
+- **Net Speed:** Data transmission and reception speed by the network interface.
+- **Receive/Transmit Data Size:** The total size of data packets sent and received by the network interface since system startup.
+- **Packet Speed:** The rate of data packets sent and received by the network interface. A single RPC request may correspond to one or more packets.
+- **Connection Num:** Number of socket connections for the current process (IoTDB only uses TCP); see the example after this list.
+
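+The Connection Num panel counts TCP sockets owned by the process; the same number can be derived with `ss`, where `<pid>` is the IoTDB process ID:
+
+```bash
+# Count established TCP connections belonging to the IoTDB process.
+ss -tnp | grep "pid=<pid>," | wc -l
+```
+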
+### 3.2 Performance Overview Dashboard
+
+This dashboard provides an overview of the system's overall performance.
+
+#### 3.2.1 Cluster Overview
+
+- **Total CPU Core:** Total number of CPU cores in the cluster.
+- **DataNode CPU Load:** CPU utilization of each DataNode in the cluster.
+- Disk:
+ - **Total Disk Space:** Total disk space across all cluster nodes.
+ - **DataNode Disk Usage:** Disk usage of each DataNode in the cluster.
+- **Total Timeseries:** The total number of time series managed by the cluster (including replicas). The actual number of time series should be calculated considering metadata replicas (see the CLI note after this list).
+- **Cluster:** The number of ConfigNode and DataNode instances in the cluster.
+- **Up Time:** The duration since the cluster started.
+- **Total Write Point Per Second:** The total number of data points written per second in the cluster (including replicas). The actual number of writes should be analyzed in conjunction with the data replication factor.
+- Memory:
+ - **Total System Memory:** The total system memory available in the cluster.
+ - **Total Swap Memory:** The total swap memory available in the cluster.
+ - **DataNode Process Memory Usage:** The memory usage of each DataNode in the cluster.
+- **Total File Number:** The total number of files managed by the cluster.
+- **Cluster System Overview:** An overview of cluster-wide system resources, including average DataNode memory usage and average disk usage.
+- **Total Database:** The total number of databases managed by the cluster (including replicas).
+- **Total DataRegion:** The total number of DataRegions in the cluster.
+- **Total SchemaRegion:** The total number of SchemaRegions in the cluster.
+
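+Because this panel counts physical series including replicas, it can be compared with the logical series count reported by the cluster itself; in the tree model this is a single CLI statement (the table model uses different statements, so treat this as a tree-model sketch):
+
+```bash
+# From the IoTDB CLI: logical (replica-independent) series count.
+IoTDB> count timeseries
+```
+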
+#### 3.2.2 Node Overview
+
+- **CPU Core:** Number of CPU cores on the node’s machine.
+- **Disk Space:** Total disk space available on the node’s machine.
+- **Timeseries:** The number of time series managed by the node (including replicas).
+- **System Overview:** Overview of the node’s system resources, including CPU load, process memory usage, and disk usage.
+- **Write Point Per Second:** The write speed of the node, including replicated data.
+- **System Memory:** The total system memory available on the node’s machine.
+- **Swap Memory:** The total swap memory available on the node’s machine.
+- **File Number:** The number of files managed by the node.
+
+#### 3.2.3 Performance
+
+- **Session Idle Time:** The total idle time and total busy time of session connections on the node.
+- **Client Connection:** The status of client connections on the node, including the total number of connections and the number of active connections.
+- **Time Consumed Of Operation:** The latency of various operations on the node, including the average value and P99 percentile (a Prometheus query sketch follows this list).
+- **Average Time Consumed Of Interface:** The average latency of each **Thrift interface** on the node.
+- **P99 Time Consumed Of Interface:** The P99 latency of each Thrift interface on the node.
+- **Task Number:** The number of system tasks running on the node.
+- **Average Time Consumed Of Task:** The average execution time of system tasks on the node.
+- **P99 Time Consumed Of Task:** The P99 execution time of system tasks on the node.
+- **Operation Per Second:** The number of operations executed per second on the node.
+- Main Process:
+ - **Operation Per Second of Stage:** The number of operations executed per second in different stages of the main process.
+ - **Average Time Consumed of Stage:** The average execution time of different stages in the main process.
+ - **P99 Time Consumed of Stage:** The P99 execution time of different stages in the main process.
+- Scheduling Stage:
+ - **OPS Of Schedule:** The number of operations executed per second in different sub-stages of the scheduling stage.
+ - **Average Time Consumed Of Schedule Stage:** The average execution time in different sub-stages of the scheduling stage.
+ - **P99 Time Consumed Of Schedule Stage:** The P99 execution time in different sub-stages of the scheduling stage.
+- Local Scheduling Stage:
+ - **OPS of Local Schedule Stage:** Number of operations per second at each sub-stage of the local schedule stage.
+ - **Average Time Consumed of Local Schedule Stage:** Average time consumed at each sub-stage of the local schedule stage.
+ - **P99 Time Consumed of Local Schedule Stage:** P99 time consumed at each sub-stage of the local schedule stage.
+- Storage Stage:
+ - **OPS of Storage Stage:** Number of operations per second at each sub-stage of the storage stage.
+ - **Average Time Consumed of Storage Stage:** Average time consumed at each sub-stage of the storage stage.
+ - **P99 Time Consumed of Storage Stage:** P99 time consumed at each sub-stage of the storage stage.
+- Engine Stage:
+ - **OPS Of Engine Stage:** The number of operations executed per second in different sub-stages of the engine stage.
+ - **Average Time Consumed Of Engine Stage:** The average execution time in different sub-stages of the engine stage.
+ - **P99 Time Consumed Of Engine Stage:** The P99 execution time in different sub-stages of the engine stage.
+
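+The avg/P99 panels in this dashboard are typically rendered from Prometheus quantile queries. A minimal sketch against the Prometheus HTTP API, assuming a server at `localhost:9090` and a hypothetical histogram metric `operation_latency_seconds` (substitute the metric name actually exposed by your deployment):
+
+```bash
+# P99 operation latency over the last minute; the metric name is
+# illustrative, not an IoTDB-defined identifier.
+curl -s 'http://localhost:9090/api/v1/query' \
+  --data-urlencode 'query=histogram_quantile(0.99, rate(operation_latency_seconds_bucket[1m]))'
+```
+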
+#### 3.2.4 System
+
+- **CPU Load:** The CPU load of the node.
+- **CPU Time Per Minute:** The total CPU time per minute on the node, which is influenced by the number of CPU cores.
+- **GC Time Per Minute:** The average time spent on Garbage Collection (GC) per minute on the node, including Young GC (YGC) and Full GC (FGC).
+- **Heap Memory:** The heap memory usage of the node.
+- **Off-Heap Memory:** The off-heap memory usage of the node.
+- **The Number Of Java Thread:** The number of Java threads on the node.
+- **File Count:** The number of files managed by the node.
+- **File Size:** The total size of files managed by the node.
+- **Log Number Per Minute:** The number of logs generated per minute on the node, categorized by log type.
+
+### 3.3 ConfigNode Dashboard
+
+This dashboard displays the performance metrics of all management nodes in the cluster, including **partition information, node status, and client connection statistics**.
+
+#### 3.3.1 Node Overview
+
+- **Database Count:** Number of databases on the node.
+- Region:
+ - **DataRegion Count:** Number of DataRegions on the node.
+ - **DataRegion Current Status:** Current status of DataRegions on the node.
+ - **SchemaRegion Count:** Number of SchemaRegions on the node.
+ - **SchemaRegion Current Status:** Current status of SchemaRegions on the node.
+- **System Memory:** System memory on the node's machine.
+- **Swap Memory:** Swap memory on the node's machine.
+- **ConfigNodes:** Status of ConfigNodes in the cluster.
+- **DataNodes:** Status of DataNodes in the cluster.
+- **System Overview:** Overview of the node's system resources, including system memory, disk usage, process memory, and CPU load.
+
+#### 3.3.2 NodeInfo
+
+- **Node Count:** The total number of nodes in the cluster, including ConfigNodes and DataNodes.
+- **ConfigNode Status:** The status of ConfigNodes in the cluster.
+- **DataNode Status:** The status of DataNodes in the cluster (see the CLI example after this list).
+- **SchemaRegion Distribution:** The distribution of SchemaRegions in the cluster.
+- **SchemaRegionGroup Leader Distribution:** The leader distribution of SchemaRegionGroups in the cluster.
+- **DataRegion Distribution:** The distribution of DataRegions in the cluster.
+- **DataRegionGroup Leader Distribution:** The leader distribution of DataRegionGroups in the cluster.
+
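+The node-count and status panels mirror what the cluster itself reports; the same view is available from the IoTDB CLI (connection parameters below are the usual defaults):
+
+```bash
+# List all ConfigNodes and DataNodes with their current status.
+start-cli.sh -h 127.0.0.1 -p 6667 -u root -pw root
+IoTDB> show cluster
+```
+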
+#### 3.3.3 Protocol
+
+- Client Count Statistics:
+ - **Active Client Num:** The number of active clients in each thread pool on the node.
+ - **Idle Client Num:** The number of idle clients in each thread pool on the node.
+ - **Borrowed Client Count:** The number of borrowed clients in each thread pool on the node.
+ - **Created Client Count:** The number of clients created in each thread pool on the node.
+ - **Destroyed Client Count:** The number of clients destroyed in each thread pool on the node.
+- Client Time Statistics:
+ - **Client Mean Active Time:** The average active time of clients in each thread pool on the node.
+ - **Client Mean Borrow Wait Time:** The average time spent waiting to borrow a client from each thread pool.
+ - **Client Mean Idle Time:** The average idle time of clients in each thread pool.
+
+#### 3.3.4 Partition Table
+
+- **SchemaRegionGroup Count:** The number of SchemaRegionGroups in the cluster’s databases.
+- **DataRegionGroup Count:** The number of DataRegionGroups in the cluster’s databases.
+- **SeriesSlot Count:** The number of SeriesSlots in the cluster’s databases.
+- **TimeSlot Count:** The number of TimeSlots in the cluster’s databases.
+- **DataRegion Status:** The status of DataRegions in the cluster.
+- **SchemaRegion Status:** The status of SchemaRegions in the cluster.
+
+#### 3.3.5 Consensus
+
+- **Ratis Stage Time:** The execution time of different stages in the Ratis consensus protocol.
+- **Write Log Entry:** The execution time for writing log entries in Ratis.
+- **Remote / Local Write Time:** The time taken for remote and local writes in Ratis.
+- **Remote / Local Write QPS:** The **queries per second (QPS)** for remote and local writes in Ratis.
+- **RatisConsensus Memory:** The memory usage of the Ratis consensus protocol on the node.
+
+### 3.4 DataNode Dashboard
+
+This dashboard displays the monitoring status of all **DataNodes** in the cluster, including **write latency, query latency, and storage file counts**.
+
+#### 3.4.1 Node Overview
+
+- **The Number of Entity:** The number of entities managed by the node.
+- **Write Point Per Second:** The write speed of the node (points per second).
+- **Memory Usage:** The memory usage of the node, including IoT Consensus memory usage, SchemaRegion memory usage, and per-database memory usage.
+
+#### 3.4.2 Protocol
+
+- Operation Latency:
+ - **The Time Consumed of Operation (avg):** The average latency of operations on the node.
+ - **The Time Consumed of Operation (50%):** The median latency of operations on the node.
+ - **The Time Consumed of Operation (99%):** The P99 latency of operations on the node.
+- Thrift Statistics:
+ - **The QPS of Interface:** The queries per second (QPS) for each Thrift interface on the node.
+ - **The Avg Time Consumed of Interface:** The average execution time for each Thrift interface on the node.
+ - **Thrift Connection:** The number of Thrift connections of each type on the node.
+ - **Thrift Active Thread:** The number of active Thrift threads of each type on the node.
+- Client Statistics:
+ - **Active Client Num:** The number of active clients in each thread pool.
+ - **Idle Client Num:** The number of idle clients in each thread pool.
+ - **Borrowed Client Count:** The number of borrowed clients in each thread pool.
+ - **Created Client Count:** The number of clients created in each thread pool.
+ - **Destroyed Client Count:** The number of clients destroyed in each thread pool.
+ - **Client Mean Active Time:** The average active time of clients in each thread pool.
+ - **Client Mean Borrow Wait Time:** The average time spent waiting to borrow a client from each thread pool.
+ - **Client Mean Idle Time:** The average idle time of clients in each thread pool.
+
+#### 3.4.3 Storage Engine
+
+- **File Count:** The number of files managed by the node.
+- **File Size:** The total size of files managed by the node.
+- TsFile:
+ - **TsFile Total Size In Each Level:** The total size of TsFiles at each level.
+ - **TsFile Count In Each Level:** The number of TsFiles at each level.
+ - **Avg TsFile Size In Each Level:** The average size of TsFiles at each level.
+- **Task Number:** The number of tasks on the node.
+- **The Time Consumed of Task:** The total execution time of tasks on the node.
+- Compaction:
+ - **Compaction Read And Write Per Second:** The read/write speed of compaction operations.
+ - **Compaction Number Per Minute:** The number of **compaction** operations per minute.
+ - **Compaction Process Chunk Status:** The number of **chunks** in different states during compaction.
+ - **Compacted Point Num Per Minute:** The number of data points compacted per minute.
+
+#### 3.4.4 Write Performance
+
+- **Write Cost (avg):** The average **write latency**, including WAL and **memtable** writes.
+- **Write Cost (50%):** The **median write latency**, including WAL and **memtable** writes.
+- **Write Cost (99%):** The **P99 write latency**, including WAL and **memtable** writes.
+- WAL (Write-Ahead Logging)
+ - **WAL File Size:** The total size of WAL files managed by the node.
+ - **WAL File Num:** The total number of WAL files managed by the node.
+ - **WAL Nodes Num:** The total number of WAL Nodes managed by the node.
+ - **Make Checkpoint Costs:** The time required to create different types of Checkpoints.
+ - **WAL Serialize Total Cost:** The total serialization time for WAL.
+ - **Data Region Mem Cost:** The memory usage of different DataRegions, including total memory usage of DataRegions on the current instance and total memory usage of DataRegions across the entire cluster.
+ - **Serialize One WAL Info Entry Cost:** The time taken to serialize a single WAL Info Entry.
+ - **Oldest MemTable Ram Cost When Cause Snapshot:** The memory size of the oldest MemTable when a snapshot is triggered by WAL.
+ - **Oldest MemTable Ram Cost When Cause Flush:** The memory size of the oldest MemTable when a flush is triggered by WAL.
+ - **Effective Info Ratio of WALNode:** The ratio of effective information in different WALNodes.
- WAL Buffer
- - WAL Buffer Cost: Node WAL flush SyncBuffer takes time, including both synchronous and asynchronous options
- - WAL Buffer Used Ratio: The usage rate of the WAL Buffer of the node
- - WAL Buffer Entries Count: The number of entries in the WAL Buffer of a node
+ - **WAL Buffer Cost:** The time taken to flush the SyncBuffer of WAL, including both synchronous and asynchronous flushes.
+ - **WAL Buffer Used Ratio:** The utilization ratio of the WAL Buffer.
+ - **WAL Buffer Entries Count:** The number of entries in the WAL Buffer.
- Flush Statistics
- - Flush MemTable Cost(avg): The total time spent on node Flush and the average time spent on each sub stage
- - Flush MemTable Cost(50%): The total time spent on node Flush and the median time spent on each sub stage
- - Flush MemTable Cost(99%): The total time spent on node Flush and the P99 time spent on each sub stage
- - Flush Sub Task Cost(avg): The average time consumption of each node's Flush subtask, including sorting, encoding, and IO stages
- - Flush Sub Task Cost(50%): The median time consumption of each subtask of the Flush node, including sorting, encoding, and IO stages
- - Flush Sub Task Cost(99%): The average subtask time P99 for Flush of nodes, including sorting, encoding, and IO stages
-- Pending Flush Task Num: The number of Flush tasks in a blocked state for a node
-- Pending Flush Sub Task Num: Number of Flush subtasks blocked by nodes
-- Tsfile Compression Ratio Of Flushing MemTable: The compression rate of TsFile corresponding to node flashing Memtable
-- Flush TsFile Size Of DataRegions: The corresponding TsFile size for each disk flush of nodes in different DataRegions
-- Size Of Flushing MemTable: The size of the Memtable for node disk flushing
-- Points Num Of Flushing MemTable: The number of points when flashing data in different DataRegions of a node
-- Series Num Of Flushing MemTable: The number of time series when flashing Memtables in different DataRegions of a node
-- Average Point Num Of Flushing MemChunk: The average number of disk flushing points for node MemChunk
-
-#### Schema Engine
-
-- Schema Engine Mode: The metadata engine pattern of nodes
-- Schema Consensus Protocol: Node metadata consensus protocol
-- Schema Region Number:Number of SchemeRegions managed by nodes
-- Schema Region Memory Overview: The amount of memory in the SchemeRegion of a node
-- Memory Usgae per SchemaRegion:The average memory usage size of node SchemaRegion
-- Cache MNode per SchemaRegion: The number of cache nodes in each SchemeRegion of a node
-- MLog Length and Checkpoint: The total length and checkpoint position of the current mlog for each SchemeRegion of the node (valid only for SimpleConsense)
-- Buffer MNode per SchemaRegion: The number of buffer nodes in each SchemeRegion of a node
-- Activated Template Count per SchemaRegion: The number of activated templates in each SchemeRegion of a node
-- Time Series statistics
- - Timeseries Count per SchemaRegion: The average number of time series for node SchemaRegion
- - Series Type: Number of time series of different types of nodes
- - Time Series Number: The total number of time series nodes
- - Template Series Number: The total number of template time series for nodes
- - Template Series Count per SchemaRegion: The number of sequences created through templates in each SchemeRegion of a node
+ - **Flush MemTable Cost (avg):** The average total flush time, including time spent in different sub-stages.
+ - **Flush MemTable Cost (50%):** The median total flush time, including time spent in different sub-stages.
+ - **Flush MemTable Cost (99%):** The P99 total flush time, including time spent in different sub-stages.
+ - **Flush Sub Task Cost (avg):** The average execution time of flush sub-tasks, including sorting, encoding, and I/O stages.
+ - **Flush Sub Task Cost (50%):** The median execution time of flush sub-tasks, including sorting, encoding, and I/O stages.
+ - **Flush Sub Task Cost (99%):** The P99 execution time of flush sub-tasks, including sorting, encoding, and I/O stages.
+- **Pending Flush Task Num:** The number of Flush tasks currently in a blocked state.
+- **Pending Flush Sub Task Num:** The number of blocked Flush sub-tasks.
+- **TsFile Compression Ratio of Flushing MemTable:** The compression ratio of TsFiles generated from flushed MemTables.
+- **Flush TsFile Size of DataRegions:** The size of TsFiles generated from flushed MemTables in different DataRegions.
+- **Size of Flushing MemTable:** The size of the MemTable currently being flushed.
+- **Points Num of Flushing MemTable:** The number of data points being flushed from MemTables in different DataRegions.
+- **Series Num of Flushing MemTable:** The number of time series being flushed from MemTables in different DataRegions.
+- **Average Point Num of Flushing MemChunk:** The average number of points in MemChunks being flushed.
+
+#### 3.4.5 Schema Engine
+
+- **Schema Engine Mode:** The metadata engine mode used by the node.
+- **Schema Consensus Protocol:** The metadata consensus protocol used by the node.
+- **Schema Region Number:** The number of SchemaRegions managed by the node.
+- **Schema Region Memory Overview:** The total memory used by SchemaRegions on the node.
+- **Memory Usage per SchemaRegion:** The average memory usage per SchemaRegion.
+- **Cache MNode per SchemaRegion:** The number of cached MNodes per SchemaRegion.
+- **MLog Length and Checkpoint:** The current MLog size and checkpoint position for each SchemaRegion (valid only for SimpleConsensus).
+- **Buffer MNode per SchemaRegion:** The number of buffered MNodes per SchemaRegion.
+- **Activated Template Count per SchemaRegion:** The number of activated templates per SchemaRegion.
+- Time Series Statistics
+ - **Timeseries Count per SchemaRegion:** The average number of time series per SchemaRegion.
+ - **Series Type:** The number of time series of different types.
+ - **Time Series Number:** The total number of time series on the node.
+ - **Template Series Number:** The total number of template-based time series on the node.
+ - **Template Series Count per SchemaRegion:** The number of time series created via templates per SchemaRegion.
- IMNode Statistics
- - Pinned MNode per SchemaRegion: Number of IMNode nodes with Pinned nodes in each SchemeRegion
- - Pinned Memory per SchemaRegion: The memory usage size of the IMNode node for Pinned nodes in each SchemeRegion of the node
- - Unpinned MNode per SchemaRegion: The number of unpinned IMNode nodes in each SchemeRegion of a node
- - Unpinned Memory per SchemaRegion: Memory usage size of unpinned IMNode nodes in each SchemeRegion of the node
- - Schema File Memory MNode Number: Number of IMNode nodes with global pinned and unpinned nodes
- - Release and Flush MNode Rate: The number of IMNodes that release and flush nodes per second
-- Cache Hit Rate: Cache hit rate of nodes
-- Release and Flush Thread Number: The current number of active Release and Flush threads on the node
-- Time Consumed of Relead and Flush (avg): The average time taken for node triggered cache release and buffer flushing
-- Time Consumed of Relead and Flush (99%): P99 time consumption for node triggered cache release and buffer flushing
-
-#### Query Engine
-
-- Time Consumption In Each Stage
- - The time consumed of query plan stages(avg): The average time spent on node queries at each stage
- - The time consumed of query plan stages(50%): Median time spent on node queries at each stage
- - The time consumed of query plan stages(99%): P99 time consumption for node query at each stage
-- Execution Plan Distribution Time
- - The time consumed of plan dispatch stages(avg): The average time spent on node query execution plan distribution
- - The time consumed of plan dispatch stages(50%): Median time spent on node query execution plan distribution
- - The time consumed of plan dispatch stages(99%): P99 of node query execution plan distribution time
-- Execution Plan Execution Time
- - The time consumed of query execution stages(avg): The average execution time of node query execution plan
- - The time consumed of query execution stages(50%):Median execution time of node query execution plan
- - The time consumed of query execution stages(99%): P99 of node query execution plan execution time
+ - **Pinned MNode per SchemaRegion:** The number of pinned IMNodes per SchemaRegion.
+ - **Pinned Memory per SchemaRegion:** The memory usage of pinned IMNodes per SchemaRegion.
+ - **Unpinned MNode per SchemaRegion:** The number of unpinned IMNodes per SchemaRegion.
+ - **Unpinned Memory per SchemaRegion:** The memory usage of unpinned IMNodes per SchemaRegion.
+ - **Schema File Memory MNode Number:** The total number of pinned and unpinned IMNodes on the node.
+ - **Release and Flush MNode Rate:** The number of IMNodes released and flushed per second.
+- **Cache Hit Rate:** The cache hit ratio of the node.
+- **Release and Flush Thread Number:** The number of active threads for releasing and flushing memory.
+- **Time Consumed of Release and Flush (avg):** The average execution time for cache release and buffer flush.
+- **Time Consumed of Release and Flush (99%):** The P99 execution time for cache release and buffer flush.
+
+#### 3.4.6 Query Engine
+
+- Time Consumed at Each Stage
+ - **The time consumed of query plan stages (avg):** The average time consumed in different query plan stages on the node.
+ - **The time consumed of query plan stages (50%):** The median time consumed in different query plan stages on the node.
+ - **The time consumed of query plan stages (99%):** The P99 time consumed in different query plan stages on the node.
+- Plan Dispatch Time
+ - **The time consumed of plan dispatch stages (avg):** The average time consumed in query execution plan dispatch.
+ - **The time consumed of plan dispatch stages (50%):** The median time consumed in query execution plan dispatch.
+ - **The time consumed of plan dispatch stages (99%):** The P99 time consumed in query execution plan dispatch.
+- Query Execution Time
+ - **The time consumed of query execution stages (avg):** The average time consumed in query execution on the node.
+ - **The time consumed of query execution stages (50%):** The median time consumed in query execution on the node.
+ - **The time consumed of query execution stages (99%):** The P99 time consumed in query execution on the node.
- Operator Execution Time
- - The time consumed of operator execution stages(avg): The average execution time of node query operators
- - The time consumed of operator execution(50%): Median execution time of node query operator
- - The time consumed of operator execution(99%): P99 of node query operator execution time
+ - **The time consumed of operator execution stages (avg):** The average time consumed in query operator execution.
+ - **The time consumed of operator execution (50%):** The median time consumed in query operator execution.
+ - **The time consumed of operator execution (99%):** The P99 time consumed in query operator execution.
- Aggregation Query Computation Time
- - The time consumed of query aggregation(avg): The average computation time for node aggregation queries
- - The time consumed of query aggregation(50%): Median computation time for node aggregation queries
- - The time consumed of query aggregation(99%): P99 of node aggregation query computation time
-- File/Memory Interface Time Consumption
- - The time consumed of query scan(avg): The average time spent querying file/memory interfaces for nodes
- - The time consumed of query scan(50%): Median time spent querying file/memory interfaces for nodes
- - The time consumed of query scan(99%): P99 time consumption for node query file/memory interface
-- Number Of Resource Visits
- - The usage of query resource(avg): The average number of resource visits for node queries
- - The usage of query resource(50%): Median number of resource visits for node queries
- - The usage of query resource(99%): P99 for node query resource access quantity
+ - **The time consumed of query aggregation (avg):** The average time consumed in aggregation query computation.
+ - **The time consumed of query aggregation (50%):** The median time consumed in aggregation query computation.
+ - **The time consumed of query aggregation (99%):** The P99 time consumed in aggregation query computation.
+- File/Memory Interface Time
+ - **The time consumed of query scan (avg):** The average time consumed in file/memory interface query scans.
+ - **The time consumed of query scan (50%):** The median time consumed in file/memory interface query scans.
+ - **The time consumed of query scan (99%):** The P99 time consumed in file/memory interface query scans.
+- Resource Access Count
+ - **The usage of query resource (avg):** The average number of resource accesses during query execution.
+ - **The usage of query resource (50%):** The median number of resource accesses during query execution.
+ - **The usage of query resource (99%):** The P99 number of resource accesses during query execution.
- Data Transmission Time
- - The time consumed of query data exchange(avg): The average time spent on node query data transmission
- - The time consumed of query data exchange(50%): Median query data transmission time for nodes
- - The time consumed of query data exchange(99%): P99 for node query data transmission time
-- Number Of Data Transfers
- - The count of Data Exchange(avg): The average number of data transfers queried by nodes
- - The count of Data Exchange: The quantile of the number of data transfers queried by nodes, including the median and P99
-- Task Scheduling Quantity And Time Consumption
- - The number of query queue: Node query task scheduling quantity
- - The time consumed of query schedule time(avg): The average time spent on scheduling node query tasks
- - The time consumed of query schedule time(50%): Median time spent on node query task scheduling
- - The time consumed of query schedule time(99%): P99 of node query task scheduling time
-
-#### Query Interface
+ - **The time consumed of query data exchange (avg):** The average time consumed in query data exchange.
+ - **The time consumed of query data exchange (50%):** The median time consumed in query data exchange.
+ - **The time consumed of query data exchange (99%):** The P99 time consumed in query data exchange.
+- Data Transmission Count
+ - **The count of Data Exchange (avg):** The average number of data exchanges during queries.
+ - **The count of Data Exchange:** The quantiles (median, P99) of data exchanges during queries.
+- Task Scheduling Count and Time
+ - **The number of query queue:** The number of query tasks scheduled.
+ - **The time consumed of query schedule time (avg):** The average time consumed for query scheduling.
+ - **The time consumed of query schedule time (50%):** The median time consumed for query scheduling.
+ - **The time consumed of query schedule time (99%):** The P99 time consumed for query scheduling.
+
+#### 3.4.7 Query Interface
+
- Load Time Series Metadata
- - The time consumed of load timeseries metadata(avg): The average time taken for node queries to load time series metadata
- - The time consumed of load timeseries metadata(50%): Median time spent on loading time series metadata for node queries
- - The time consumed of load timeseries metadata(99%): P99 time consumption for node query loading time series metadata
+ - **The time consumed of load timeseries metadata (avg):** The average time consumed for loading time series metadata.
+ - **The time consumed of load timeseries metadata (50%):** The median time consumed for loading time series metadata.
+ - **The time consumed of load timeseries metadata (99%):** The P99 time consumed for loading time series metadata.
- Read Time Series
- - The time consumed of read timeseries metadata(avg): The average time taken for node queries to read time series
- - The time consumed of read timeseries metadata(50%): The median time taken for node queries to read time series
- - The time consumed of read timeseries metadata(99%): P99 time consumption for node query reading time series
+ - **The time consumed of read timeseries metadata (avg):** The average time consumed for reading time series.
+ - **The time consumed of read timeseries metadata (50%):** The median time consumed for reading time series.
+ - **The time consumed of read timeseries metadata (99%):** The P99 time consumed for reading time series.
- Modify Time Series Metadata
- - The time consumed of timeseries metadata modification(avg):The average time taken for node queries to modify time series metadata
- - The time consumed of timeseries metadata modification(50%): Median time spent on querying and modifying time series metadata for nodes
- - The time consumed of timeseries metadata modification(99%): P99 time consumption for node query and modification of time series metadata
+ - **The time consumed of timeseries metadata modification (avg):** The average time consumed for modifying time series metadata.
+ - **The time consumed of timeseries metadata modification (50%):** The median time consumed for modifying time series metadata.
+ - **The time consumed of timeseries metadata modification (99%):** The P99 time consumed for modifying time series metadata.
- Load Chunk Metadata List
- - The time consumed of load chunk metadata list(avg): The average time it takes for node queries to load Chunk metadata lists
- - The time consumed of load chunk metadata list(50%): Median time spent on node query loading Chunk metadata list
- - The time consumed of load chunk metadata list(99%): P99 time consumption for node query loading Chunk metadata list
+ - **The time consumed of load chunk metadata list (avg):** The average time consumed for loading the chunk metadata list.
+ - **The time consumed of load chunk metadata list (50%):** The median time consumed for loading the chunk metadata list.
+ - **The time consumed of load chunk metadata list (99%):** The P99 time consumed for loading the chunk metadata list.
- Modify Chunk Metadata
- - The time consumed of chunk metadata modification(avg): The average time it takes for node queries to modify Chunk metadata
- - The time consumed of chunk metadata modification(50%): The total number of bits spent on modifying Chunk metadata for node queries
- - The time consumed of chunk metadata modification(99%): P99 time consumption for node query and modification of Chunk metadata
-- Filter According To Chunk Metadata
- - The time consumed of chunk metadata filter(avg): The average time spent on node queries filtering by Chunk metadata
- - The time consumed of chunk metadata filter(50%): Median filtering time for node queries based on Chunk metadata
- - The time consumed of chunk metadata filter(99%): P99 time consumption for node query filtering based on Chunk metadata
-- Constructing Chunk Reader
- - The time consumed of construct chunk reader(avg): The average time spent on constructing Chunk Reader for node queries
- - The time consumed of construct chunk reader(50%): Median time spent on constructing Chunk Reader for node queries
- - The time consumed of construct chunk reader(99%): P99 time consumption for constructing Chunk Reader for node queries
+ - **The time consumed of chunk metadata modification (avg):** The average time consumed for modifying chunk metadata.
+ - **The time consumed of chunk metadata modification (50%):** The median time consumed for modifying chunk metadata.
+ - **The time consumed of chunk metadata modification (99%):** The P99 time consumed for modifying chunk metadata.
+- Filter by Chunk Metadata
+ - **The time consumed of chunk metadata filter (avg):** The average time consumed for filtering by chunk metadata.
+ - **The time consumed of chunk metadata filter (50%):** The median time consumed for filtering by chunk metadata.
+ - **The time consumed of chunk metadata filter (99%):** The P99 time consumed for filtering by chunk metadata.
+- Construct Chunk Reader
+ - **The time consumed of construct chunk reader (avg):** The average time consumed for constructing a Chunk Reader.
+ - **The time consumed of construct chunk reader (50%):** The median time consumed for constructing a Chunk Reader.
+ - **The time consumed of construct chunk reader (99%):** The P99 time consumed for constructing a Chunk Reader.
- Read Chunk
- - The time consumed of read chunk(avg): The average time taken for node queries to read Chunks
- - The time consumed of read chunk(50%): Median time spent querying nodes to read Chunks
- - The time consumed of read chunk(99%): P99 time spent on querying and reading Chunks for nodes
+ - **The time consumed of read chunk (avg):** The average time consumed for reading a Chunk.
+ - **The time consumed of read chunk (50%):** The median time consumed for reading a Chunk.
+ - **The time consumed of read chunk (99%):** The P99 time consumed for reading a Chunk.
- Initialize Chunk Reader
- - The time consumed of init chunk reader(avg): The average time spent initializing Chunk Reader for node queries
- - The time consumed of init chunk reader(50%): Median time spent initializing Chunk Reader for node queries
- - The time consumed of init chunk reader(99%):P99 time spent initializing Chunk Reader for node queries
-- Constructing TsBlock Through Page Reader
- - The time consumed of build tsblock from page reader(avg): The average time it takes for node queries to construct TsBlock through Page Reader
- - The time consumed of build tsblock from page reader(50%): The median time spent on constructing TsBlock through Page Reader for node queries
- - The time consumed of build tsblock from page reader(99%):Node query using Page Reader to construct TsBlock time-consuming P99
-- Query the construction of TsBlock through Merge Reader
- - The time consumed of build tsblock from merge reader(avg): The average time taken for node queries to construct TsBlock through Merge Reader
- - The time consumed of build tsblock from merge reader(50%): The median time spent on constructing TsBlock through Merge Reader for node queries
- - The time consumed of build tsblock from merge reader(99%): Node query using Merge Reader to construct TsBlock time-consuming P99
-
-#### Query Data Exchange
-
-The data exchange for the query is time-consuming.
-
-- Obtain TsBlock through source handle
- - The time consumed of source handle get tsblock(avg): The average time taken for node queries to obtain TsBlock through source handle
- - The time consumed of source handle get tsblock(50%):Node query obtains the median time spent on TsBlock through source handle
- - The time consumed of source handle get tsblock(99%): Node query obtains TsBlock time P99 through source handle
-- Deserialize TsBlock through source handle
- - The time consumed of source handle deserialize tsblock(avg): The average time taken for node queries to deserialize TsBlock through source handle
- - The time consumed of source handle deserialize tsblock(50%): The median time taken for node queries to deserialize TsBlock through source handle
- - The time consumed of source handle deserialize tsblock(99%): P99 time spent on deserializing TsBlock through source handle for node query
-- Send TsBlock through sink handle
- - The time consumed of sink handle send tsblock(avg): The average time taken for node queries to send TsBlock through sink handle
- - The time consumed of sink handle send tsblock(50%): Node query median time spent sending TsBlock through sink handle
- - The time consumed of sink handle send tsblock(99%): Node query sends TsBlock through sink handle with a time consumption of P99
-- Callback data block event
- - The time consumed of on acknowledge data block event task(avg): The average time taken for node query callback data block event
- - The time consumed of on acknowledge data block event task(50%): Median time spent on node query callback data block event
- - The time consumed of on acknowledge data block event task(99%): P99 time consumption for node query callback data block event
-- Get Data Block Tasks
- - The time consumed of get data block task(avg): The average time taken for node queries to obtain data block tasks
- - The time consumed of get data block task(50%): The median time taken for node queries to obtain data block tasks
- - The time consumed of get data block task(99%): P99 time consumption for node query to obtain data block task
-
-#### Query Related Resource
-
-- MppDataExchangeManager:The number of shuffle sink handles and source handles during node queries
-- LocalExecutionPlanner: The remaining memory that nodes can allocate to query shards
-- FragmentInstanceManager: The query sharding context information and the number of query shards that the node is running
-- Coordinator: The number of queries recorded on the node
-- MemoryPool Size: Node query related memory pool situation
-- MemoryPool Capacity: The size of memory pools related to node queries, including maximum and remaining available values
-- DriverScheduler: Number of queue tasks related to node queries
-
-#### Consensus - IoT Consensus
+ - **The time consumed of init chunk reader (avg):** The average time consumed for initializing a Chunk Reader.
+ - **The time consumed of init chunk reader (50%):** The median time consumed for initializing a Chunk Reader.
+ - **The time consumed of init chunk reader (99%):** The P99 time consumed for initializing a Chunk Reader.
+- Build TsBlock from Page Reader
+ - **The time consumed of build tsblock from page reader (avg):** The average time consumed for building a TsBlock using a Page Reader.
+ - **The time consumed of build tsblock from page reader (50%):** The median time consumed for building a TsBlock using a Page Reader.
+ - **The time consumed of build tsblock from page reader (99%):** The P99 time consumed for building a TsBlock using a Page Reader.
+- Build TsBlock from Merge Reader
+ - **The time consumed of build tsblock from merge reader (avg):** The average time consumed for building a TsBlock using a Merge Reader.
+ - **The time consumed of build tsblock from merge reader (50%):** The median time consumed for building a TsBlock using a Merge Reader.
+ - **The time consumed of build tsblock from merge reader (99%):** The P99 time consumed for building a TsBlock using a Merge Reader.
+
+#### 3.4.8 Query Data Exchange
+
+Time consumed of data exchange in queries.
+
+- Get TsBlock via Source Handle
+ - **The time consumed of source handle get tsblock (avg):** The average time consumed for retrieving a TsBlock using the source handle.
+ - **The time consumed of source handle get tsblock (50%):** The median time consumed for retrieving a TsBlock using the source handle.
+ - **The time consumed of source handle get tsblock (99%):** The P99 time consumed for retrieving a TsBlock using the source handle.
+- Deserialize TsBlock via Source Handle
+ - **The time consumed of source handle deserialize tsblock (avg):** The average time consumed for deserializing a TsBlock via the source handle.
+ - **The time consumed of source handle deserialize tsblock (50%):** The median time consumed for deserializing a TsBlock via the source handle.
+ - **The time consumed of source handle deserialize tsblock (99%):** The P99 time consumed for deserializing a TsBlock via the source handle.
+- Send TsBlock via Sink Handle
+ - **The time consumed of sink handle send tsblock (avg):** The average time consumed for sending a TsBlock via the sink handle.
+ - **The time consumed of sink handle send tsblock (50%):** The median time consumed for sending a TsBlock via the sink handle.
+ - **The time consumed of sink handle send tsblock (99%):** The P99 time consumed for sending a TsBlock via the sink handle.
+- Handle Data Block Event Callback
+ - **The time consumed of handling data block event callback (avg):** The average time consumed for handling the callback of a data block event during query execution.
+ - **The time consumed of handling data block event callback (50%):** The median time consumed for handling the callback of a data block event during query execution.
+ - **The time consumed of handling data block event callback (99%):** The P99 time consumed for handling the callback of a data block event during query execution.
+- Get Data Block Task
+ - **The time consumed of get data block task (avg):** The average time consumed for retrieving a data block task.
+ - **The time consumed of get data block task (50%):** The median time consumed for retrieving a data block task.
+ - **The time consumed of get data block task (99%):** The P99 time consumed for retrieving a data block task.
+
+#### 3.4.9 Query Related Resource
+
+- **MppDataExchangeManager:** The number of shuffle sink handles and source handles during queries.
+- **LocalExecutionPlanner:** The remaining memory available for query fragments.
+- **FragmentInstanceManager:** The context information and count of running query fragments.
+- **Coordinator:** The number of queries recorded on the node.
+- **MemoryPool Size:** The status of the memory pool related to queries.
+- **MemoryPool Capacity:** The size of the query-related memory pool, including the maximum and remaining available capacity.
+- **DriverScheduler:** The number of queued query tasks.
+
+#### 3.4.10 Consensus - IoT Consensus
- Memory Usage
- - IoTConsensus Used Memory: The memory usage of IoT Consumes for nodes, including total memory usage, queue usage, and synchronization usage
-- Synchronization Status Between Nodes
- - IoTConsensus Sync Index: SyncIndex size for different DataRegions of IoT Consumption nodes
- - IoTConsensus Overview:The total synchronization gap and cached request count of IoT consumption for nodes
- - IoTConsensus Search Index Rate: The growth rate of writing SearchIndex for different DataRegions of IoT Consumer nodes
- - IoTConsensus Safe Index Rate: The growth rate of synchronous SafeIndex for different DataRegions of IoT Consumer nodes
- - IoTConsensus LogDispatcher Request Size: The request size for node IoT Consusus to synchronize different DataRegions to other nodes
- - Sync Lag: The size of synchronization gap between different DataRegions in IoT Consumption node
- - Min Peer Sync Lag: The minimum synchronization gap between different DataRegions and different replicas of node IoT Consumption
- - Sync Speed Diff Of Peers: The maximum difference in synchronization from different DataRegions to different replicas for node IoT Consumption
- - IoTConsensus LogEntriesFromWAL Rate: The rate at which nodes IoT Consumus obtain logs from WAL for different DataRegions
- - IoTConsensus LogEntriesFromQueue Rate: The rate at which nodes IoT Consumes different DataRegions retrieve logs from the queue
-- Different Execution Stages Take Time
- - The Time Consumed Of Different Stages (avg): The average time spent on different execution stages of node IoT Consumus
- - The Time Consumed Of Different Stages (50%): The median time spent on different execution stages of node IoT Consusus
- - The Time Consumed Of Different Stages (99%):P99 of the time consumption for different execution stages of node IoT Consusus
-
-#### Consensus - DataRegion Ratis Consensus
-
-- Ratis Stage Time: The time consumption of different stages of node Ratis
-- Write Log Entry: The time consumption of writing logs at different stages of node Ratis
-- Remote / Local Write Time: The time it takes for node Ratis to write locally or remotely
-- Remote / Local Write QPS: QPS written by node Ratis locally or remotely
-- RatisConsensus Memory:Memory usage of node Ratis
-
-#### Consensus - SchemaRegion Ratis Consensus
-
-- Ratis Stage Time: The time consumption of different stages of node Ratis
-- Write Log Entry: The time consumption for writing logs at each stage of node Ratis
-- Remote / Local Write Time: The time it takes for node Ratis to write locally or remotelyThe time it takes for node Ratis to write locally or remotely
-- Remote / Local Write QPS: QPS written by node Ratis locally or remotely
-- RatisConsensus Memory: Node Ratis Memory Usage
\ No newline at end of file
+ - **IoTConsensus Used Memory:** The memory usage of IoT Consensus, including total used memory, queue memory usage, and synchronization memory usage.
+- Synchronization Between Nodes
+ - **IoTConsensus Sync Index:** The sync index size of different DataRegions.
+ - **IoTConsensus Overview:** The total synchronization lag and cached request count of IoT Consensus.
+ - **IoTConsensus Search Index Rate:** The growth rate of SearchIndex writes for different DataRegions.
+ - **IoTConsensus Safe Index Rate:** The growth rate of SafeIndex synchronization for different DataRegions.
+ - **IoTConsensus LogDispatcher Request Size:** The size of synchronization requests sent to other nodes for different DataRegions.
+ - **Sync Lag:** The synchronization lag size of different DataRegions.
+ - **Min Peer Sync Lag:** The minimum synchronization lag to different replicas for different DataRegions.
+  - **Sync Speed Diff of Peers:** The maximum difference in synchronization lag across replicas for different DataRegions.
+ - **IoTConsensus LogEntriesFromWAL Rate:** The rate of retrieving log entries from WAL for different DataRegions.
+ - **IoTConsensus LogEntriesFromQueue Rate:** The rate of retrieving log entries from the queue for different DataRegions.
+- Execution Time of Different Stages
+ - **The Time Consumed of Different Stages (avg):** The average execution time of different stages in IoT Consensus.
+ - **The Time Consumed of Different Stages (50%):** The median execution time of different stages in IoT Consensus.
+ - **The Time Consumed of Different Stages (99%):** The P99 execution time of different stages in IoT Consensus.
+
+#### 3.4.11 Consensus - DataRegion Ratis Consensus
+
+- **Ratis Stage Time:** The execution time of different stages in Ratis.
+- **Write Log Entry:** The execution time for writing logs in Ratis.
+- **Remote / Local Write Time:** The time taken for remote and local writes in Ratis.
+- **Remote / Local Write QPS:** The QPS for remote and local writes in Ratis.
+- **RatisConsensus Memory:** The memory usage of Ratis consensus.
+
+#### 3.4.12 Consensus - SchemaRegion Ratis Consensus
+
+- **Ratis Stage Time:** The execution time of different stages in Ratis.
+- **Write Log Entry:** The execution time for writing logs in Ratis.
+- **Remote / Local Write Time:** The time taken for remote and local writes in Ratis.
+- **Remote / Local Write QPS:** The QPS for remote and local writes in Ratis.
+- **RatisConsensus Memory:** The memory usage of Ratis consensus.
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
new file mode 100644
index 000000000..a5dd070ba
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
@@ -0,0 +1,192 @@
+
+# Stand-Alone Deployment
+
+This guide introduces how to set up a standalone IoTDB instance, which includes one ConfigNode and one DataNode (commonly referred to as 1C1D).
+
+## 1 Prerequisites
+
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
+
+2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
+
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
+```
+
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
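+
+To confirm the mapping took effect, you can resolve the hostname before continuing (a minimal sanity check; `getent` is assumed to be available, as it is on most Linux distributions):
+
+```Bash
+getent hosts iotdb-1   # expected output: 192.168.1.3   iotdb-1
+```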
+
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#22-parameters-configuration) section.
+
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
+
+5. **User Permissions**: Choose one of the following permissions during installation and deployment:
+ - **Root User (Recommended)**: This avoids permission-related issues.
+ - **Non-Root User**:
+ - Use the same user for all operations, including starting, activating, and stopping services.
+ - Avoid using `sudo`, which can cause permission conflicts.
+
+6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access, and refer to the [Monitoring Board Install and Deploy](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) guide for deployment steps.
+
+## 2 Installation Steps
+
+### 2.1 Extract Installation Package
+
+Unzip the installation package and navigate to the directory:
+
+```Bash
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
+```
+
+### 2.2 Parameters Configuration
+
+#### 2.2.1 Memory Configuration
+
+Edit the following files for memory allocation:
+
+- **ConfigNode**: `conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `conf/datanode-env.sh` (or `.bat` for Windows)
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :---------------------------------- | :---------- | :-------------- | :---------------------- |
+| MEMORY_SIZE | Total memory allocated for the node | Empty | As needed | Effective after restart |
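+
+For example, the relevant line in each script might look as follows (a sketch only; the values are illustrative and should be sized to your workload):
+
+```Bash
+# conf/confignode-env.sh
+MEMORY_SIZE=2G
+
+# conf/datanode-env.sh
+MEMORY_SIZE=4G
+```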
+
+#### 2.2.2 General Configuration
+
+Set the following parameters in `conf/iotdb-system.properties`. Refer to `conf/iotdb-system.properties.template` for a complete list.
+
+
+**Cluster-Level Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------ | :-------------------------- | :------------- | :-------------- | :----------------------------------------------------------- |
+| cluster_name | Name of the cluster | defaultCluster | Customizable | If there is no specific requirement, keep the default value. |
+| schema_replication_factor | Number of metadata replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
+| data_replication_factor | Number of data replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
+
+**ConfigNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
+
+**DataNode Parameters**:
+
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
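+
+For a standalone node most defaults are usable as-is; the key edits are the internal and seed addresses. A minimal sketch, assuming the hostname `iotdb-1` from the prerequisites and that the keys are present (uncommented) in `conf/iotdb-system.properties`:
+
+```Bash
+sed -i \
+  -e 's/^cn_internal_address=.*/cn_internal_address=iotdb-1/' \
+  -e 's/^cn_seed_config_node=.*/cn_seed_config_node=iotdb-1:10710/' \
+  -e 's/^dn_internal_address=.*/dn_internal_address=iotdb-1/' \
+  -e 's/^dn_seed_config_node=.*/dn_seed_config_node=iotdb-1:10710/' \
+  conf/iotdb-system.properties
+```
+
+If the keys are commented out in your template, uncomment them first so the `sed` substitutions match.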
+
+### 2.3 Start ConfigNode
+
+Navigate to the `sbin` directory and start ConfigNode:
+
+```Bash
+./sbin/start-confignode.sh -d # The "-d" flag starts the process in the background.
+```
+
+If the startup fails, refer to the [Common Issues](#3-common-issues) section below for troubleshooting.
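+
+Before moving on, you can confirm the ConfigNode process is up (the log file name assumes the default logback configuration):
+
+```Bash
+jps | grep ConfigNode
+tail -n 20 logs/log_confignode_all.log
+```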
+
+
+
+### 2.4 Start DataNode
+
+Navigate to the `sbin` directory of IoTDB and start the DataNode:
+
+```Bash
+./sbin/start-datanode.sh -d # The "-d" flag starts the process in the background.
+```
+
+### 2.5 Verify Activation
+
+Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated.
+
+
+
+## 3 Common Issues
+
+1. ConfigNode Fails to Start
+
+ 1. Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
+
+ **Clean the Environment**
+
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
+
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+
+ 3. If processes remain, terminate them manually:
+ ```Bash
+    kill -9 <pid>   # replace <pid> with a process ID from the previous step
+
+    # For systems with a single IoTDB instance, you can clean up residual processes with:
+    ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+ 4. Delete the `data` and `logs` directories:
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 4 Appendix
+
+### 4.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 4.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
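+
+For instance, several of these flags can be combined (paths are illustrative):
+
+```Bash
+./sbin/start-datanode.sh -d -c conf -p /tmp/datanode.pid
+```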
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md
index b28301df5..86c70ce67 100644
--- a/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md
@@ -20,158 +20,148 @@
-->
# Stand-Alone Deployment
-This chapter will introduce how to start an IoTDB standalone instance, which includes 1 ConfigNode and 1 DataNode (commonly known as 1C1D).
+This guide introduces how to set up a standalone TimechoDB instance, which includes one ConfigNode and one DataNode (commonly referred to as 1C1D).
-## Note
+## 1 Prerequisites
-1. Before installation, ensure that the system is complete by referring to [System Requirements](./Environment-Requirements.md).
+1. **System Preparation**: Ensure the system has been configured according to the [System Requirements](../Deployment-and-Maintenance/Environment-Requirements.md).
- 2. It is recommended to prioritize using 'hostname' for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure/etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure IoTDB's' cn_internal-address' using the host name dn_internal_address、dn_rpc_address。
+2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run:
- ```shell
- echo "192.168.1.3 iotdb-1" >> /etc/hosts
- ```
-
- 3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings.
-
- 4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions.
-
- 5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can:
-
- - Using root user (recommended): Using root user can avoid issues such as permissions.
- - Using a fixed non root user:
- - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users.
- - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues.
-
- 6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department, and the steps for deploying the monitoring panel can be referred to:[Monitoring Board Install and Deploy](./Monitoring-panel-deployment.md).
-
-## Installation Steps
-
-### 1、Unzip the installation package and enter the installation directory
-
-```Plain
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+```Bash
+echo "192.168.1.3 iotdb-1" >> /etc/hosts
```
-### 2、Parameter Configuration
-
-#### Memory Configuration
+Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration.
-- conf/confignode-env.sh(or .bat)
+3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the [Parameter Configuration](#22-parameters-configuration) section.
- | **Configuration** | **Description** | **Default** | **Recommended value** | Note |
- | :---------------: | :----------------------------------------------------------: | :---------: | :----------------------------------------------------------: | :---------------------------------: |
- | MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues.
-- conf/datanode-env.sh(or .bat)
+5. **User Permissions**: Choose one of the following permissions during installation and deployment:
+ - **Root User (Recommended)**: This avoids permission-related issues.
+ - **Non-Root User**:
+ - Use the same user for all operations, including starting, activating, and stopping services.
+ - Avoid using `sudo`, which can cause permission conflicts.
- | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
- | :---------------: | :----------------------------------------------------------: | :---------: | :----------------------------------------------------------: | :---------------------------------: |
- | MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect |
+6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access, and refer to the [Monitoring Board Install and Deploy](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) guide for deployment steps.
-#### Function Configuration
+## 2 Installation Steps
-The parameters that actually take effect in the system are in the file conf/iotdb-system.exe. To start, the following parameters need to be set, which can be viewed in the conf/iotdb-system.exe file for all parameters
+### 2.1 Extract Installation Package
-Cluster function configuration
+Unzip the installation package and navigate to the directory:
-| **Configuration** | **Description** | **Default** | **Recommended value** | Note |
-| :-----------------------: | :----------------------------------------------------------: | :------------: | :----------------------------------------------------------: | :---------------------------------------------------: |
-| cluster_name | Cluster Name | defaultCluster | The cluster name can be set as needed, and if there are no special needs, the default can be kept | Cannot be modified after initial startup |
-| schema_replication_factor | Number of metadata replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup |
-| data_replication_factor | Number of data replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup |
+```Bash
+unzip timechodb-{version}-bin.zip
+cd timechodb-{version}-bin
+```
-ConfigNode Configuration
+### 2.2 Parameters Configuration
-| **Configuration** | **Description** | **Default** | **Recommended value** | Note |
-| :-----------------: | :----------------------------------------------------------: | :-------------: | :----------------------------------------------------------: | :--------------------------------------: |
-| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup |
-| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | Cannot be modified after initial startup |
-| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | Cannot be modified after initial startup |
-| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup |
+#### 2.2.1 Memory Configuration
-DataNode Configuration
+Edit the following files for memory allocation:
-| **Configuration** | **Description** | **Default** | **Recommended value** | **Note** |
-| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------- |
-| dn_rpc_address | The address of the client RPC service | 0.0.0.0 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Restarting the service takes effect |
-| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | Restarting the service takes effect |
-| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup |
-| dn_internal_port | The port used by DataNode for communication within the cluster | 10730 | 10730 | Cannot be modified after initial startup |
-| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | Cannot be modified after initial startup |
-| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | Cannot be modified after initial startup |
-| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | Cannot be modified after initial startup |
-| dn_seed_config_node | The ConfigNode address that the node connects to when registering to join the cluster, i.e. cn_internal-address: cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup |
+- **ConfigNode**: `conf/confignode-env.sh` (or `.bat` for Windows)
+- **DataNode**: `conf/datanode-env.sh` (or `.bat` for Windows)
-### 3、Start ConfigNode
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------ | :---------------------------------- | :---------- | :-------------- | :---------------------- |
+| MEMORY_SIZE | Total memory allocated for the node | Empty | As needed | Effective after restart |
-Enter the sbin directory of iotdb and start confignode
+#### 2.2.2 General Configuration
-```shell
+Set the following parameters in `conf/iotdb-system.properties`. Refer to `conf/iotdb-system.properties.template` for a complete list.
-./start-confignode.sh -d #The "- d" parameter will start in the background
+**Cluster-Level Parameters**:
-```
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------ | :-------------------------- | :------------- | :-------------- | :----------------------------------------------------------- |
+| cluster_name | Name of the cluster | defaultCluster | Customizable | If there is no specific requirement, keep the default value. |
+| schema_replication_factor | Number of metadata replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
+| data_replication_factor | Number of data replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. |
-If the startup fails, please refer to [Common Problem](#common-problem).
+**ConfigNode Parameters**:
-### 4、Start DataNode
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | This parameter cannot be modified after the first startup. |
+| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | This parameter cannot be modified after the first startup. |
+| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
- Enter the sbin directory of iotdb and start datanode:
+**DataNode Parameters**:
-```shell
+| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** |
+| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- |
+| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. |
+| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | Effective after restarting the service. |
+| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. |
+| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | This parameter cannot be modified after the first startup. |
+| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | This parameter cannot be modified after the first startup. |
+| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | This parameter cannot be modified after the first startup. |
+| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | This parameter cannot be modified after the first startup. |
+| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. |
-cd sbin
+### 2.3 Start ConfigNode
-./start-datanode.sh -d # The "- d" parameter will start in the background
+Navigate to the `sbin` directory and start ConfigNode:
+```Bash
+./sbin/start-confignode.sh -d # The "-d" flag starts the process in the background.
```
-### 5、Activate Database
+If the startup fails, refer to the [Common Issues](#3-common-issues) section below for troubleshooting.
-#### Method 1: Activate file copy activation
-- After starting the confignode datanode node, enter the activation folder and copy the systeminfo file to the Timecho staff
-- Received the license file returned by the staff
+### 2.4 Start DataNode
-- Place the license file in the activation folder of the corresponding node;
+Navigate to the `sbin` directory of IoTDB and start the DataNode:
-#### Method 2: Activate Script Activation
+```Bash
+./sbin/start-datanode.sh -d # The "-d" flag starts the process in the background.
+```
-- Retrieve the machine codes of 3 machines in sequence and enter IoTDB CLI
+### 2.5 Activate the Database
- - Table Model CLI Enter Command:
+#### Option 1: File-Based Activation
- ```SQL
- # Linux or MACOS
- ./start-cli.sh -sql_dialect table
-
- # windows
- ./start-cli.bat -sql_dialect table
- ```
+- Start both the ConfigNode and DataNode.
+- Navigate to the `activation` folder and copy the `system_info` file.
+- Send the `system_info` file to the Timecho team.
+- Place the license file provided by the Timecho team into the corresponding `activation` folder for each node.
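+
+A sketch of the file handoff described above (paths assume the default installation layout; the license file name is whatever the Timecho team provides):
+
+```Bash
+# Collect the machine code file to send to the Timecho team
+cp activation/system_info /tmp/system_info-$(hostname)
+
+# After receiving the license, place it in the activation folder
+cp /path/to/license activation/
+```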
- - Enter the tree model CLI command:
+#### Option 2: Command-Based Activation
- ```SQL
- # Linux or MACOS
- ./start-cli.sh
-
- # windows
- ./start-cli.bat
- ```
+1. Enter the IoTDB CLI.
+   - **For Table Model**:
+
+     ```SQL
+     # For Linux or macOS
+     ./start-cli.sh -sql_dialect table
+
+     # For Windows
+     ./start-cli.bat -sql_dialect table
+     ```
-- Execute the following to obtain the machine code required for activation:
- - Note: Currently, activation is only supported in tree models
-
- ```Bash
+   - **For Tree Model**:
+
+     ```SQL
+     # For Linux or macOS
+     ./start-cli.sh
+
+     # For Windows
+     ./start-cli.bat
+     ```
+2. Run the following command to retrieve the machine code required for activation:
- show system info
+```Bash
+show system info
+```
- ```
+**Note**: Activation is currently supported only in the Tree Model.
-- The following information is displayed, which shows the machine code of one machine:
+3. Copy the returned machine code (displayed as a green string) and send it to the Timecho team:
```Bash
+--------------------------------------------------------------+
@@ -183,84 +173,78 @@ Total line number = 1
It costs 0.030s
```
-- Enter the activation code returned by the staff into the CLI and enter the following content
- - Note: The activation code needs to be marked with a `'`symbol before and after, as shown in
+4. Enter the activation code provided by the Timecho team in the CLI using the following format. Wrap the activation code in single quotes ('):
```Bash
IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA==='
```
-### 6、Verify Activation
+### 2.6 Verify Activation
-When the "ClusterActivation Status" field is displayed as Activated, it indicates successful activation
+Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated.
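+
+One way to read this field is from the CLI (a hedged sketch; `-e` executes a single statement and exits, and the field is assumed to appear in the `show cluster` output as in the screenshot below):
+
+```Bash
+./sbin/start-cli.sh -e "show cluster"
+```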

-## Common Problem
-
-1. Multiple prompts indicating activation failure during deployment process
-
- - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user.
-
- - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user.
-
-2. Confignode failed to start
-
- Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified.
-
- Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions.
+## 3 Common Issues
+1. Activation Fails Repeatedly
- Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart.
+ 1. Use the `ls -al` command to verify that the ownership of the installation directory matches the current user.
+ 2. Check the ownership of all files in the `./activation` directory to ensure they belong to the current user.
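+
+    For example (run from the installation root; a sketch of the ownership checks above):
+
+    ```Bash
+    ls -al .              # owner of the installation directory
+    ls -al ./activation   # owner of the activation files
+    ```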
- Step 4: Clean up the environment:
+2. ConfigNode Fails to Start
- a. Terminate all ConfigNode Node and DataNode processes.
+ 1. Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed.
+ 2. Check the logs for any other errors. If unresolved, contact technical support for assistance.
+ 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps:
- ```Bash
- # 1. Stop the ConfigNode and DataNode services
- sbin/stop-standalone.sh
-
- # 2. Check for any remaining processes
- jps
- # Or
- ps -ef|gerp iotdb
-
- # 3. If there are any remaining processes, manually kill the
- kill -9
- # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes
- ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9
- ```
+ **Clean the Environment**
- b. Delete the data and logs directories.
-
- Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory.
-
- ```Bash
- cd /data/iotdb
- rm -rf data logs
- ```
-
-## Appendix
-
-### Introduction to Configuration Node Parameters
-
-| Parameter | Description | Is it required |
-| :-------- | :---------------------------------------------- | :----------------- |
-| -d | Start in daemon mode, running in the background | No |
-
-### Introduction to Datanode Node Parameters
+ 1. Stop all ConfigNode and DataNode processes:
+ ```Bash
+ sbin/stop-standalone.sh
+ ```
-| Abbreviation | Description | Is it required |
-| :----------- | :----------------------------------------------------------- | :------------- |
-| -v | Show version information | No |
-| -f | Run the script in the foreground, do not put it in the background | No |
-| -d | Start in daemon mode, i.e. run in the background | No |
-| -p | Specify a file to store the process ID for process management | No |
-| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No |
-| -g | Print detailed garbage collection (GC) information | No |
-| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No |
-| -E | Specify the path of the JVM error log file | No |
-| -D | Define system properties, in the format key=value | No |
-| -X | Pass -XX parameters directly to the JVM | No |
-| -h | Help instruction | No |
+ 2. Check for any remaining processes:
+ ```Bash
+ jps
+ # or
+ ps -ef | grep iotdb
+ ```
+ 3. If processes remain, terminate them manually:
+ ```Bash
+    kill -9 <pid>   # replace <pid> with a process ID from the previous step
+
+    # For systems with a single IoTDB instance, you can clean up residual processes with:
+    ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9
+ ```
+
+ 4. Delete the `data` and `logs` directories:
+ ```Bash
+ cd /data/iotdb
+ rm -rf data logs
+ ```
+
+## 4 Appendix
+
+### 4.1 ConfigNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :---------------------------------------------------------- | :------- |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+
+### 4.2 DataNode Parameters
+
+| Parameter | Description | Required |
+| :-------- | :----------------------------------------------------------- | :------- |
+| -v | Displays version information. | No |
+| -f | Runs the script in the foreground without backgrounding it. | No |
+| -d | Starts the process in daemon mode (runs in the background). | No |
+| -p | Specifies a file to store the process ID for process management. | No |
+| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No |
+| -g | Prints detailed garbage collection (GC) information. | No |
+| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No |
+| -E | Specifies the file for JVM error logs. | No |
+| -D | Defines system properties in the format `key=value`. | No |
+| -X | Passes `-XX` options directly to the JVM. | No |
+| -h | Displays the help instructions. | No |
\ No newline at end of file
diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_apache.md
index b11e592fe..4389a704f 100644
--- a/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_apache.md
+++ b/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_apache.md
@@ -50,7 +50,7 @@ This section will take the IoTDB classic cluster deployment architecture 3C3D (3
 1. Prepare the IoTDB database installation package: apache-iotdb-{version}-all-bin.zip (for details on obtaining the package, see [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_apache.md))
-2. Configure the operating system environment according to environmental requirements (system environment configuration can be found in:[Environment Requirements](https://iotdb.apache.org/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.html))
+2. Configure the operating system environment according to environmental requirements (system environment configuration can be found in: [Environment Requirement](../Deployment-and-Maintenance/Environment-Requirements.md))
## Installation Steps
diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
index 25d61324f..bd7d0aee5 100644
--- a/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
+++ b/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md
@@ -55,8 +55,9 @@ This guide describes how to manually deploy a cluster instance consisting of 3 C
## Preparation
-1. Obtain the TimechoDB installation package: `timechodb-{version}-bin.zip` following[IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)
-2. Configure the operating system environment according to [Environment Requirement](./Environment-Requirements.md)
+1. Obtain the TimechoDB installation package: `timechodb-{version}-bin.zip` following [IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)
+
+2. Configure the operating system environment according to [Environment Requirement](../Deployment-and-Maintenance/Environment-Requirements.md)
## Installation Steps
diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md
index aab760b7b..45aeedd4e 100644
--- a/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md
+++ b/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -18,25 +18,30 @@
under the License.
-->
-# Package Acquisition
+# Obtain IoTDB
+
+## 1 How to obtain IoTDB
-## How to obtain installation packages
 The installation package can be directly obtained from the Apache IoTDB official website: https://iotdb.apache.org/Download/
-## Installation Package Structure
+
+## 2 Installation Package Structure
+
+
 After decompressing the installation package (`apache-iotdb-{version}-all-bin.zip`), the directory structure is as follows:
-| **catalogue** | **Type** | **Explanation** |
-| :--------------: | :------: | :----------------------------------------------------------: |
-| conf | folder | Configuration file directory, including configuration files such as ConfigNode, DataNode, JMX, and logback |
-| data | folder | The default data file directory contains data files for ConfigNode and DataNode. (The directory will only be generated after starting the program) |
-| lib | folder | IoTDB executable library file directory |
-| licenses | folder | Open source community certificate file directory |
-| logs | folder | The default log file directory, which includes log files for ConfigNode and DataNode (this directory will only be generated after starting the program) |
-| sbin | folder | Main script directory, including start, stop, and other scripts |
-| tools | folder | Directory of System Peripheral Tools |
-| ext | folder | Related files for pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | file | certificate |
-| NOTICE | file | Tip |
-| README_ZH\.md | file | Explanation of the Chinese version in Markdown format |
-| README\.md | file | Instructions for use |
-| RELEASE_NOTES\.md | file | Version Description |
\ No newline at end of file
+
+| **Item** | **Type** | **Description** |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md
index 86e0af2aa..261c8a10f 100644
--- a/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md
+++ b/src/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md
@@ -19,24 +19,28 @@
-->
# Obtain TimechoDB
-## How to obtain TimechoDB
-The enterprise version installation package can be obtained through product trial application or by directly contacting the business personnel who are in contact with you.
-## Installation Package Structure
-Install the package after decompression(iotdb-enterprise-{version}-bin.zip),The directory structure after unpacking the installation package is as follows:
-| **catalogue** | **Type** | **Explanation** |
-| :--------------: | -------- | ------------------------------------------------------------ |
-| activation | folder | The directory where the activation file is located, including the generated machine code and the enterprise version activation code obtained from the business side (this directory will only be generated after starting ConfigNode to obtain the activation code) |
-| conf | folder | Configuration file directory, including configuration files such as ConfigNode, DataNode, JMX, and logback |
-| data | folder | The default data file directory contains data files for ConfigNode and DataNode. (The directory will only be generated after starting the program) |
-| lib | folder | IoTDB executable library file directory |
-| licenses | folder | Open source community certificate file directory |
-| logs | folder | The default log file directory, which includes log files for ConfigNode and DataNode (this directory will only be generated after starting the program) |
-| sbin | folder | Main script directory, including start, stop, and other scripts |
-| tools | folder | Directory of System Peripheral Tools |
-| ext | folder | Related files for pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | file | certificate |
-| NOTICE | file | Tip |
-| README_ZH\.md | file | Explanation of the Chinese version in Markdown format |
-| README\.md | file | Instructions for use |
-| RELEASE_NOTES\.md | file | Version Description |
+## 1 How to obtain TimechoDB
+
+The TimechoDB installation package can be obtained through product trial application or by directly contacting the Timecho team.
+
+## 2 Installation Package Structure
+
+After unpacking the installation package (`iotdb-enterprise-{version}-bin.zip`), you will see the following directory structure:
+
+| **Item** | **Type** | **Description** |
+| :--------------- | :------- | :----------------------------------------------------------- |
+| activation | Folder | Directory for activation files, including the generated machine code and the TimechoDB activation code obtained from Timecho staff. *(This directory is generated after starting the ConfigNode, enabling you to obtain the activation code.)* |
+| conf | Folder | Configuration files directory, containing ConfigNode, DataNode, JMX, and logback configuration files. |
+| data | Folder | Default data file directory, containing data files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| lib | Folder | Library files directory. |
+| licenses | Folder | Directory for open-source license certificates. |
+| logs | Folder | Default log file directory, containing log files for ConfigNode and DataNode. *(This directory is generated after starting the program.)* |
+| sbin | Folder | Main scripts directory, containing scripts for starting, stopping, and managing the database. |
+| tools | Folder | Tools directory. |
+| ext | Folder | Directory for pipe, trigger, and UDF plugin-related files. |
+| LICENSE | File | Open-source license file. |
+| NOTICE | File | Open-source notice file. |
+| README_ZH.md | File | User manual (Chinese version). |
+| README.md | File | User manual (English version). |
+| RELEASE_NOTES.md | File | Release notes. |
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
index 12f56432b..6764698b6 100644
--- a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
+++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
@@ -54,8 +54,8 @@
## 2 准备步骤
-1. 准备IoTDB数据库安装包 :timechodb-{version}-bin.zip(安装包获取见:[链接](./IoTDB-Package_timecho.md))
-2. 按环境要求配置好操作系统环境(系统环境配置见:[链接](./Environment-Requirements.md))
+1. 准备IoTDB数据库安装包 :apache-iotdb-{version}-all-bin.zip(安装包获取见:[链接](../Deployment-and-Maintenance/IoTDB-Package_apache.md))
+2. 按环境要求配置好操作系统环境(系统环境配置见:[链接](../Deployment-and-Maintenance/Environment-Requirements.md))
## 3 安装步骤
@@ -82,8 +82,8 @@ echo "11.101.17.226 iotdb-3" >> /etc/hosts
解压安装包并进入安装目录
```shell
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
```
#### 3.2.1 环境脚本配置
@@ -270,10 +270,7 @@ sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port]
## 5 常见问题
-1. 部署过程中多次提示激活失败
- - 使用 `ls -al` 命令:使用 `ls -al` 命令检查安装包根目录的所有者信息是否为当前用户。
- - 检查激活目录:检查 `./activation` 目录下的所有文件,所有者信息是否为当前用户。
-2. Confignode节点启动失败
+1. Confignode节点启动失败
- 步骤 1: 请查看启动日志,检查是否修改了某些首次启动后不可改的参数。
- 步骤 2: 请查看启动日志,检查是否出现其他异常。日志中若存在异常现象,请联系天谋技术支持人员咨询解决方案。
- 步骤 3: 如果是首次部署或者数据可删除,也可按下述步骤清理环境,重新部署后,再次启动。
diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
index 80e7cb01b..9e3270818 100644
--- a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
+++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -31,14 +31,14 @@
| ---------------- | -------- | ------------------------------------------------------------ |
| conf | 文件夹 | 配置文件目录,包含 ConfigNode、DataNode、JMX 和 logback 等配置文件 |
| data | 文件夹 | 默认的数据文件目录,包含 ConfigNode 和 DataNode 的数据文件。(启动程序后才会生成该目录) |
-| lib | 文件夹 | IoTDB可执行库文件目录 |
-| licenses | 文件夹 | 开源社区证书文件目录 |
+| lib | 文件夹 | 库文件目录 |
+| licenses | 文件夹 | 开源协议证书文件目录 |
| logs | 文件夹 | 默认的日志文件目录,包含 ConfigNode 和 DataNode 的日志文件(启动程序后才会生成该目录) |
-| sbin | 文件夹 | 主要脚本目录,包含启、停等脚本等 |
-| tools | 文件夹 | 系统周边工具目录 |
-| ext | 文件夹 | pipe,trigger,udf插件的相关文件(需要使用时用户自行创建) |
-| LICENSE | 文件 | 证书 |
-| NOTICE | 文件 | 提示 |
-| README_ZH\.md | 文件 | markdown格式的中文版说明 |
-| README\.md | 文件 | 使用说明 |
-| RELEASE_NOTES\.md | 文件 | 版本说明 |
\ No newline at end of file
+| sbin | 文件夹 | 主要脚本目录,包含数据库启、停等脚本 |
+| tools | 文件夹 | 工具目录 |
+| ext | 文件夹 | pipe,trigger,udf插件的相关文件 |
+| LICENSE | 文件 | 开源许可证文件 |
+| NOTICE | 文件 | 开源声明文件 |
+| README_ZH.md | 文件 | 使用说明(中文版) |
+| README.md | 文件 | 使用说明(英文版) |
+| RELEASE_NOTES.md | 文件 | 版本说明 |
diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
index 9ac555adc..0a248dff4 100644
--- a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
+++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
@@ -45,8 +45,8 @@
### 2.1 Extract the Installation Package and Enter the Installation Directory
```Plain
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
```
### 2.2 Parameter Configuration
@@ -124,10 +124,7 @@ DataNode configuration
## 3 Frequently Asked Questions
-1. Repeated activation failure prompts during deployment
- - Use the `ls -al` command: check whether the owner of the installation package's root directory is the current user.
- - Check the activation directory: check whether the owner of every file under the `./activation` directory is the current user.
-2. ConfigNode fails to start
+1. ConfigNode fails to start
- Step 1: Check the startup log to see whether any parameters that cannot be changed after the first startup were modified.
- Step 2: Check the startup log for any other exceptions. If the log shows anything abnormal, contact Timecho technical support for assistance.
- Step 3: If this is the first deployment, or if the data can be deleted, you can also clean up the environment as described below, redeploy, and start again.
diff --git a/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md
index 80e7cb01b..9e3270818 100644
--- a/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md
+++ b/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -31,14 +31,14 @@
| ---------------- | -------- | ------------------------------------------------------------ |
| conf | Folder | Configuration file directory, containing configuration files for ConfigNode, DataNode, JMX, logback, etc. |
| data | Folder | Default data file directory, containing the data files of ConfigNode and DataNode (generated only after the program is started) |
-| lib | Folder | IoTDB executable library directory |
-| licenses | Folder | Open-source community license directory |
+| lib | Folder | Library file directory |
+| licenses | Folder | Open-source license directory |
| logs | Folder | Default log file directory, containing the log files of ConfigNode and DataNode (generated only after the program is started) |
-| sbin | Folder | Main script directory, containing start and stop scripts, etc. |
-| tools | Folder | Peripheral system tools directory |
-| ext | Folder | Files related to pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | File | License |
-| NOTICE | File | Notice |
-| README_ZH\.md | File | Chinese user manual in Markdown format |
-| README\.md | File | User manual |
-| RELEASE_NOTES\.md | File | Release notes |
\ No newline at end of file
+| sbin | Folder | Main script directory, containing database start and stop scripts |
+| tools | Folder | Tools directory |
+| ext | Folder | Files related to pipe, trigger, and UDF plugins |
+| LICENSE | File | Open-source license file |
+| NOTICE | File | Open-source notice file |
+| README_ZH.md | File | User manual (Chinese version) |
+| README.md | File | User manual (English version) |
+| RELEASE_NOTES.md | File | Release notes |
diff --git a/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md
index f824da365..0fd789513 100644
--- a/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md
+++ b/src/zh/UserGuide/Master/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md
@@ -30,17 +30,17 @@
| **Directory** | **Type** | **Description** |
| ---------------- | -------- | ------------------------------------------------------------ |
-| activation | Folder | Directory of activation files, including the generated machine code and the enterprise-edition activation code obtained from the business team (generated only after ConfigNode is started, at which point the activation code can be obtained) |
+| activation | Folder | Directory of activation files, including the generated machine code and the enterprise-edition activation code obtained from Timecho staff (generated only after ConfigNode is started, at which point the activation code can be obtained) |
| conf | Folder | Configuration file directory, containing configuration files for ConfigNode, DataNode, JMX, logback, etc. |
| data | Folder | Default data file directory, containing the data files of ConfigNode and DataNode (generated only after the program is started) |
-| lib | Folder | IoTDB executable library directory |
-| licenses | Folder | Open-source community license directory |
+| lib | Folder | Library file directory |
+| licenses | Folder | Open-source license directory |
| logs | Folder | Default log file directory, containing the log files of ConfigNode and DataNode (generated only after the program is started) |
-| sbin | Folder | Main script directory, containing start and stop scripts, etc. |
-| tools | Folder | Peripheral system tools directory |
-| ext | Folder | Files related to pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | File | License |
-| NOTICE | File | Notice |
-| README_ZH\.md | File | Chinese user manual in Markdown format |
-| README\.md | File | User manual |
-| RELEASE_NOTES\.md | File | Release notes |
+| sbin | Folder | Main script directory, containing database start and stop scripts |
+| tools | Folder | Tools directory |
+| ext | Folder | Files related to pipe, trigger, and UDF plugins |
+| LICENSE | File | Open-source license file |
+| NOTICE | File | Open-source notice file |
+| README_ZH.md | File | User manual (Chinese version) |
+| README.md | File | User manual (English version) |
+| RELEASE_NOTES.md | File | Release notes |
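The activation row above notes that the directory appears only after ConfigNode's first startup. Echoing the ownership check that the FAQ sections in this patch previously carried, a minimal verification sketch:

```shell
# The activation directory is created only after ConfigNode's first startup.
# Confirm it exists and that every file in it is owned by the current user;
# an ownership mismatch was a known cause of repeated activation failures.
ls -al ./activation
```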
diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
index bcd80510b..6764698b6 100644
--- a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
+++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md
@@ -54,8 +54,8 @@
## 2 Preparation Steps
-1. Prepare the IoTDB installation package: timechodb-{version}-bin.zip (see [link](./IoTDB-Package_apache.md) for how to obtain it)
-2. Configure the operating system according to the environment requirements (see [link](./Environment-Requirements.md) for system configuration)
+1. Prepare the IoTDB installation package: apache-iotdb-{version}-all-bin.zip (see [link](../Deployment-and-Maintenance/IoTDB-Package_apache.md) for how to obtain it)
+2. Configure the operating system according to the environment requirements (see [link](../Deployment-and-Maintenance/Environment-Requirements.md) for system configuration)
## 3 Installation Steps
@@ -82,8 +82,8 @@ echo "11.101.17.226 iotdb-3" >> /etc/hosts
Extract the installation package and enter the installation directory
```shell
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
```
#### 3.2.1 Environment Script Configuration
@@ -270,10 +270,7 @@ sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port]
## 5 Frequently Asked Questions
-1. Repeated activation failure prompts during deployment
- - Use the `ls -al` command: check whether the owner of the installation package's root directory is the current user.
- - Check the activation directory: check whether the owner of every file under the `./activation` directory is the current user.
-2. ConfigNode fails to start
+1. ConfigNode fails to start
- Step 1: Check the startup log to see whether any parameters that cannot be changed after the first startup were modified.
- Step 2: Check the startup log for any other exceptions. If the log shows anything abnormal, contact Timecho technical support for assistance.
- Step 3: If this is the first deployment, or if the data can be deleted, you can also clean up the environment as described below, redeploy, and start again.
diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
index 80e7cb01b..9e3270818 100644
--- a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
+++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -31,14 +31,14 @@
| ---------------- | -------- | ------------------------------------------------------------ |
| conf | Folder | Configuration file directory, containing configuration files for ConfigNode, DataNode, JMX, logback, etc. |
| data | Folder | Default data file directory, containing the data files of ConfigNode and DataNode (generated only after the program is started) |
-| lib | Folder | IoTDB executable library directory |
-| licenses | Folder | Open-source community license directory |
+| lib | Folder | Library file directory |
+| licenses | Folder | Open-source license directory |
| logs | Folder | Default log file directory, containing the log files of ConfigNode and DataNode (generated only after the program is started) |
-| sbin | Folder | Main script directory, containing start and stop scripts, etc. |
-| tools | Folder | Peripheral system tools directory |
-| ext | Folder | Files related to pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | File | License |
-| NOTICE | File | Notice |
-| README_ZH\.md | File | Chinese user manual in Markdown format |
-| README\.md | File | User manual |
-| RELEASE_NOTES\.md | File | Release notes |
\ No newline at end of file
+| sbin | Folder | Main script directory, containing database start and stop scripts |
+| tools | Folder | Tools directory |
+| ext | Folder | Files related to pipe, trigger, and UDF plugins |
+| LICENSE | File | Open-source license file |
+| NOTICE | File | Open-source notice file |
+| README_ZH.md | File | User manual (Chinese version) |
+| README.md | File | User manual (English version) |
+| RELEASE_NOTES.md | File | Release notes |
diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
index 9ac555adc..0a248dff4 100644
--- a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
+++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md
@@ -45,8 +45,8 @@
### 2.1 Extract the Installation Package and Enter the Installation Directory
```Plain
-unzip timechodb-{version}-bin.zip
-cd timechodb-{version}-bin
+unzip apache-iotdb-{version}-all-bin.zip
+cd apache-iotdb-{version}-all-bin
```
### 2.2 Parameter Configuration
@@ -124,10 +124,7 @@ DataNode configuration
## 3 Frequently Asked Questions
-1. Repeated activation failure prompts during deployment
- - Use the `ls -al` command: check whether the owner of the installation package's root directory is the current user.
- - Check the activation directory: check whether the owner of every file under the `./activation` directory is the current user.
-2. ConfigNode fails to start
+1. ConfigNode fails to start
- Step 1: Check the startup log to see whether any parameters that cannot be changed after the first startup were modified.
- Step 2: Check the startup log for any other exceptions. If the log shows anything abnormal, contact Timecho technical support for assistance.
- Step 3: If this is the first deployment, or if the data can be deleted, you can also clean up the environment as described below, redeploy, and start again.
diff --git a/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md
index 80e7cb01b..9e3270818 100644
--- a/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md
+++ b/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_apache.md
@@ -31,14 +31,14 @@
| ---------------- | -------- | ------------------------------------------------------------ |
| conf | Folder | Configuration file directory, containing configuration files for ConfigNode, DataNode, JMX, logback, etc. |
| data | Folder | Default data file directory, containing the data files of ConfigNode and DataNode (generated only after the program is started) |
-| lib | Folder | IoTDB executable library directory |
-| licenses | Folder | Open-source community license directory |
+| lib | Folder | Library file directory |
+| licenses | Folder | Open-source license directory |
| logs | Folder | Default log file directory, containing the log files of ConfigNode and DataNode (generated only after the program is started) |
-| sbin | Folder | Main script directory, containing start and stop scripts, etc. |
-| tools | Folder | Peripheral system tools directory |
-| ext | Folder | Files related to pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | File | License |
-| NOTICE | File | Notice |
-| README_ZH\.md | File | Chinese user manual in Markdown format |
-| README\.md | File | User manual |
-| RELEASE_NOTES\.md | File | Release notes |
\ No newline at end of file
+| sbin | Folder | Main script directory, containing database start and stop scripts |
+| tools | Folder | Tools directory |
+| ext | Folder | Files related to pipe, trigger, and UDF plugins |
+| LICENSE | File | Open-source license file |
+| NOTICE | File | Open-source notice file |
+| README_ZH.md | File | User manual (Chinese version) |
+| README.md | File | User manual (English version) |
+| RELEASE_NOTES.md | File | Release notes |
diff --git a/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md
index f824da365..0fd789513 100644
--- a/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md
+++ b/src/zh/UserGuide/latest/Deployment-and-Maintenance/IoTDB-Package_timecho.md
@@ -30,17 +30,17 @@
| **Directory** | **Type** | **Description** |
| ---------------- | -------- | ------------------------------------------------------------ |
-| activation | Folder | Directory of activation files, including the generated machine code and the enterprise-edition activation code obtained from the business team (generated only after ConfigNode is started, at which point the activation code can be obtained) |
+| activation | Folder | Directory of activation files, including the generated machine code and the enterprise-edition activation code obtained from Timecho staff (generated only after ConfigNode is started, at which point the activation code can be obtained) |
| conf | Folder | Configuration file directory, containing configuration files for ConfigNode, DataNode, JMX, logback, etc. |
| data | Folder | Default data file directory, containing the data files of ConfigNode and DataNode (generated only after the program is started) |
-| lib | Folder | IoTDB executable library directory |
-| licenses | Folder | Open-source community license directory |
+| lib | Folder | Library file directory |
+| licenses | Folder | Open-source license directory |
| logs | Folder | Default log file directory, containing the log files of ConfigNode and DataNode (generated only after the program is started) |
-| sbin | Folder | Main script directory, containing start and stop scripts, etc. |
-| tools | Folder | Peripheral system tools directory |
-| ext | Folder | Files related to pipe, trigger, and UDF plugins (created by the user when needed) |
-| LICENSE | File | License |
-| NOTICE | File | Notice |
-| README_ZH\.md | File | Chinese user manual in Markdown format |
-| README\.md | File | User manual |
-| RELEASE_NOTES\.md | File | Release notes |
+| sbin | Folder | Main script directory, containing database start and stop scripts |
+| tools | Folder | Tools directory |
+| ext | Folder | Files related to pipe, trigger, and UDF plugins |
+| LICENSE | File | Open-source license file |
+| NOTICE | File | Open-source notice file |
+| README_ZH.md | File | User manual (Chinese version) |
+| README.md | File | User manual (English version) |
+| RELEASE_NOTES.md | File | Release notes |