diff --git a/ecp/directory.json b/ecp/directory.json index 3cc1b71..2731757 100644 --- a/ecp/directory.json +++ b/ecp/directory.json @@ -69,23 +69,31 @@ "path": "edge_service/introduction", "children": [ { - "title": "Docker方式批量安装边缘服务", + "title": "纳管边缘服务", + "path": "edge_service/batch_import" + }, + { + "title": "代理纳管边缘服务", + "path": "edge_service/edge_agent_management" + }, + { + "title": "Docker方式托管边缘服务", "path": "edge_service/batch_install" }, { - "title": "ECP 边缘节点", - "path": "edge_service/docker_node" + "title": "升级边缘服务", + "path": "edge_service/batch_upgrade" }, { - "title": "导入现有边缘服务", - "path": "edge_service/batch_import" + "title": "边缘服务管理运维", + "path": "edge_service/edge_ops" }, { - "title": "代理方式管理边缘服务", - "path": "edge_service/edge_agent_management" + "title": "边缘服务配置管理和下发", + "path": "edge_service/edge_resource_management" }, { - "title": "项目级监控统计", + "title": "边缘服务监控统计", "path": "edge_service/edge_project_statistics" }, { @@ -96,18 +104,6 @@ "title": "标签分组", "path": "edge_service/batch_tag" }, - { - "title": "升级边缘服务", - "path": "edge_service/batch_upgrade" - }, - { - "title": "边缘配置管理和下发", - "path": "edge_service/edge_resource_management" - }, - { - "title": "边缘服务管理运维", - "path": "edge_service/edge_ops" - }, { "title": "云边通道", "path": "edge_service/edge_cloud_tunnel" @@ -261,40 +257,40 @@ "path": "edge_service/introduction", "children": [ { - "title": "Edge Services Batch Installation Docker Mode", - "path": "edge_service/batch_intall" + "title": "Manage Edge Services", + "path": "edge_service/batch_import" }, { - "title": "ECP Edge Node", - "path": "edge_service/docker_node" + "title": "Manage Edge Services by Agent", + "path": "edge_service/edge_agent_management" }, { - "title": "Add Existing Edge Services", - "path": "edge_service/batch_import" + "title": "Host Edge Services By Docker", + "path": "edge_service/batch_intall" }, { - "title": "Project Level Overview", - "path": "edge_service/edge_project_statistics" + "title": "Upgrade Edge 
Services", + "path": "edge_service/batch_upgrade" }, { - "title": "Authenticate Edge Services", - "path": "edge_service/e2c" + "title": "Edge Service Operations", + "path": "edge_service/edge_ops" }, { - "title": "Tags and Grouping", - "path": "edge_service/batch_tag" + "title": "Edge Service Config Management and Delivery", + "path": "edge_service/edge_resource_management" }, { - "title": "Upgrade Edge Services", - "path": "edge_service/batch_upgrade" + "title": "Edge Service Monitoring", + "path": "edge_service/edge_project_statistics" }, { - "title": "Edge Config Management and Delivery", - "path": "edge_service/edge_resource_management" + "title": "Authenticate Edge Services", + "path": "edge_service/e2c" }, { - "title": "Edge Service Management & Operations", - "path": "edge_service/edge_ops" + "title": "Tags", + "path": "edge_service/batch_tag" }, { "title": "Edge Cloud Tunnel", diff --git a/ecp/en_US/cluster/_assets/cluster-v5-dashboard.png b/ecp/en_US/cluster/_assets/cluster-v5-dashboard.png new file mode 100644 index 0000000..58d9f64 Binary files /dev/null and b/ecp/en_US/cluster/_assets/cluster-v5-dashboard.png differ diff --git a/ecp/en_US/cluster/_assets/cluster-v5.png b/ecp/en_US/cluster/_assets/cluster-v5.png new file mode 100644 index 0000000..5df0614 Binary files /dev/null and b/ecp/en_US/cluster/_assets/cluster-v5.png differ diff --git a/ecp/en_US/cluster/add_manage.md b/ecp/en_US/cluster/add_manage.md index 220b0ba..21395cc 100644 --- a/ecp/en_US/cluster/add_manage.md +++ b/ecp/en_US/cluster/add_manage.md @@ -2,7 +2,7 @@ ECP supports adding clusters by creating (recommended) or adding existing EMQX clusters. It is recommended to add clusters by creating with ECP, which offers more extensive functionality and allows for license and connection quota sharing. -There are functional differences between creating and managing clusters on the ECP platform, as shown in the table below. 
+There are functional differences between creating (**Hosted Clusters**) and managing clusters (**Managed Clusters**) on the ECP platform, as shown in the table below. |Function|Hosted Clusters|Managed Clusters| |:--------:|:----:|:----:| @@ -35,25 +35,29 @@ The newly-created clusters will be listed in the **Cluster List** panel with the ## Add an Existing Cluster -ECP also provides the capability to manage existing EMQX clusters. To add an existing cluster, you can operate as follows: +ECP also provides the capability to manage existing EMQX clusters. ECP supports the management of EMQX v4 (version 4.4.6 and above) and EMQX v5 (version 5.6.0 and above). + +### EMQX V4 Managed Cluster 1. Log in to the ECP platform as a system admin, organization admin, or project admin. In **Workspace - Cluster** page, click **Add Cluster**. -2. Click to select **Existing Cluster** under **New Cluster**. +2. Click to select **Existing Cluster** under **Cluster Type**. -3. Provide a name for the cluster in the **Cluster Name** field. The name should be between 1-200 characters long and can include "\_" and blank spaces. +3. Select **v4** under **Cluster Version**. -4. Click **Confirm** to finish the adding process. The newly-created clusters will be listed in the **Cluster List** panel with the status **Created**. +4. Provide a name for the cluster in the **Cluster Name** field. The name should be between 1-200 characters long and can include "\_" and blank spaces. -5. Click **Register Node** and a cluster registration guide page will pop up. +5. Click **Confirm** to finish the adding process. The newly-created clusters will be listed in the **Cluster List** panel with the status **Created**. + +6. Click **Register Node** and a cluster registration guide page will pop up. cluster-running -6. Select the CPU architecture in the **CPU Architecture** field, amd64, arm, and arm64 are supported. Follow the steps on this page to finish adding the existing cluster. +7. 
Select the CPU architecture in the **CPU Architecture** field, amd64, arm, and arm64 are supported. Follow the steps on this page to finish adding the existing cluster. -7. Log in to the virtual machine or container environment hosting the EMQX cluster, such as the container named `emqx-69f4249c-emax-ee-0` in the namespace `emqx-69f4249c`. +8. Log in to the virtual machine or container environment hosting the EMQX cluster, such as the container named `emqx-69f4249c-emax-ee-0` in the namespace `emqx-69f4249c`. ```bash # Check the Pod name @@ -84,6 +88,26 @@ ECP also provides the capability to manage existing EMQX clusters. To add an exi 9. Upon returning to the **ECP Workbench - Cluster** page, you will find that the newly added existing cluster is now in the **Running** status.![](./_assets/cluster-existing.png) +### EMQX V5 Managed Cluster + +1. Log in to the ECP platform as a system admin, organization admin, or project admin. In **Workspace - Cluster** page, click **Add Cluster**. + +2. Click to select **Existing Cluster** under **Cluster Type**. + +3. Select **v5** under **Cluster Version**. + +4. Provide a name for the cluster in the **Cluster Name** field. The name should be between 1-200 characters long and can include "\_" and blank spaces. + +5. Fill in the **Cluster Address** (i.e. EMQX dashboard access address) and dashboard login username and password. + +6. Click **Confirm** to finish the adding process. The newly-created clusters will be listed in the **Cluster List** panel with the status **Created**. + +![cluster-v5](./_assets/cluster-v5.png) + +7. Click the **Details** button in the operation column to enter the cluster details, and then click the **Enter Dashboard** button. You will directly view the EMQX v5 dashboard in a new window. 
+ +![cluster-v5-dashboard](./_assets/cluster-v5-dashboard.png) + ## Cluster Status diff --git a/ecp/en_US/edge_service/_assets/edge-agent-manage-btn.png b/ecp/en_US/edge_service/_assets/edge-agent-manage-btn.png new file mode 100644 index 0000000..0ced85b Binary files /dev/null and b/ecp/en_US/edge_service/_assets/edge-agent-manage-btn.png differ diff --git a/ecp/en_US/edge_service/_assets/edge-agent-manage-list.png b/ecp/en_US/edge_service/_assets/edge-agent-manage-list.png new file mode 100644 index 0000000..7109e1b Binary files /dev/null and b/ecp/en_US/edge_service/_assets/edge-agent-manage-list.png differ diff --git a/ecp/en_US/edge_service/_assets/edge-agent-svc-status.png b/ecp/en_US/edge_service/_assets/edge-agent-svc-status.png new file mode 100644 index 0000000..7bcef3d Binary files /dev/null and b/ecp/en_US/edge_service/_assets/edge-agent-svc-status.png differ diff --git a/ecp/en_US/edge_service/_assets/edge-service-mode.png b/ecp/en_US/edge_service/_assets/edge-service-mode.png new file mode 100644 index 0000000..eb4630a Binary files /dev/null and b/ecp/en_US/edge_service/_assets/edge-service-mode.png differ diff --git a/ecp/en_US/edge_service/_assets/install-neuronex-by-docker.png b/ecp/en_US/edge_service/_assets/install-neuronex-by-docker.png index befc8cb..4e3aeb6 100644 Binary files a/ecp/en_US/edge_service/_assets/install-neuronex-by-docker.png and b/ecp/en_US/edge_service/_assets/install-neuronex-by-docker.png differ diff --git a/ecp/en_US/edge_service/batch_import.md b/ecp/en_US/edge_service/batch_import.md index 3e44eb2..02958f1 100644 --- a/ecp/en_US/edge_service/batch_import.md +++ b/ecp/en_US/edge_service/batch_import.md @@ -1,10 +1,12 @@ -# Add Existing Edge Services +# Managed Edge Services -ECP provides the capability to manage existing edge services. However, these services must first be added to ECP. 
You can add [an externally created edge service](#add-an-existing-edge-service), or [import them from csv in bulk](#add-existing-edge-services-in-bulk). +ECP supports [Add an existing edge service](#add-an-existing-edge-service), or [Batch Import Existing Edge Services](#batch-import-existing-edge-services) via CSV files. -## Add an Existing Edge Service +## Add an existing service -ECP supports to add a NeuronEX instance in [direct connection mode](#direct-connection-mode). +ECP manages edge services and supports `Managed - Direct Connection` mode and `Managed - Agent` mode. To add an edge service under [Direct Connection Mode](#direct-connection-mode), the ECP and edge service need to be in the same network or VPC. + +![edge-service-mode](_assets/edge-service-mode.png) ### Direct Connection Mode @@ -22,23 +24,12 @@ Direct connection mode generally refers to the scenario where the cluster and al -### Proxy Mode +### Agent Mode -If the edge side and ECP are on separate networks and cannot establish a direct connection, it is necessary to install ECP Agent to establish a proxy connection. +If the ECP cannot directly access the edge service through the IP address, the edge service needs to be added through agent mode. For specific operation steps, please refer to [Manage edge services by Agent](./edge_agent_management.md). -#### Operation Steps -1. Log in as system admin, organization admin, or project admin. -2. Click the **Add Edge Service** button to enter the **Add Edge Service** page. -3. Choose **Add existing service** for the **Add Type**. -4. For **Category**, you can choose **eKuiper**, **Neuron**, **NanoMQ**, or **Customize**. -5. Choose **Proxy** for the **Connection Type**; -6. Give a name to the edge service; it should be 1 - 200 characters and also support "-" and blank spaces. -7. Enter the access address of the edge service. HTTP and HTTPS protocols are supported; -8. Add tags to facilitate future management. -9. 
Click the **Confirm** button to finish the creation. The newly-added edge service is now displayed in the **Edge Service** section. - -## Add Existing Edge Services in Bulk +## Batch Import Existing Edge Services ECP supports batch importing of existing edge services in CSV file format. @@ -55,13 +46,13 @@ The following table provides an overview of the column names of the .csv file an | Column | Explanation | | --------- | --------------------------------- | -| category | Edge product type | +| category | Edge service type | | name | Edge service name | -| nodeType | Direct/Proxy | +| nodeType | Direct | | endpoint | Edge service address | -| scheme | http/https/MQTT | -| agentID | Edge agent ID, optional | +| scheme | http/https | | tagName | Tag name, optional | -| serviceID | Edge service ID, optional | -| username | Authentication username, optional | -| password | Authentication password, optional | \ No newline at end of file + +:::tip +Batch importing existing edge services only supports the `Direct Connection Mode`. +::: diff --git a/ecp/en_US/edge_service/batch_intall.md b/ecp/en_US/edge_service/batch_intall.md index d2d907a..7f4e75a 100644 --- a/ecp/en_US/edge_service/batch_intall.md +++ b/ecp/en_US/edge_service/batch_intall.md @@ -1,13 +1,14 @@ -# Edge Services Batch Installation Docker Mode +# Host Edge Services By Docker Based on the ECP platform deployed by Docker, if the hardware of the edge service supports the deployment of Docker containers, edge services can be installed in batches on the ECP platform, shortening the installation and deployment time of edge services, and improving deployment efficiency and consistency. 
## Prerequisites Before batch installation of edge services, you need to complete the following preparations: -- [Configure Docker environment](#configuration-docker-environment) - -- Add [edge node](./docker_node.md), the edge service will be installed on the edge node +- [Configure Docker Environment](#configuration-docker-environment) +- Configure [Docker Configuration](../system_admin/resource_config.md#docker-configuration) on ECP +- Add [Edge Service Image List](../system_admin/resource_config.md#edge-service-image-list) +- Add [edge node](#edge-node-management), the edge service will be installed on the edge node ### Configuration Docker Environment @@ -16,7 +17,7 @@ edge service is deployed by Docker, so you need to install Docker on the edge no After the installation is complete, you need to open the remote access port of the Docker API. The ECP platform manages the life cycle of the edge service through the Docker API, and supports two modes of Docker API to enable TLS authentication and not enable TLS authentication. -#### Do not enable TLS authentication +#### Not enable TLS authentication 1. Find the docker service configuration file, the default is: `/usr/lib/systemd/system/docker.service`, you can see the location of the file through the `systemctl status docker` command. ![docker_service](./_assets/docker_service.png) @@ -41,7 +42,7 @@ After the installation is complete, you need to open the remote access port of t This certificate is for testing only. Please use a self-signed certificate in a production environment. ::: - 2) Modify the IP address in extfile.cnf to the IP address exposed by the edge node that deploys the Docker Engine service externally. This IP address is also the IP address that needs to be entered in ECP when adding [edge nodes](./docker_node.md). + 2) Modify the IP address in extfile.cnf to the IP address exposed by the edge node that deploys the Docker Engine service externally. 
This IP address is also the IP address that needs to be entered in ECP when adding [edge nodes](#edge-node-management). ![extfile](./_assets/extfile.png) 3) Execute the gen-docker-cert.sh script to generate the server certificate: server-cert.pem, the default password: `1111`; ![gen-docker-cert](./_assets/gen-docker-cert.png) @@ -63,6 +64,35 @@ After the installation is complete, you need to open the remote access port of t upload the certificate file `ca.pem`, `cert.pem`, `key.pem` to ECP. ![docker_mode2](./_assets/docker_mode2.png) +### Edge Node Management +Through edge node management, you can add, edit, view and delete edge nodes. + +#### Edge Node Registration +Only after the Docker node is registered can the edge service be deployed on the specified Docker node through ECP. + +1. Select **Organization**; **Project**; +2. Click **Edge Management**, select **Edge Node Management**, and click **Create Edge Node**; +3. Enter the name, IP address, and description of the edge node. + +![docker_node_registry](./_assets/docker_node_registry.png) + + +#### Edge Node List Management +1. Select **Organization**; **Project**; +2. Click **Edge Management**, select **Edge Node Management**; +3. Select the node to be managed in the list, you can **edit**, **delete**, **view**, the view button indicates the list of edge services on the node; + +![docker_node_list](./_assets/docker_node_list.png) + + +#### List of edge services on the edge node +After clicking the **View** button on the right of an edge node in the edge node list, you can see the edge service list on the node. +You can see information such as the status of these edge services in the list. + +![docker_node_service_list](./_assets/docker_node_edge_service_list.png) + + + ## Batch Install Edge Services 1. Log in as system admin, organization admin, or project admin. Navigate to Workspace - Edge Service page. 
diff --git a/ecp/en_US/edge_service/batch_tag.md b/ecp/en_US/edge_service/batch_tag.md index eb1b89a..dc4849e 100644 --- a/ecp/en_US/edge_service/batch_tag.md +++ b/ecp/en_US/edge_service/batch_tag.md @@ -1,4 +1,4 @@ -# Tags and Grouping +# Tags ECP provides a tagging feature to classify, organize, and filter edge services. It allows you to group edge services for efficient management, facilitating operations such as managing edge services and batch deployment of edge configurations. diff --git a/ecp/en_US/edge_service/e2c.md b/ecp/en_US/edge_service/e2c.md index b971a24..1685f53 100644 --- a/ecp/en_US/edge_service/e2c.md +++ b/ecp/en_US/edge_service/e2c.md @@ -14,14 +14,19 @@ If authentication is enabled at the managed NeuronEX side, you can configure as 1. Log in to ECP as system admin, organization admin, or project admin. 2. Download the public key file: On the **Administration** page, navigate to **System Settings** -> **Resource Settings**. Click to expand the **Managed EdgeService Authentication Configuration**, and download the key file. - 2. Then Log in to the container or virtual machine where NeuronEX is located, and upload the downloaded public key file to the `etc` directory in the NeuronEX installation directory. + 3. Then Log in to the container or virtual machine where NeuronEX is located, and upload the downloaded public key file to the `etc` directory in the NeuronEX installation directory. + + Taking the NeuronEX container `neuronex-test` installed by Docker as an example, execute the following command on the machine where NeuronEX is deployed: + + ```bash + $ docker cp my-ecp.pub neuronex-test:/opt/neuronex/etc/my-ecp.pub + ``` + - - ## Test the Authentication Setting -To verify the authentication configuration, try [adding an existing NeuronEX service](./batch_import.md#add-an-existing-edge-service). If the authentication is correctly set up, the Neuron service should be added successfully. You can then click on the service to view its details. 
+To verify the authentication configuration, try [adding an existing NeuronEX service](./batch_import.md#add-an-existing-edge-service). If the authentication is correctly set up, the NeuronEX service should be added successfully. You can then click on the service to view its details. ## RSA Signature Authentication diff --git a/ecp/en_US/edge_service/edge_agent_management.md b/ecp/en_US/edge_service/edge_agent_management.md index 6bddba4..6f92abe 100644 --- a/ecp/en_US/edge_service/edge_agent_management.md +++ b/ecp/en_US/edge_service/edge_agent_management.md @@ -1 +1,158 @@ -# ECP Edge Agent +# Managed Edge Services by Agent + +If the IP of the edge service NeuronEX is not fixed, or NeuronEX is in its own VPC or LAN, ECP cannot actively obtain the IP information of edge service NeuronEX, and the direct connection mode to import the edge service will no longer be applicable. ECP provides an agent management method to manage and access NeuronEX in the above scenario. + +## Configure Agent Service + +In the agent management mode, the MQTT protocol is used to communicate between ECP and NeuronEX. NeuronEX serves as an MQTT client and connects to ECP's built-in MQTT proxy server NanoMQ. ECP communicates with NeuronEX through the MQTT proxy server to realize the management of NeuronEX. + +The process for NeuronEX agent management to access ECP (without turning on SSL/TLS) is as follows: + +1. [Configure MQTT proxy service on the ECP side](#configure-the-mqtt-proxy-service-on-the-ecp-side) +2. [NeuronEX agent function configuration](#neuronex-agent-function-configuration) +3. [Edge Proxy Management](#edge-proxy-management) +4. [Agent managed edge service](#agent-managed-edge-service) + +### Configure the MQTT proxy service on the ECP side +After the ECP installation is completed, the MQTT proxy service has been started by default and no additional configuration is required. 
The default port of the MQTT proxy service is 31883, and the IP is the server where the ECP is located. + +### NeuronEX agent function configuration +On the NeuronEX side, click **Administrator** -> **System Configuration**. +- Select to Enable Agent. +- the ECP Service Address is configured as `[IP of the server where the ECP is located]:31883`, and the username and password are `admin` and `public` by default. +- Description will be a brief description of the NeuronEX and displayed in the ECP agent management page. + +For the configuration of the agent management function on the NeuronEX side, please refer to [NeuronEX Agent Configuration](https://docs.emqx.com/en/neuronex/latest/admin/sys-configuration.html#agent-configuration). + +### Edge proxy management +Log in to ECP as the system/organization/project administrator, click **Workspace** -> **Edge Management** to enter the edge service page, click the **Agent Management** button on the right side of the page to open **Edge Agent Management** window. + +![agent-manage-btn](./_assets/edge-agent-manage-btn.png) + +You can view all NeuronEX agents registered to ECP in this window. Agents that have not yet been managed by ECP are displayed as "unmanaged", and the actual online status is also displayed. You can manage or delete this NeuronEX agents. + +![agent-manage-list](./_assets/edge-agent-manage-list.png) + +## Enable SSL/TLS in Agent Service + +The default agent server of ECP uses the TCP protocol for data transmission. If you want to use a more secure transmission method, you can configure it appropriately and enable SSL/TLS. The following will take the example of ECP deployed with Docker using the built-in NanoMQ to explain the configuration steps in detail. You can also refer to the [NanoMQ Docker Deployment Document](https://nanomq.io/docs/en/latest/installation/docker.html) for a more complete introduction. + +1. 
Prepare the SSL certificate files used by NanoMQ, including the CA file (cacert.pem), the certificate file used by NanoMQ (cert.pem), and the certificate key file used by NanoMQ (key.pem), and save them to the configs/nanomq subdirectory of the installation file directory. + +2. Prepare the SSL certificate files used by ECP, including the CA file (cacert.pem), the certificate file used by ECP (client-cert.pem), and the certificate key file used by NanoMQ (client-key.pem), and save them to the configs/main subdirectory of the installation file directory. + +3. Enter the installation file directory, modify configs/nanomq/nanomq.conf, add an SSL listener, and mainly configure the port and certificate location: + + - Use port 8883 in `bind`. + - `keyfile`, `certfile`, `cacertfile` are the paths where the NanoMQ SSL certificate files are mounted to the container. + + ``` + listeners.ssl { + bind = "0.0.0.0:8883" + keyfile = "/etc/certs/server.key" + certfile = "/etc/certs/server.pem" + cacertfile = "/etc/certs/cacert.pem" + } + ``` + +4. Enter the directory where the installation file is located and modify the mqtt part in the docker-compose.yaml file. The specific content that needs to be modified is as follows: + + - Confirm to use the full version of NanoMQ image in `image`, such as 0.21.2-full. + - Added mapping of SSL port 8883 in `ports`. In the example, it is mapped to port 38883 (port 38883 is used for external access such as NeuronEX, and ECP still uses the network port 8883 in the container) + - Mount the certificate file to the NanoMQ container in `volumes`. Please make sure it is consistent with the path in the container specified in nanomq.conf in the previous step. + - Configure SSL/TLS related environment variables in `environment` + - NANOMQ_TLS_ENABLE is set to true to enable TLS. + - If NANOMQ_TLS_VERIFY_PEER is set to false, it means NanoMQ does not verify the client certificate. 
If it is set to true, it means that the client certificate needs to be verified. Please set it according to actual needs. + - NANOMQ_TLS_FAIL_IF_NO_PEER_CERT If set to false, NanoMQ allows the client to not send a certificate or to send an empty certificate. If set to true, it means that the client will be refused to connect without a certificate. Please set it according to actual needs. + + +``` + mqtt: + container_name: emqx-ecp-nanomq + image: ${IMAGE_REGISTRY}/${IMAGE_NANOMQ}-full + restart: always + hostname: ecp-nanomq + ports: + - ${MQTT_EXTERNAL_PORT}:1883 + - 38883:8883 + volumes: + - ${ECP_CONFIG_DIR}/nanomq/nanomq.conf:/etc/nanomq.conf + - ${ECP_CONFIG_DIR}/nanomq/cacert.pem:/etc/certs/cacert.pem:ro + - ${ECP_CONFIG_DIR}/nanomq/cert.pem:/etc/certs/cert.pem:ro + - ${ECP_CONFIG_DIR}/nanomq/key.pem:/etc/certs/key.pem:ro + environment: + NANOMQ_TLS_ENABLE: 'true' + NANOMQ_TLS_VERIFY_PEER: 'false' + NANOMQ_TLS_FAIL_IF_NO_PEER_CERT: 'false' + networks: + emqx-ecp-network: + aliases: + - node1 +``` + +5. Modify the main section in the docker-compose.yaml file. The specific content that needs to be changed is as follows: + - Mount the certificate file in `volumes` to the ECP main container. In the example, the certificate files are mounted to the `/bc/certs` directory of the container. + +``` + main: + container_name: emqx-ecp-main + image: ${IMAGE_REGISTRY}/${IMAGE_ECP_MAIN} + restart: always + depends_on: + postgres: + condition: service_healthy + mqtt: + condition: service_started + emqxagentdlproxy: + condition: service_started + environment: + - GIN_MODE=release + - ECP_DEPLOYMENT_MODE=docker + volumes: + - ${ECP_MAIN_VOLUME}:/bc/assets/files + - ${ECP_CONFIG_DIR}/main/main.yaml:/bc/configs/conf.yaml + - ${ECP_CONFIG_DIR}/main/cacert.pem:/bc/certs/cacert.pem:ro + - ${ECP_CONFIG_DIR}/main/client-cert.pem:/bc/certs/client-cert.pem:ro + - ${ECP_CONFIG_DIR}/main/client-key.pem:/bc/certs/client-key.pem:ro + networks: + - emqx-ecp-network +``` + +6. 
Modify the mqtt section in the ECP configuration file configs/main/main.yaml: + - `useSSL` is set to true to enable TLS. + - The port in `addr` is set to 8883. + - `verifyCertificate` indicates whether to verify the NanoMQ side certificate. Please set it according to actual needs. + - `cacertFile`, `cacertFile`, `cacertFile` are respectively the path in the container to which the ECP certificate file is mounted. Please ensure that it is consistent with the path in the container specified in docker-compose.yaml in the previous step. + +``` +mqtt: + useSSL: true + addr: mqtt:8883 + username: "ecp-mqtt-cloud" + password: "ecp-mqtt-cloud1!" + maxReconnectInterval: 3 + connectTimeout: 8 + cleanSession: true + verifyCertificate: false + cacertFile: "/bc/certs/cacert.pem" + certFile: "/bc/certs/client-cert.pem" + keyFile: "/bc/certs/client-key.pem" +``` + +7. Restart the ECP service. + +```shell +./emqx_ecp_ctl start +``` + +::: tip + +To enable SSL/TLS in the agent management service, the NeuronEX agent configuration also needs to adjust the default port of the MQTT agent service to 38883. At the same time, SSL/TLS needs to be enabled and the corresponding certificate file needs to be filled in. For NeuronEX agent function configuration, please refer to [NeuronEX Agent Configuration](https://docs.emqx.com/en/neuronex/latest/admin/sys-configuration.html#agent-configuration). + +::: + +## Check Agent Service Status + +On the **Administrator** page, click **System Settings** -> **General Settings**, and click to expand the **Agent Setting** section to check whether the agent service NanoMQ is connected normally. If TLS/SSL connection is enabled, the corresponding certificate file can also be exported for use on the NeuronEX side. 
+ +![agent-service-status](./_assets/edge-agent-svc-status.png) \ No newline at end of file diff --git a/ecp/en_US/edge_service/edge_ops.md b/ecp/en_US/edge_service/edge_ops.md index 77492fe..c3effd6 100644 --- a/ecp/en_US/edge_service/edge_ops.md +++ b/ecp/en_US/edge_service/edge_ops.md @@ -1,4 +1,4 @@ -# Edge Service Management & Operations +# Edge Service Operations ECP integrates the user interfaces of NeuronEX edge services, allowing for remote connectivity, configuration, monitoring, and management of individual edge services. Additionally, for deployments on Kubernetes, ECP provides a batch management feature for managing multiple hosted edge service instances collectively, further enhancing the efficiency and convenience of edge service management. diff --git a/ecp/en_US/edge_service/edge_project_statistics.md b/ecp/en_US/edge_service/edge_project_statistics.md index dd5e370..5bc811c 100644 --- a/ecp/en_US/edge_service/edge_project_statistics.md +++ b/ecp/en_US/edge_service/edge_project_statistics.md @@ -1,4 +1,4 @@ -# Project Level Overview +# Edge Service Monitoring After finishing creating the edge service instances or adding existing edge services, you can get a project-level overview of edge services. diff --git a/ecp/en_US/edge_service/edge_resource_management.md b/ecp/en_US/edge_service/edge_resource_management.md index 2cbb556..30e462a 100644 --- a/ecp/en_US/edge_service/edge_resource_management.md +++ b/ecp/en_US/edge_service/edge_resource_management.md @@ -1,4 +1,4 @@ -# Edge Config Management and Delivery +# Edge Service Config Management and Delivery In the edge computing environment, various softwares run on edge devices, each requiring custom configurations to meet specific business requirements. 
diff --git a/ecp/en_US/edge_service/introduction.md b/ecp/en_US/edge_service/introduction.md index 50bee80..ce89616 100644 --- a/ecp/en_US/edge_service/introduction.md +++ b/ecp/en_US/edge_service/introduction.md @@ -2,9 +2,44 @@ ## Edge Service -Edge computing is a distributed computing paradigm that brings computation and data storage closer to the sources of data, like IoT devices or local edge servers. This approach minimizes latency, reduces bandwidth usage, and enhances data privacy by processing data locally rather than transmitting it to a central data center or cloud. +Edge service NeuronEX can realize data collection, data preprocessing, edge computing and other capabilities. In many industrial scenarios, a large number of edge services need to be deployed to achieve data interconnection, global optimization and agile production. -ECP excels in efficiently managing various aspects of edge services as part of its primary functions. These include seamless deployment, streamlined management, flexible configuration, streamlined batch operations, and optimization for popular edge software such as Neuron, eKuiper, and NanoMQ. +ECP supports the batch creation and management of hundreds of edge service instances in Kubernetes, docker and other environments, and supports edge service configuration management and batch configuration distribution, and accelerate the rapid deployment and implementation of IIOT projects. + +ECP supports the management of edge services through managed and hosted: +- **Managed**: Managed refers to the edge service created and deployed by users. +- **Hosted**: Hosted refers to the edge service created and deployed by the ECP platform. The hosted edge service supports deployment, start, stop, and upgrade by ECP. + +Managed mode, ECP supports two methods: [Managed - Direct Connection](./batch_import.md) and [ Managed - Agent](./edge_agent_management.md). Users can choose the appropriate management method according to the actual scenario. 
+ +In hosted mode, ECP supports the [Host Edge Services By Docker](./batch_intall.md) method. + +There are functional differences between managed and hosted edge services, as follows: + +|Function Category| Function Name | Hosted Edge Service | Managed Edge Service | +| :--------------| :-------| :----| :----| +|Edge Services Management|Edge Services NeuronEX Management|✅|✅| +||Edge Service Monitoring|✅|✅| +||Edge Service Alarm|✅|✅| +||Edge Service Log Management|✅|✅| +||Edge Service Configuration Delivery|✅|✅| +||Tags|✅|✅| +||Edge Service Installation|✅|❌| +||Edge Service Upgrade|✅|❌| +||Edge Service Start and Stop Control|✅|❌| + +Refer to the following sections of this chapter for details: + +- [Managed Edge Services](batch_import) +- [Managed Edge Services By Agent](edge_agent_management) +- [Host Edge Services By Docker](batch_intall) +- [Upgrade Edge Services](batch_upgrade) +- [Edge Service Operations](edge_ops) +- [Edge Service Config Management and Delivery](edge_resource_management) +- [Edge Service Monitoring](edge_project_statistics) +- [Authenticate Edge Services](e2c) +- [Tags](batch_tag) +- [Edge Cloud Tunnel](edge_cloud_tunnel) ## Access Edge Services Workspace @@ -24,16 +59,3 @@ There are some functional differences between ECP deployments based on Kubernete ![edge-list](./_assets/edge-list.png) - -Below are topics that will be covered in this chapter: - -- [Project Level Overview](./edge_project_statistics.md) -- [Install Edge Service in Bulk](./batch_intall) -- [Add Existing Edge Services](./batch_import) -- [Authenticate Edge Services](./e2c) -- [Tags and Grouping](./batch_tag) -- [Upgrade Edge Services](./batch_upgrade) -- [Edge Config Management and Delivery](./edge_resource_management) -- [Edge Service Management & Operations](./edge_ops) - - diff --git a/ecp/en_US/monitor/_assets/alarm-delete.png b/ecp/en_US/monitor/_assets/alarm-delete.png new file mode 100644 index 0000000..95e2f64 Binary files /dev/null and 
b/ecp/en_US/monitor/_assets/alarm-delete.png differ diff --git a/ecp/en_US/monitor/_assets/custom-alarm.png b/ecp/en_US/monitor/_assets/custom-alarm.png new file mode 100644 index 0000000..8b63da8 Binary files /dev/null and b/ecp/en_US/monitor/_assets/custom-alarm.png differ diff --git a/ecp/en_US/monitor/alarm_rules.md b/ecp/en_US/monitor/alarm_rules.md index 3c81b51..590899e 100644 --- a/ecp/en_US/monitor/alarm_rules.md +++ b/ecp/en_US/monitor/alarm_rules.md @@ -15,6 +15,15 @@ To streamline alarm management, ECP has provided 2 tabs on the **Alarm** page: * You can also use the filtering feature of ECP to filter alarms by type, message, node, level, or time. +### Historical Alarm Delete + +ECP supports cleaning of historical alarms. In the **History Alarms** tab, click the **History Alarm Delete** button to choose to delete historical alarm records by time or number. + +- **Delete by time**: All historical alarm records whose occurrence time exceeds the specified time range will be deleted. +- **Delete by number of items**: Delete the specified number of historical alarm records based on the alarm occurrence time, starting from the earliest. The maximum deletion limit for a single deletion operation is 50,000. + +![delete](./_assets/alarm-delete.png) + ## Basic Alarm Settings Log in as system admins, organization admins, or project admins, navigate to **Workspace** -> **Alarm**, and enter into the **Alarm rules and settings** tab. @@ -115,4 +124,40 @@ Please refer to the table below, which addresses the conditions triggering alarm After an alarm storm occurs, it will be prominently highlighted on the 'Alarms' page. Once you have resolved the system issues causing the alarms, clicking the 'Clear Alarm' button to restore the normal functionality of alarms for the current project. 
-![alarm-storm](./_assets/alarm-storm.png) \ No newline at end of file +![alarm-storm](./_assets/alarm-storm.png) + + +## Custom Alarm + +If your edge service wants to push other alarm information to the ECP during business processing, it can be achieved by integrating a custom alarm API. Log in to ECP as a system/organization/project administrator. In the **Alarm Rules & Notifications** tab of the **Alarm** page, you can view and copy the API information of the custom alarm, including the request URL and request specified secret. If you need to reset the secret, please regenerate it through the "Refresh" button. + + ![custom-alarm](./_assets/custom-alarm.png) + +### Example + +**POST** {custom alarm URL} + +- Request Header + +``` +X-ECP-Alarm-Token: {Custom Alarm Secret} +Content-Type: application/json +``` + +- Request Content + + - The `message` field must be specified, the type is a string, indicating the specific content of the alarm, which will be displayed in the **Active Alarms/History Alarms** list on the page. + - The `timestamp` field must be specified, the type is a string, indicating the timestamp of the alarm occurrence (in seconds). Alarm messages older than 10 minutes will not be received. + - The value of the `severity` field must be 0 or 1. 0 indicates that the alarm level is **Normal**, 1 indicates that the alarm level is **Critical**. The `severity` field value will affect the notification scope of the alarm. Please refer to the "**Basic Alarm Settings > Notification Scope**" section above. + - The `tag` field is an optional field, the type is string, indicating the tag name. If the `tag` field is specified, the push settings corresponding to the tag name will be used for alarm notification. Please refer to the "**Alarm Notification Settings**" section above. 
If the `tag` field is not specified or the specified tag name does not exist, the alarm will only be displayed in the **Active Alarms/History Alarms** list on the page and will not be pushed by email or Webhook. + - The `uuid` field is an optional field, the type is string, and it represents the unique identifier of the alarm. If multiple custom alarms use the same `uuid`, these alarms will be regarded as the same alarm and are subject to the control of silence duration. Please refer to the "**Basic Alarm Settings > Silence**" section above. If the `uuid` field is not specified, ECP will randomly generate a unique identifier for each custom alarm. + +```json +{ + "message": "message details for custom alarm", + "timestamp": "1711433603", + "severity": 1, + "tag": "customTag", + "uuid": "of9MHKAj" +} +``` \ No newline at end of file diff --git a/ecp/en_US/monitor/introduction.md b/ecp/en_US/monitor/introduction.md index a4d11ac..2d50228 100644 --- a/ecp/en_US/monitor/introduction.md +++ b/ecp/en_US/monitor/introduction.md @@ -86,3 +86,15 @@ This chapter covers the following topics: - [Alarms](./alarm_rules) The ECP Unified Alert Platform is a tool for monitoring and managing cloud-edge products. It collects and analyzes data to identify and alert users of system or application issues for prompt resolution. + +- [Alarm List](./rules.md) + + A list of alarms in ECP. + +- [Audit](../system_admin/operation_audit.md) + + ECP's Operation Audit ensures extensive monitoring of crucial user activities in the platform. + +- [API Documentation](https://docs.emqx.com/en/emqx-ecp/latest/api/api-docs.html) + + This is ECP's API documentation. \ No newline at end of file diff --git a/ecp/en_US/monitor/rules.md b/ecp/en_US/monitor/rules.md index 29d4e97..3459a57 100644 --- a/ecp/en_US/monitor/rules.md +++ b/ecp/en_US/monitor/rules.md @@ -1,4 +1,4 @@ -## Alarm List +# Alarm List See below for a list of alarms in ECP. 
diff --git a/ecp/en_US/system_admin/resource_config.md b/ecp/en_US/system_admin/resource_config.md index 2af27e5..88d5521 100644 --- a/ecp/en_US/system_admin/resource_config.md +++ b/ecp/en_US/system_admin/resource_config.md @@ -1,12 +1,70 @@ # Resource Settings -In **Resource Settings**, system admins can configure the Kubernetes connection, image versions, and storage class, as well as customize the specifications for EMQX clusters and edge services, like Neuron and eKuiper. +Resource settings are the resource-layer configuration for ECP deployed via Docker or Kubernetes, covering three categories: [Basic Config](#basic-config), [Docker Deployment Mode Configuration](#docker-deployment-mode-configuration), and Kubernetes Deployment Mode Configuration. -## Configure **Kubernetes Connection** + +## Basic Config + +### Image Repository Configuration + +ECP supports connecting to the internal private image registry. To configure the connection to an internal private image registry in ECP, follow these steps: + +1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. +2. Expand the **Image Repository Configuration** panel. +3. Click **Edit** to enter the editing page. +4. Enter the repository URL, and the Email and password to access this repository. +5. Before confirming the setting, click **Test** to test the connection. +6. Click **Save** to complete the configuration. + + + +### Edge Service Image List + +System administrators can add, edit, or delete entries in the edge service image list. When there is a public network connection, the administrator can directly configure the public image repository. If you are using a private server image, you can also refer to the [Image Repository Configuration](#image-repository-configuration). + +1. Log in to ECP as a system administrator, and on the system management page, click **System Settings** -> **Resource Settings**. + +2. 
Click to expand the **Basic Config** section, and click to expand the **Edge Service Image List**. + +3. Click **Edit** under NeuronEX to enter the editing page. + +4. You can click `+` to add a new image and determine whether to set it as the default image. You can also click the delete icon to delete an image. + +5. Click **Confirm** to complete the settings. + +![eKuiper](./_assets/manager-setting-eKuiper.png) + +### Managed edge service authentication configuration + +The public key file automatically generated by ECP during installation can be obtained from **Managed Edge Service Authentication Configuration**, which is used as the secret key when **Managed** NeuronEX turns on authentication. For information on NeuronEX certification, please refer to [Authenticate Edge Services](../edge_service/e2c). + +![rsa](./_assets/manager-setting-rsa.png) + +## Docker Deployment Mode Configuration + +### Docker Configuration + +For edge services created using ECP, you have the option to customize the docker node connect configuration. However, you are recommended to keep the default settings as they are verified by EMQ technical team. Modify them only if necessary. + +1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. +2. Expand the **Docker Mode** -> **Docker Configuration**. +3. Click **Edit** to enter the editing page, where you can modify the settings. + + + + +### Edge Service Setting + +1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. +2. Expand the **Docker Mode** -> **Edge Service Setting**. +3. Click **Edit** to enter the editing page, where you can modify the settings. + +## Kubernetes Deployment Mode Configuration + +### Kubernetes Connection Setting Before ECP can manage, control or authenticate Kubernetes resources, system admins must first finish the Kubernetes Connection settings for ECP. 
-### Obtain `kuberconfig` File from Kubernetes +#### Obtain `kubeconfig` File from Kubernetes Download the `kubeconfig` from the Kubernetes master cluster, which is usually located in `~/.kube/config` @@ -34,7 +92,7 @@ Then update the `kubeconfig` file To ensure high availability, replace the server address in the kubeconfig file with the Loadbalancer or vip address. ::: -### Upload `kuberconfig` to ECP +#### Upload `kubeconfig` to ECP 1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. 2. Expand the **Create Kubernetes Connect** panel. @@ -47,73 +105,29 @@ Once connected to Kubernetes and with active running clusters, it is recommended ::: -## Configure Image Server - -ECP supports connecting to the internal private image registry. To configure the connection to an internal private image registry in ECP, follow these steps: - -1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. -2. Expand the **Image Pull Secret** panel. -3. Click **Edit** to enter the editing page. -4. Enter the repository URL, and the Email and password to access this repository. -5. Before confirming the setting, click **Test** to test the connection. -6. Click **Save** to complete the configuration. - - - -## Configure Storage Class - -A Storage Class provides a way for administrators to describe the "classes" of storage they offer. In ECP, StorageClass is used to specify the provisioning PVs for storage. - -### Obtain the Storage Class from Kubernetes - -1. Log in to the Kubernetes environment using the command line interface (CLI). - -2. 
Execute the following command to view the list of storage classes: - - ```bash - kubectl get sc - - NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE - alicloud-disk-ssd diskplugin.csi.alibabacloud.com Delete Immediate true 170d - alicloud-disk-topology diskplugin.csi.alibabacloud.com Delete WaitForFirstConsumer true 170d - local-path rancher.io/local-path Delete WaitForFirstConsumer false 169d - ``` - -### Configure EMQX Storage Class - -1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. -2. Expand the **Cluster Setting** panel, and then the **EMQX Quota List** panel. -3. Click **Edit** to enter the editing page. -4. Enter the **Storage Class Name** obtained from Kubernetes, the desired **Storage Size** in MiB, and indicate whether it should be set as the default Storage Class. - - - -4. Click **Save** to complete the configuration. The changes will take effect immediately. - - -## Configure EMQX Clusters +### EMQX Cluster Setting This is to configure the EMQX cluster image versions, define quota, and annotate load balancing for public cloud clients. -### Configure EMQX Image Versions +#### Configure EMQX Image Versions -System admins can add, edit or delete EMQX image versions. If you have internet connectivity, you can use public image addresses. Additionally, you can also specify a private image registry service. On how to configure a Telegraf image, see [Configure Image List](#configure-image-server). +System admins can add, edit or delete EMQX image versions. If you have internet connectivity, you can use public image addresses. Additionally, you can also specify a private image registry service. On how to configure a Telegraf image, see [Image Repository Configuration](#image-repository-configuration). -1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. -2. Expand the **Cluster Setting** -> **EMQX Image Versions**. +1. 
Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings** -> **Kubernetes Mode**. +2. Expand the **EMQX Cluster Setting** -> **EMQX Image Versions**. 3. Click **Edit** to enter the editing page. 1. To add a new image version, click the "+" icon and provide the image version. You can choose to set it as the default version. To delete an image version, simply click the delete icon. 1. Click **Save** to complete the configuration. -### Configure EMQX Cluster Quota +#### Configure EMQX Cluster Quota For EMQX clusters created using ECP, you have the option to customize the quota according to your specific business requirements. However, you are recommended to keep the default settings as they are verified by EMQ technical team. Modify them only if necessary. -1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings**. +1. Log in to ECP as a system admin and navigate to **System Settings** -> **Resource Settings** -> **Kubernetes Mode**. -2. Expand the **Cluster Setting** -> **EMQX Quota List**. +2. Expand the **EMQX Cluster Setting** -> **EMQX Quota List**. 3. Click **Edit** to enter the editing page, where you can modify the CPU and memory-related quota for each quota entry, delete an existing quota entry, or add a new quota entry. Note: Deleting a quota entry will not impact the currently running clusters. @@ -138,7 +152,7 @@ Below is the specification for an EMQX cluster with 1 CPU core, 1 GiB of memory, | Telegraf-cpu-request | 100m | Minimum CPU usage limit for Telegraf | | Telegraf-memory-request | 100Mi | Minimum memory usage limit for Telegraf | -### Configure Load Balancing +#### Configure Load Balancing For public cloud clients, you can configure load balancing by adding annotations. 
@@ -152,58 +166,37 @@ You can configure the annotation feature as follows: \ No newline at end of file diff --git a/ecp/zh_CN/edge_service/edge_project_statistics.md b/ecp/zh_CN/edge_service/edge_project_statistics.md index 517a1e3..95f81fe 100644 --- a/ecp/zh_CN/edge_service/edge_project_statistics.md +++ b/ecp/zh_CN/edge_service/edge_project_statistics.md @@ -1,4 +1,4 @@ -# 项目级监控统计 +# 边缘服务监控统计 完成边缘服务的批量创建/导入后,您可在 ECP 边缘服务页查看项目级别的边缘服务统计信息。 diff --git a/ecp/zh_CN/edge_service/edge_resource_management.md b/ecp/zh_CN/edge_service/edge_resource_management.md index 77e9879..221d9c3 100644 --- a/ecp/zh_CN/edge_service/edge_resource_management.md +++ b/ecp/zh_CN/edge_service/edge_resource_management.md @@ -1,4 +1,4 @@ -# 边缘配置管理和下发 +# 边缘服务配置管理和下发 针对工业场景大规模的设备接入需求,ECP 提供了边缘配置的管理和下发功能,支持将相同的配置信息下发给一个或多个边缘服务,通过该功能可以加快 IIOT 项目的快速部署实施及运维效率。 diff --git a/ecp/zh_CN/edge_service/introduction.md b/ecp/zh_CN/edge_service/introduction.md index 50ea175..2fd5f86 100644 --- a/ecp/zh_CN/edge_service/introduction.md +++ b/ecp/zh_CN/edge_service/introduction.md @@ -3,7 +3,29 @@ ## 边缘服务 边缘服务 NeuronEX 可以实现对工业设备的数据采集接入、数据预处理、边缘计算等能力,在许多工业场景下,需要部署大量的边缘服务,实现数据的互联互通、全局优化以及敏捷生产。 -边缘服务管理是 ECP 平台核心功能之一,ECP 支持在Kubernetes、docker等环境下批量创建并管理数百个边缘服务实例,完成实时数采、边缘计算任务,支持边缘服务配置管理及批量配置下发,加快 IIOT 项目的快速部署实施及项目落地。 +边缘服务管理是 ECP 平台核心功能之一,ECP 支持在 Kubernetes、Docker 等环境下批量创建并管理数百个边缘服务实例,完成实时数采、边缘计算任务,支持边缘服务配置管理及批量配置下发,加快 IIOT 项目的快速部署实施及项目落地。 + +ECP 支持通过纳管与托管的方式管理边缘服务,其中: +- **纳管**:纳管是指由用户自行创建部署的边缘服务 NeuronEX 。 +- **托管**:托管是指由 ECP 平台创建部署的边缘服务 NeuronEX ,托管服务支持由 ECP 部署、启停以及升级。 + +纳管模式,ECP支持[直连纳管](./batch_import.md)与[代理纳管](./edge_agent_management.md)两种方式,用户可以根据实际场景选择合适的纳管方式。 +托管模式,ECP支持[Docker方式托管边缘服务](./batch_install.md)方式。 + +关于托管与纳管的具体差异,请参考[托管与纳管边缘服务的功能差异](#托管与纳管边缘服务的功能差异)。 + +具体内容本章请查阅以下各章节: + +- [纳管边缘服务](batch_import) +- [代理纳管边缘服务](edge_agent_management) +- [Docker方式托管边缘服务](batch_install) +- [升级边缘服务](batch_upgrade) +- [边缘服务管理运维](edge_ops) +- [边缘服务配置管理和下发](edge_resource_management) +- 
[边缘服务监控统计](edge_project_statistics) +- [边缘服务认证](e2c) +- [标签及分组](batch_tag) +- [云边通道](edge_cloud_tunnel) ## 访问边缘服务页面 @@ -25,7 +47,7 @@ **纳管边缘服务**:纳管边缘服务是指由用户创建部署的边缘服务 NeuronEX,并在 ECP 平台添加纳入管理,称之为纳管边缘服务。 -ECP 支持通过 [Docker](batch_install) 和 Kubernetes的方式,直接由 ECP 来部署边缘服务。托管与纳管的边缘服务在功能上存在差异,具体如下: +ECP 支持通过 [Docker](batch_install) 和 Kubernetes(开发中)的方式,直接由 ECP 来部署边缘服务。托管与纳管的边缘服务在功能上存在差异,具体如下: |功能类别| 功能名称 | 托管边缘服务 | 纳管边缘服务 | | :--------------| :-------| :----| :----| @@ -42,14 +64,7 @@ ECP 支持通过 [Docker](batch_install) 和 Kubernetes的方式,直接由 ECP -本章将主要介绍以下主题: -- [Docker方式批量安装边缘服务](batch_install) -- [ECP 边缘节点](docker_node) -- [导入现有边缘服务](batch_import) -- [项目级监控统计](edge_project_statistics) -- [边缘服务认证](e2c) -- [标签及分组](batch_tag) -- [升级边缘服务](batch_upgrade) -- [边缘配置管理和下发](edge_resource_management) -- [边缘服务管理运维](edge_ops) + + + diff --git a/ecp/zh_CN/edge_service/kubernetes_install.md b/ecp/zh_CN/edge_service/kubernetes_install.md new file mode 100644 index 0000000..3f1df2f --- /dev/null +++ b/ecp/zh_CN/edge_service/kubernetes_install.md @@ -0,0 +1,38 @@ +# Kubernetes方式托管部署边缘服务 + +基于 Kubernetes 方式部署的 ECP 平台,ECP 可在 Kubernetes 平台内批量安装部署边缘服务,缩短边缘服务的安装与部署时间,提高部署效率和一致性。 + +## 前置条件 + +- 已完成 ECP 平台与 Kubernetes 集群的[连接设置](../system_admin/resource_config.md#配置-kubernetes-连接设置)。 +- 已完成镜像仓库配置,参考[镜像仓库配置](../system_admin/resource_config.md#配置镜像仓库)。 +- 已完成边缘服务容器镜像列表的配置,参考[边缘服务容器镜像列表配置](../system_admin/resource_config.md#配置边缘服务容器镜像列表)。 + + +## 批量安装边缘服务 + +1. 以系统/组织/项目管理员的身份登录,在**工作台**页面,点击左侧导航栏的**边缘服务**。 + +2. 点击**添加边缘服务**按钮,进入添加边缘服务页。 + +3. **添加方式**选择**批量安装新服务**,安装类型选择**Kubernetes**。 + +4. **类型**可以选择 NeuronEX。 + +5. **连接方式**默认为**直连**,不可更改。 + +6. 输入边缘服务的名称前缀,系统会根据名称前缀自动生成唯一的服务名称;1-20 个字符,并支持 "-" 和空格。 + +7. 选择一个或多个**边缘节点**,ECP 会在每一个边缘节点部署一个所选择**类型**的边缘服务实例。 + +8. 对边缘服务的配置参数进行设置,不修改将默认使用全局配置中的参数。 + +9. 选择需要安装边缘服务的镜像。 + +10. [可选]安装 NeuronEX 实例,可选择是否开启认证,具体信息,可查看[边缘服务认证](./e2c.md)。 + +11. [可选] 可以选择为边缘服务实例添加标签,方便后续维护。 + +12. 
ECP 会根据以上设定自动在页面右侧生成本次安装的信息概览,您可在此进行确认,如信息确认无误,可点击**确认**按钮,进行批量边缘服务的安装。 + +![批量安装边缘服务](./_assets/install-neuronex-by-Kubernetes.png) \ No newline at end of file diff --git a/ecp/zh_CN/monitor/alarm_rules.md b/ecp/zh_CN/monitor/alarm_rules.md index 595f6b4..141f9dd 100644 --- a/ecp/zh_CN/monitor/alarm_rules.md +++ b/ecp/zh_CN/monitor/alarm_rules.md @@ -133,22 +133,22 @@ ECP 支持设置一个或多个告警推送,不同的告警推送通过指定 ### 示例 -*POST* {自定义告警 URL} +**POST** {自定义告警 URL} -请求头部: +- 请求头部: ``` X-ECP-Alarm-Token: {自定义告警 Secret} Content-Type: application/json ``` -请求内容: +- 请求内容: -- `message` 字段必须指定,类型为字符串,表示告警的具体内容,将展示在页面上当前告警/历史告警列表中。 -- `timestamp` 字段必须指定,类型为字符串,表示告警发生的时间戳(以秒为单位)。超过 10 分钟的告警信息将不会被接收。 -- `severity` 字段值需为 0 或 1,0 表示该条告警级别为一般,1表示告警级别为严重,默认值为 0。`severity` 字段值将影响告警的通知范围,请参考上文“**告警基础设置 > 通知范围**”部分。 -- `tag` 字段为可选字段,类型为字符串,表示标签名称。如果指定 `tag` 字段,将使用该标签名称对应的推送设置进行告警推送,请参考上文“**告警推送设置**”部分。如果未指定 `tag` 字段或指定的标签名称不存在,则该告警只显示在页面当前告警/历史告警列表中,不会进行邮件或 Webhook 的推送。 -- `uuid` 字段为可选字段,类型为字符串,表示该条告警的唯一标识。如果多条自定义告警使用了相同的 UUID,则这些告警将被视为同一条告警,受到沉默时效的控制,请参考上文“**告警基础设置 > 通知沉默时效**”部分。如果未指定 `uuid` 字段,ECP 将为每条自定义告警随机生成唯一标识。 + - `message` 字段必须指定,类型为字符串,表示告警的具体内容,将展示在页面上当前告警/历史告警列表中。 + - `timestamp` 字段必须指定,类型为字符串,表示告警发生的时间戳(以秒为单位)。超过 10 分钟的告警信息将不会被接收。 + - `severity` 字段值需为 0 或 1,0 表示该条告警级别为一般,1表示告警级别为严重,默认值为 0。`severity` 字段值将影响告警的通知范围,请参考上文“**告警基础设置 > 通知范围**”部分。 + - `tag` 字段为可选字段,类型为字符串,表示标签名称。如果指定 `tag` 字段,将使用该标签名称对应的推送设置进行告警推送,请参考上文“**告警推送设置**”部分。如果未指定 `tag` 字段或指定的标签名称不存在,则该告警只显示在页面当前告警/历史告警列表中,不会进行邮件或 Webhook 的推送。 + - `uuid` 字段为可选字段,类型为字符串,表示该条告警的唯一标识。如果多条自定义告警使用了相同的 UUID,则这些告警将被视为同一条告警,受到沉默时效的控制,请参考上文“**告警基础设置 > 通知沉默时效**”部分。如果未指定 `uuid` 字段,ECP 将为每条自定义告警随机生成唯一标识。 ```json { diff --git a/ecp/zh_CN/monitor/introduction.md b/ecp/zh_CN/monitor/introduction.md index e5f9a57..588e4e2 100644 --- a/ecp/zh_CN/monitor/introduction.md +++ b/ecp/zh_CN/monitor/introduction.md @@ -94,6 +94,14 @@ ECP 的告警服务,可以自定义 Webhook 通知的模版,当然,您也 ECP 统一告警用于监控和管理云边产品,通过收集和分析各种系统和应用程序的数据,识别并通知用户系统或应用程序中的异常或故障,以便及时处理。 +- 
[告警列表](./rules.md) + + ECP 的告警列表。 + - [操作审计](../system_admin/operation_audit) - ECP 的操作审计功能会记录下平台中所有用户的关键操作,并对其进行审计和监控。它可以帮助管理员详细了解用户对平台的操作,包括访问记录、操作记录和异常行为等,以及针对这些行为进行分析和监控,及时发现和处理安全问题。 \ No newline at end of file + ECP 的操作审计功能会记录下平台中所有用户的关键操作,并对其进行审计和监控。它可以帮助管理员详细了解用户对平台的操作,包括访问记录、操作记录和异常行为等,以及针对这些行为进行分析和监控,及时发现和处理安全问题。 + +- [API 文档](https://docs.emqx.com/zh/emqx-ecp/latest/api/api-docs.html) + + ECP 的 API 文档。 \ No newline at end of file diff --git a/ecp/zh_CN/system_admin/resource_config.md b/ecp/zh_CN/system_admin/resource_config.md index 9ce0d77..f6ddf82 100644 --- a/ecp/zh_CN/system_admin/resource_config.md +++ b/ecp/zh_CN/system_admin/resource_config.md @@ -1,7 +1,6 @@ # 资源配置 -资源配置是基于 Docker 或 Kubernetes 安装部署的 ECP 对资源层的配置,主要包括 Docker 与 Kubernetes 连接设置、镜像仓库、云端集群和边缘服务的初始化配置工作。 -是基于 Kubernetes 安装部署的 ECP 初始化 Paas 层配置的模块,主要包括 [基础配置](#基础配置)、[Docker部署模式配置](#Docker部署模式配置)、[Kubernetes部署模式配置](#Kubernetes部署模式配置)三大类。 +资源配置是基于 Docker 或 Kubernetes 安装部署的 ECP 对资源层的配置,主要包括 [基础配置](#基础配置)、[Docker部署模式配置](#Docker部署模式配置)、[Kubernetes部署模式配置](#Kubernetes部署模式配置)三大类。 ## 基础配置 @@ -148,7 +147,7 @@ Kubernetes 的连接已建立、并有实际集群运行后,请尽量避免修 #### EMQX 容器镜像列表 -系统管理员可以新增、编辑或删除 EMQX 集群的镜像地址。在有公网连接的情况下,管理员可以直接配置公有镜像地址。如在使用私服镜像,也可以参照[配置镜像服务信息部分](#配置镜像服务信息)。 +系统管理员可以新增、编辑或删除 EMQX 集群的镜像地址。在有公网连接的情况下,管理员可以直接配置公有镜像地址。如在使用私服镜像,也可以参照[镜像服务信息](#镜像服务信息)。 1. 以系统管理员的身份登录 ECP,在系统管理页面,点击**系统设置** -> **资源配置**。 2. 点击展开**云端集群配置**部分,点击展开**EMQX容器镜像列表**。