diff --git a/packages/i18n/CHANGELOG.md b/packages/i18n/CHANGELOG.md index fbeb9d7..44a3521 100644 --- a/packages/i18n/CHANGELOG.md +++ b/packages/i18n/CHANGELOG.md @@ -1,5 +1,11 @@ # @emqx/shared-ui-i18n +## 0.0.23 + +### Patch Changes + +- New fields for IoTDB, Datalayers and Snowflake + ## 0.0.22 ### Patch Changes diff --git a/packages/i18n/lib/enActionsLabel.ts b/packages/i18n/lib/enActionsLabel.ts index 554c831..5ad6b5d 100644 --- a/packages/i18n/lib/enActionsLabel.ts +++ b/packages/i18n/lib/enActionsLabel.ts @@ -34,6 +34,9 @@ export const enActionsLabel: Record<string, Record<string, string>> = { partitions_limit: 'Partitions Limit', precision: 'Time Precision', write_syntax: 'Write Syntax', + column_order: 'Column Order', + time_interval: 'Time Interval', + max_records: 'Max Records', }, kafka_producer: { timestamp: 'Message Timestamp', @@ -185,25 +188,28 @@ export const enActionsLabel: Record<string, Record<string, string>> = { key: 'Object Key', parameters: 'Upload Mode', type: 'Aggregation Type', - column_order: 'Column Order', - time_interval: 'Time Interval', - max_records: 'Max Records', min_part_size: 'Min Part Size', max_part_size: 'Max Part Size', }, azure_blob_storage: { parameters: 'Upload Mode', - column_order: 'Column Order', - time_interval: 'Time Interval', - max_records: 'Max Records', content: 'Object Content', type: 'Aggregation Type', blob: 'Blob Name', container: 'Container', }, dynamo: { - table: 'Table ', + table: 'Table', hash_key: 'Hash Key', range_key: 'Range Key', }, + snowflake: { + mode: 'Upload Mode', + type: 'Aggregation Type', + private_key: 'Private Key', + schema: 'Schema', + stage: 'Stage', + pipe: 'Pipe', + pipe_user: 'Pipe User', + }, } diff --git a/packages/i18n/lib/enConnectorsLabel.ts b/packages/i18n/lib/enConnectorsLabel.ts index 9be5001..406e92b 100644 --- a/packages/i18n/lib/enConnectorsLabel.ts +++ b/packages/i18n/lib/enConnectorsLabel.ts @@ -175,4 +175,8 @@ export const enConnectorsLabel: Record<string, Record<string, string>> = { datalayers: { driver_type: 'Driver Type', }, + snowflake: { + account: 'Account', + dsn: 'Data Source Name (DSN)', + }, } diff --git a/packages/i18n/lib/enIntegrationDesc.ts b/packages/i18n/lib/enIntegrationDesc.ts index 71d1c0b..63190db 100644 --- a/packages/i18n/lib/enIntegrationDesc.ts +++ b/packages/i18n/lib/enIntegrationDesc.ts @@ -37,6 +37,7 @@ export const enIntegrationDesc: Record<string, Record<string, string>> = { disable_prepared_statements: 'Disables the usage of prepared statements in the connections. Some endpoints, like PGBouncer or Supabase in Transaction mode, do not support session features such as prepared statements. For such connections, this option should be enabled.', health_check_topic: 'Topic name used exclusively for more accurate health checks.', + max_records: `Number of records (events) allowed per aggregated object. Each aggregated upload will contain no more than that number of events, but may contain fewer.&#10;
If the event rate is high enough, there may be more than one aggregated upload during the same time interval. These uploads will have different but consecutive sequence numbers, which will be part of the name of the uploaded object.`, }, mqtt: { bridge_mode: @@ -513,16 +514,24 @@ export const enIntegrationDesc: Record<string, Record<string, string>> = { key: "The content of the object to be stored. By default, it is in JSON text format containing all fields. Supports placeholder settings such as ${'{'}payload{'}'}. The storage format depends on the format of the variable and can be stored in binary format.", column_order: `Event fields that will be ordered first as columns in the resulting CSV file.&#10;
Regardless of this setting, resulting CSV will contain all the fields of aggregated events, but all the columns not explicitly mentioned here will be ordered after the ones listed here in the lexicographical order.`, time_interval: 'Amount of time events will be aggregated in a single object before uploading.', - max_records: `Number of records (events) allowed per each aggregated object. Each aggregated upload will contain no more than that number of events, but may contain less.
If event rate is high enough, there obviously may be more than one aggregated upload during the same time interval. These uploads will have different, but consecutive sequence numbers, which will be a part of S3 object key.`, min_part_size: `The minimum part size for multipart uploads.
Uploaded data will be accumulated in memory until this size is reached.`, max_part_size: `The maximum part size for multipart uploads.
S3 uploader won't try to upload parts larger than this size.`, }, azure_blob_storage: { column_order: `Event fields that will be ordered first as columns in the resulting CSV file.
Regardless of this setting, resulting CSV will contain all the fields of aggregated events, but all the columns not explicitly mentioned here will be ordered after the ones listed here in the lexicographical order.`, time_interval: 'Amount of time events will be aggregated in a single object before uploading.', - max_records: `Number of records (events) allowed per each aggregated object. Each aggregated upload will contain no more than that number of events, but may contain less.
If event rate is high enough, there obviously may be more than one aggregated upload during the same time interval. These uploads will have different, but consecutive sequence numbers, which will be a part of Azure Blob Storage blob name.`, content: 'The content of the object to be uploaded supports placeholders.', blob: 'Azure Blob Storage blob name.', container: 'Azure Blob Storage container name.', }, + snowflake: { + private_key: + 'The private key configured for the Pipe User. The following input formats are supported:\n- Plain key: Enter the private key contents in PEM format directly as a string value.\n- File path: Specify the path to a file that contains the private key. Ensure the path starts with file://. The file path must be the same on all nodes in the cluster.', + database: 'Name of the Database that contains the Snowflake resources.', + schema: 'Name of the Schema that contains the Snowflake resources.', + stage: "Name of the Stage that will be used for loading data files into Snowflake.", + pipe: "Name of the Pipe that will be used to ingest data into the table.", + pipe_user: + 'A username whose role has permissions over the Pipe to be used. The minimum required permissions are `operate` and `monitor`.', + }, } diff --git a/packages/i18n/lib/enSymbolLabel.ts b/packages/i18n/lib/enSymbolLabel.ts index 93f2ce1..47f2efc 100644 --- a/packages/i18n/lib/enSymbolLabel.ts +++ b/packages/i18n/lib/enSymbolLabel.ts @@ -48,4 +48,5 @@ export const enSymbolLabel: Record<string, string> = { protocol_v2: 'v2', protocol_v3: 'v3', influxdb_v1: 'InfluxDB Line Protocol', + aggregated: 'Aggregated Upload', } diff --git a/packages/i18n/lib/zhActionsLabel.ts b/packages/i18n/lib/zhActionsLabel.ts index c43b58c..94c0f09 100644 --- a/packages/i18n/lib/zhActionsLabel.ts +++ b/packages/i18n/lib/zhActionsLabel.ts @@ -34,6 +34,9 @@ export const zhActionsLabel: Record<string, Record<string, string>> = { partitions_limit: '分区限制', precision: '时间精度', write_syntax: '写语句', + column_order: '列排序', + time_interval: '时间间隔', + max_records: '最大记录数', }, kafka_producer: { timestamp: '消息的时间戳', @@ -185,17 +188,11 @@ export const zhActionsLabel: Record<string, Record<string, string>> = { key: '对象键', parameters: '上传方式', type: '增强类型', - column_order: '列排序', - time_interval: '时间间隔', - max_records: '最大记录数', min_part_size: '最小分块大小', max_part_size: '最大分块大小', }, azure_blob_storage: { parameters: '上传方式', - column_order: '列排序', - time_interval: '时间间隔', - max_records: '最大记录数', content: '对象内容', type: '增强类型', blob: 'Blob 名称', container: '容器', }, @@ -206,4 +203,13 @@ hash_key: '哈希键', range_key: '范围键', }, + snowflake: { + mode: '上传方式', + type: '增强类型', + private_key: '私钥', + schema: '模式', + stage: '存储区', + pipe: '管道', + pipe_user: '管道用户', + }, } diff --git a/packages/i18n/lib/zhConnectorsLabel.ts b/packages/i18n/lib/zhConnectorsLabel.ts index 7f1a36f..2b69bdf 100644 --- a/packages/i18n/lib/zhConnectorsLabel.ts +++ b/packages/i18n/lib/zhConnectorsLabel.ts @@ -175,4 +175,8 @@ export const zhConnectorsLabel: Record<string, Record<string, string>> = { datalayers: { driver_type: '驱动类型', }, + snowflake: { + account: '账户', + dsn: '数据源名称', + }, } diff --git a/packages/i18n/lib/zhIntegrationDesc.ts b/packages/i18n/lib/zhIntegrationDesc.ts index 41022d1..493c0d4 100644 --- a/packages/i18n/lib/zhIntegrationDesc.ts +++ b/packages/i18n/lib/zhIntegrationDesc.ts @@ -33,6 +33,7 @@ export const zhIntegrationDesc: Record<string, Record<string, string>> = { disable_prepared_statements: '在连接中禁用预处理语句。某些端点(如事务模式下的 PGBouncer 或 Supabase)不支持会话功能(如预处理语句)。对于此类连接,应启用此选项。', health_check_topic: '专用于精确检查健康状态的主题名称。', + max_records: `每个聚合对象允许的记录(事件)数量。每次聚合上传包含的事件数量不会超过此数值,但可能会更少。&#10;
如果事件速率足够高,在同一时间间隔内可能会有多个聚合上传。这些上传将具有不同但连续的序列号,这些序列号将成为上传对象名称的一部分。`, }, mqtt: { bridge_mode: @@ -463,17 +464,24 @@ export const zhIntegrationDesc: Record<string, Record<string, string>> = { key: "要存储的对象的键。支持如 ${'{'}var{'}'} 的占位符设置。", column_order: `在生成的 CSV 文件中首先按列排序的事件字段。&#10;
无论此设置如何,生成的 CSV 都将包含聚合事件的所有字段,但此处未明确提及的所有列将按字典顺序排在这里列出的字段之后。`, time_interval: '在上传前将事件聚合到单个对象中的时间量。', - max_records: `每个聚合对象允许的记录(事件)数量。每次聚合上传包含的事件数量不会超过此数值,但可能会更少。
如果事件速率足够高,在同一时间间隔内显然可能会有多个聚合上传。这些上传将具有不同但连续的序列号,这些序列号将是 S3 对象键的一部分。`, min_part_size: `分块上传的最小分块大小。
上传的数据将在内存中累积,直到达到此大小。`, max_part_size: `分块上传的最大分块大小。
S3 上传程序不会尝试上传超过此大小的部分。`, }, azure_blob_storage: { column_order: `在生成的 CSV 文件中首先按列排序的事件字段。
无论此设置如何,生成的 CSV 都将包含聚合事件的所有字段,但此处未明确提及的所有列将按字典顺序排在这里列出的字段之后。`, time_interval: '在上传前将事件聚合到单个对象中的时间量。', - max_records: `每个聚合对象允许的记录(事件)数量。每次聚合上传包含的事件数量不会超过此数值,但可能会更少。
如果事件速率足够高,在同一时间间隔内显然可能会有多个聚合上传。这些上传将具有不同但连续的序列号,这些序列号将是 Azure Blob Storage blob 名称的一部分。`, content: "要存储的对象的内容。默认情况下,它是包含所有字段的 JSON 文本格式。支持如 ${'{'}payload{'}'} 的占位符设置。存储格式取决于变量的格式,支持二进制内容。", blob: 'Azure Blob Storage blob 名称。', container: 'Azure Blob Storage 容器名称。', }, + snowflake: { + private_key: + '为 Pipe User 配置的私钥。支持以下输入格式:\n- 明文密钥:直接以字符串形式输入 PEM 格式的私钥内容。\n- 文件路径:指定包含私钥的文件路径。确保路径以 file:// 开头。文件路径必须在集群的所有节点上相同。', + database: '包含 Snowflake 资源的数据库名称。', + schema: '包含 Snowflake 资源的模式名称。', + stage: '用于将数据文件加载到 Snowflake 的 Stage 名称。', + pipe: '用于将数据摄取到表中的 Pipe 名称。', + pipe_user: '具有使用 Pipe 权限的角色的用户名。最低要求的权限是 `operate` 和 `monitor`。', + }, } diff --git a/packages/i18n/lib/zhSymbolLabel.ts b/packages/i18n/lib/zhSymbolLabel.ts index f300bb9..209c0b1 100644 --- a/packages/i18n/lib/zhSymbolLabel.ts +++ b/packages/i18n/lib/zhSymbolLabel.ts @@ -48,4 +48,5 @@ export const zhSymbolLabel: Record<string, string> = { protocol_v2: 'v2', protocol_v3: 'v3', influxdb_v1: 'InfluxDB 行协议', + aggregated: '聚合上传', } diff --git a/packages/i18n/package.json b/packages/i18n/package.json index b47d94f..4577d1f 100644 --- a/packages/i18n/package.json +++ b/packages/i18n/package.json @@ -1,6 +1,6 @@ { "name": "@emqx/shared-ui-i18n", - "version": "0.0.22", + "version": "0.0.23", "homepage": "https://emqx.io", "license": "Apache-2.0", "repository": {
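
Note for reviewers: all of the label dictionaries touched by this patch share the `Record<string, Record<string, string>>` shape (bridge type → field key → display string), so a UI consumer resolves a label in two lookups. The sketch below is a minimal, hypothetical illustration of that lookup with a locale fallback — `getActionLabel`, the `Locale` type, and the relative import paths are assumptions made for the example, not part of this package's public API.

```ts
// Hypothetical consumer sketch. The imports assume this file sits at the
// package root next to lib/ (the real entry point of @emqx/shared-ui-i18n
// may differ).
import { enActionsLabel } from './lib/enActionsLabel'
import { zhActionsLabel } from './lib/zhActionsLabel'

type Locale = 'en' | 'zh'

const actionsLabelMaps: Record<Locale, Record<string, Record<string, string>>> = {
  en: enActionsLabel,
  zh: zhActionsLabel,
}

// Resolve a field label for the given locale, falling back to English and
// finally to the raw field key, so a key missing from one locale never
// renders as `undefined` in the UI.
function getActionLabel(locale: Locale, bridgeType: string, field: string): string {
  return (
    actionsLabelMaps[locale][bridgeType]?.[field] ??
    actionsLabelMaps.en[bridgeType]?.[field] ??
    field
  )
}

// Keys added in this patch resolve as expected:
getActionLabel('en', 'snowflake', 'pipe_user') // 'Pipe User'
getActionLabel('zh', 'snowflake', 'pipe_user') // '管道用户'
// 'database' gets a description in enIntegrationDesc but no label entry,
// so the raw key comes back:
getActionLabel('zh', 'snowflake', 'database') // 'database'
```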