refactor(i18n): snowflake connector and action #59

Merged · 2 commits · Sep 10, 2024
6 changes: 6 additions & 0 deletions packages/i18n/CHANGELOG.md
@@ -1,5 +1,11 @@
# @emqx/shared-ui-i18n

## 0.0.23

### Patch Changes

- New fields for IoTDB, Datalayers and Snowflake

## 0.0.22

### Patch Changes
20 changes: 13 additions & 7 deletions packages/i18n/lib/enActionsLabel.ts
@@ -34,6 +34,9 @@ export const enActionsLabel: Record<string, Record<string, string>> = {
partitions_limit: 'Partitions Limit',
precision: 'Time Precision',
write_syntax: 'Write Syntax',
column_order: 'Column Order',
time_interval: 'Time Interval',
max_records: 'Max Records',
},
kafka_producer: {
timestamp: 'Message Timestamp',
@@ -185,25 +188,28 @@ export const enActionsLabel: Record<string, Record<string, string>> = {
key: 'Object Key',
parameters: 'Upload Mode',
type: 'Aggregation Type',
column_order: 'Column Order',
time_interval: 'Time Interval',
max_records: 'Max Records',
min_part_size: 'Min Part Size',
max_part_size: 'Max Part Size',
},
azure_blob_storage: {
parameters: 'Upload Mode',
column_order: 'Column Order',
time_interval: 'Time Interval',
max_records: 'Max Records',
content: 'Object Content',
type: 'Aggregation Type',
blob: 'Blob Name',
container: 'Container',
},
dynamo: {
table: 'Table ',
table: 'Table',
hash_key: 'Hash Key',
range_key: 'Range Key',
},
snowflake: {
mode: 'Upload Mode',
type: 'Aggregation Type',
private_key: 'Private Key',
schema: 'Schema',
stage: 'Stage',
pipe: 'Pipe',
pipe_user: 'Pipe User',
},
}
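The label maps changed above are plain `Record<string, Record<string, string>>` objects keyed first by action type and then by field name, so a UI can resolve the new Snowflake labels with an ordinary nested lookup. Below is a minimal sketch of such a lookup with a fallback to the raw field key; the `getActionFieldLabel` helper and the deep import path are illustrative assumptions, not part of this package's public API:

```ts
// Illustrative only: the import path assumes the map is reachable this way;
// adjust to however @emqx/shared-ui-i18n actually exposes it.
import { enActionsLabel } from '@emqx/shared-ui-i18n/lib/enActionsLabel'

// Hypothetical helper: resolve a field label for a given action type,
// falling back to the raw field key when no translation is defined.
function getActionFieldLabel(actionType: string, field: string): string {
  return enActionsLabel[actionType]?.[field] ?? field
}

console.log(getActionFieldLabel('snowflake', 'pipe_user')) // "Pipe User"
console.log(getActionFieldLabel('snowflake', 'not_a_field')) // "not_a_field" (fallback)
```

The connector label maps (`enConnectorsLabel`, `zhConnectorsLabel`) have the same shape, so the same lookup pattern applies to the new `snowflake` connector entries below.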
4 changes: 4 additions & 0 deletions packages/i18n/lib/enConnectorsLabel.ts
@@ -175,4 +175,8 @@ export const enConnectorsLabel: Record<string, Record<string, string>> = {
datalayers: {
driver_type: 'Driver Type',
},
snowflake: {
account: 'Account',
dsn: 'Data Source Name(DSN)',
Review comment (Member):
Suggested change
dsn: 'Data Source Name(DSN)',
dsn: 'Data Source Name (DSN)',

},
}
13 changes: 11 additions & 2 deletions packages/i18n/lib/enIntegrationDesc.ts
@@ -37,6 +37,7 @@ export const enIntegrationDesc: Record<string, Record<string, string>> = {
disable_prepared_statements:
'Disables the usage of prepared statements in the connections. Some endpoints, like PGBouncer or Supabase in Transaction mode, do not support session features such as prepared statements. For such connections, this option should be enabled.',
health_check_topic: 'Topic name used exclusively for more accurate health checks.',
max_records: `Number of records (events) allowed per aggregated object. Each aggregated upload will contain no more than this number of events, but may contain fewer.<br/>If the event rate is high enough, there may be more than one aggregated upload during the same time interval. These uploads will have different but consecutive sequence numbers, which become part of the S3 object key.`,
},
mqtt: {
bridge_mode:
@@ -513,16 +514,24 @@ export const enIntegrationDesc: Record<string, Record<string, string>> = {
key: "The content of the object to be stored. By default, it is in JSON text format containing all fields. Supports placeholder settings such as ${'{'}payload{'}'}. The storage format depends on the format of the variable and can be stored in binary format.",
column_order: `Event fields that will be ordered first as columns in the resulting CSV file.<br/>Regardless of this setting, the resulting CSV will contain all the fields of the aggregated events, but any columns not explicitly mentioned here will be ordered after the ones listed here, in lexicographical order.`,
time_interval: 'Amount of time events will be aggregated into a single object before uploading.',
max_records: `Number of records (events) allowed per aggregated object. Each aggregated upload will contain no more than this number of events, but may contain fewer.<br/>If the event rate is high enough, there may be more than one aggregated upload during the same time interval. These uploads will have different but consecutive sequence numbers, which become part of the S3 object key.`,
min_part_size: `The minimum part size for multipart uploads.<br/>Uploaded data will be accumulated in memory until this size is reached.`,
max_part_size: `The maximum part size for multipart uploads.<br/>S3 uploader won't try to upload parts larger than this size.`,
},
azure_blob_storage: {
column_order: `Event fields that will be ordered first as columns in the resulting CSV file.<br/>Regardless of this setting, the resulting CSV will contain all the fields of the aggregated events, but any columns not explicitly mentioned here will be ordered after the ones listed here, in lexicographical order.`,
time_interval: 'Amount of time events will be aggregated into a single object before uploading.',
max_records: `Number of records (events) allowed per aggregated object. Each aggregated upload will contain no more than this number of events, but may contain fewer.<br/>If the event rate is high enough, there may be more than one aggregated upload during the same time interval. These uploads will have different but consecutive sequence numbers, which become part of the Azure Blob Storage blob name.`,
content: 'The content of the object to be uploaded. Placeholders are supported.',
blob: 'Azure Blob Storage blob name.',
container: 'Azure Blob Storage container name.',
},
snowflake: {
private_key:
'The private key configured for the Pipe User. This supports the input formats below:\n- Plain key: Enter the private key contents in PEM format directly as a string value.\n- File Path: Specify the path to a file that contains the private key. Ensure the path starts with <code>file://</code>. The file path must be the same on all nodes in the cluster.',
database: 'Name of the Database that contains the Snowflake resources.',
schema: 'Name of the Schema that contains the Snowflake resources.',
stage: "Name of the Stage that'll be used for loading data files into Snowflake.",
pipe: "Name of the Pipe that'll be used to ingest data into the table.",
pipe_user:
'A username which has a role with permissions over the Pipe to be used. The minimum permissions are `operate` and `monitor`.',
},
}
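The `private_key` description added above allows two input forms: the PEM text itself, or a `file://` path that must exist on every node in the cluster. A small sketch of how a form might distinguish the two before submitting the config; the helper name, classification logic, and example path are illustrative assumptions, not connector behaviour:

```ts
// Hypothetical helper: classify a Snowflake private key value as an inline
// PEM string, a file path, or neither, per the two documented input forms.
function classifyPrivateKeyInput(value: string): 'pem' | 'file' | 'unknown' {
  if (value.startsWith('file://')) return 'file'
  if (value.includes('-----BEGIN') && value.includes('PRIVATE KEY-----')) return 'pem'
  return 'unknown'
}

console.log(classifyPrivateKeyInput('file:///etc/emqx/certs/snowflake_pipe_user.pem')) // "file" (made-up path)
console.log(classifyPrivateKeyInput('not a key')) // "unknown"
```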
1 change: 1 addition & 0 deletions packages/i18n/lib/enSymbolLabel.ts
@@ -48,4 +48,5 @@ export const enSymbolLabel: Record<string, string> = {
protocol_v2: 'v2',
protocol_v3: 'v3',
influxdb_v1: 'InfluxDB Line Protocol',
aggregated: 'Aggregated Upload',
}
18 changes: 12 additions & 6 deletions packages/i18n/lib/zhActionsLabel.ts
@@ -34,6 +34,9 @@ export const zhActionsLabel: Record<string, Record<string, string>> = {
partitions_limit: '分区限制',
precision: '时间精度',
write_syntax: '写语句',
column_order: '列排序',
time_interval: '时间间隔',
max_records: '最大记录数',
},
kafka_producer: {
timestamp: '消息的时间戳',
@@ -185,17 +188,11 @@ export const zhActionsLabel: Record<string, Record<string, string>> = {
key: '对象键',
parameters: '上传方式',
type: '增强类型',
column_order: '列排序',
time_interval: '时间间隔',
max_records: '最大记录数',
min_part_size: '最小分块大小',
max_part_size: '最大分块大小',
},
azure_blob_storage: {
parameters: '上传方式',
column_order: '列排序',
time_interval: '时间间隔',
max_records: '最大记录数',
content: '对象内容',
type: '增强类型',
blob: 'Blob 名称',
@@ -206,4 +203,13 @@
hash_key: '哈希键',
range_key: '范围键',
},
snowflake: {
mode: '上传方式',
type: '增强类型',
private_key: '私钥',
schema: '模式',
stage: '存储区',
pipe: '管道',
pipe_user: '管道用户',
},
}
4 changes: 4 additions & 0 deletions packages/i18n/lib/zhConnectorsLabel.ts
@@ -175,4 +175,8 @@ export const zhConnectorsLabel: Record<string, Record<string, string>> = {
datalayers: {
driver_type: '驱动类型',
},
snowflake: {
account: '账户',
dsn: '数据源名称',
},
}
12 changes: 10 additions & 2 deletions packages/i18n/lib/zhIntegrationDesc.ts
@@ -33,6 +33,7 @@ export const zhIntegrationDesc: Record<string, Record<string, string>> = {
disable_prepared_statements:
'在连接中禁用预处理语句。某些端点(如事务模式下的 PGBouncer 或 Supabase)不支持会话功能(如预处理语句)。对于此类连接,应启用此选项。',
health_check_topic: '专用于精确检查健康状态的主题名称。',
max_records: `每个聚合对象允许的记录(事件)数量。每次聚合上传包含的事件数量不会超过此数值,但可能会更少。<br/>如果事件速率足够高,在同一时间间隔内显然可能会有多个聚合上传。这些上传将具有不同但连续的序列号,这些序列号将是 S3 对象键的一部分。`,
},
mqtt: {
bridge_mode:
@@ -463,17 +464,24 @@ export const zhIntegrationDesc: Record<string, Record<string, string>> = {
key: "要存储的对象的键。支持如 ${'{'}var{'}'} 的占位符设置。",
column_order: `在生成的 CSV 文件中首先按列排序的事件字段。<br/>无论此设置如何,生成的 CSV 都将包含聚合事件的所有字段,但此处未明确提及的所有列将按字典顺序排在这里列出的字段之后。`,
time_interval: '在上传前将事件聚合到单个对象中的时间量。',
max_records: `每个聚合对象允许的记录(事件)数量。每次聚合上传包含的事件数量不会超过此数值,但可能会更少。<br/>如果事件速率足够高,在同一时间间隔内显然可能会有多个聚合上传。这些上传将具有不同但连续的序列号,这些序列号将是 S3 对象键的一部分。`,
min_part_size: `分块上传的最小分块大小。<br/>上传的数据将在内存中累积,直到达到此大小。`,
max_part_size: `分块上传的最大分块大小。<br/>S3 上传程序不会尝试上传超过此大小的部分。`,
},
azure_blob_storage: {
column_order: `在生成的 CSV 文件中首先按列排序的事件字段。<br/>无论此设置如何,生成的 CSV 都将包含聚合事件的所有字段,但此处未明确提及的所有列将按字典顺序排在这里列出的字段之后。`,
time_interval: '在上传前将事件聚合到单个对象中的时间量。',
max_records: `每个聚合对象允许的记录(事件)数量。每次聚合上传包含的事件数量不会超过此数值,但可能会更少。<br/>如果事件速率足够高,在同一时间间隔内显然可能会有多个聚合上传。这些上传将具有不同但连续的序列号,这些序列号将是 Azure Blob Storage blob 名称的一部分。`,
content:
"要存储的对象的内容。默认情况下,它是包含所有字段的 JSON 文本格式。支持如 ${'{'}payload{'}'} 的占位符设置。存储格式取决于变量的格式,支持二进制内容。",
blob: 'Azure Blob Storage blob 名称。',
container: 'Azure Blob Storage 容器名称。',
},
snowflake: {
private_key:
'为 Pipe User 配置的私钥。支持以下输入格式:\n- 明文密钥:直接以字符串形式输入 PEM 格式的私钥内容。\n- 文件路径:指定包含私钥的文件路径。确保路径以 <code>file://</code> 开头。文件路径必须在集群的所有节点上相同。',
database: '包含 Snowflake 资源的数据库名称。',
schema: '包含 Snowflake 资源的模式名称。',
stage: '用于将数据文件加载到 Snowflake 的 Stage 名称。',
pipe: '用于将数据摄取到表中的 Pipe 名称。',
pipe_user: '具有使用 Pipe 权限的角色的用户名。最低要求的权限是 `operate` 和 `monitor`。',
},
}
1 change: 1 addition & 0 deletions packages/i18n/lib/zhSymbolLabel.ts
@@ -48,4 +48,5 @@ export const zhSymbolLabel: Record<string, string> = {
protocol_v2: 'v2',
protocol_v3: 'v3',
influxdb_v1: 'InfluxDB 行协议',
aggregated: '聚合上传',
}
2 changes: 1 addition & 1 deletion packages/i18n/package.json
@@ -1,6 +1,6 @@
{
"name": "@emqx/shared-ui-i18n",
"version": "0.0.22",
"version": "0.0.23",
"homepage": "https://emqx.io",
"license": "Apache-2.0",
"repository": {