From b622adf0e440ba9b2299eb25440dae519e98eaba Mon Sep 17 00:00:00 2001
From: Chris Chen
Date: Wed, 19 Jun 2024 18:26:38 +0800
Subject: [PATCH] add (#2573)

---
 docs-2.0-en/connector/nebula-spark-connector.md | 3 ++-
 docs-2.0-zh/connector/nebula-spark-connector.md | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs-2.0-en/connector/nebula-spark-connector.md b/docs-2.0-en/connector/nebula-spark-connector.md
index f6f18d25cd..86933774b8 100644
--- a/docs-2.0-en/connector/nebula-spark-connector.md
+++ b/docs-2.0-en/connector/nebula-spark-connector.md
@@ -202,7 +202,8 @@ val edge = spark.read.nebula(config, nebulaReadEdgeConfig).loadEdgesToDF()
 
 !!! note
 
-    The values of columns in a dataframe are automatically written to NebulaGraph as property values.
+    - The values of columns in a DataFrame are automatically written to NebulaGraph as property values.
+    - Make sure that the column names in the DataFrame are consistent with the property names in NebulaGraph. If they are inconsistent, you can use `DataFrame.withColumnRenamed` to rename the column names first.
 
 ```scala
 val config = NebulaConnectionConfig
diff --git a/docs-2.0-zh/connector/nebula-spark-connector.md b/docs-2.0-zh/connector/nebula-spark-connector.md
index 1d0907bdfe..0d0276dbe3 100644
--- a/docs-2.0-zh/connector/nebula-spark-connector.md
+++ b/docs-2.0-zh/connector/nebula-spark-connector.md
@@ -202,7 +202,8 @@ val edge = spark.read.nebula(config, nebulaReadEdgeConfig).loadEdgesToDF()
 
 !!! note
 
-    DataFrame 中的列会自动作为属性写入 {{nebula.name}} 。
+    - DataFrame 中的列会自动作为属性写入 {{nebula.name}} 。
+    - 请确保 DataFrame 中的列名和{{nebula.name}}中的属性名一致。若不一致,可通过`DataFrame.withColumnRenamed`方法修改列名。
 
 ```scala
 val config = NebulaConnectionConfig
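
A minimal Scala sketch of the renaming step that the added note describes, assuming `df`, `config`, and `nebulaWriteVertexConfig` are built as in the write example on the same docs page; the column name `user_age` and the property name `age` are hypothetical:

```scala
// Hypothetical names: the tag property in NebulaGraph is "age", but the
// DataFrame column is "user_age", so rename the column before writing.
val alignedDf = df.withColumnRenamed("user_age", "age")

// Write via the connector's DataFrame write path, with the connection and
// write configs assumed to be constructed as shown in the surrounding docs.
alignedDf.write.nebula(config, nebulaWriteVertexConfig).writeVertices()
```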