From 5cf23edc7e71585ecfbb8f19e2f74a71686a1619 Mon Sep 17 00:00:00 2001
From: Justin Mclean
Date: Thu, 17 Oct 2024 16:21:12 -0700
Subject: [PATCH 001/123] update readme to new syntax

---
 clients/cli/docs/README.md | 112 ++++++++++++++++++++++++++++++++++---
 1 file changed, 104 insertions(+), 8 deletions(-)

diff --git a/clients/cli/docs/README.md b/clients/cli/docs/README.md
index 1f3224d9b77..f46bc576d5a 100644
--- a/clients/cli/docs/README.md
+++ b/clients/cli/docs/README.md
@@ -128,49 +128,145 @@ properties: Used to list properties

List All Metalakes

```bash
-gcli list
+gcli --list
```

Get Details of a Specific Metalake

```bash
-gcli metalake details -name my-metalake
+gcli metalake --details --name my_metalake
```

List Tables in a Catalog

```bash
-gcli metalake list -name my_metalake.my_catalog
+gcli metalake --list --name my_metalake.my_catalog
```

Create a Metalake

```bash
-gcli metalake create -name my_metalake -comment "This is my metalake"
+gcli metalake --create --name my_metalake --comment "This is my metalake"
```

Create a Catalog

```bash
-gcli catalog create -name metalake_demo.iceberg --provider iceberg --metastore thrift://hive-host:9083 --warehouse hdfs://hdfs-host:9000/user/iceberg/warehouse
+gcli catalog --create --name metalake_demo.iceberg --provider iceberg --metastore thrift://hive-host:9083 --warehouse hdfs://hdfs-host:9000/user/iceberg/warehouse
```

Delete a Catalog

```bash
-gcli catalog delete -name my_metalake.my_catalog
+gcli catalog --delete --name my_metalake.my_catalog
```

Rename a Metalake

```bash
-gcli metalake update -name metalake_demo -rename demo
+gcli metalake --update --name metalake_demo --rename demo
```

Update a Metalake's comment

```bash
-gcli metalake update -name metalake_demo -comment "new comment"
+gcli metalake --update --name metalake_demo --comment "new comment"
+```
+
+List the properties of a Metalake
+
+```bash
+gcli metalake --properties --name metalake_demo
+```
+
+Set a Metalake's property
+
+```bash
+gcli metalake --set --name metalake_demo --property color --value red
+```
+
+Remove a Metalake's property
+
+```bash
+gcli metalake --remove --name metalake_demo --property color
+```
+
+Create a new User
+
+```bash
+gcli user --create --name metalake_demo --user new_user
+```
+
+Show a User's information
+
+```bash
+gcli user --details --name metalake_demo --user new_user
+```
+
+Show all Users
+
+```bash
+gcli user --list --name metalake_demo
+```
+
+Delete a User
+
+```bash
+gcli user --delete --name metalake_demo --user new_user
+```
+
+Create a Tag
+
+```bash
+gcli tag --create --name metalake_demo --tag tagA
+```
+
+List all Tags in a Metalake
+
+```bash
+gcli tag --list --name metalake_demo
+```
+
+Delete a Tag
+
+```bash
+gcli tag --delete --name metalake_demo --tag tagA
+```
+
+Add a Tag to a Schema
+
+```bash
+gcli tag --set --name metalake_demo.catalog_postgres.hr --tag hr
+```
+
+Remove a Tag from a Schema
+
+```bash
+gcli tag --remove --name metalake_demo.catalog_postgres.hr --tag hr
+```
+
+List all tags on a Schema
+
+```bash
+gcli tag --list --name metalake_demo.catalog_postgres.hr
+```
+
+Show all Roles
+
+```bash
+gcli role --list --name metalake_demo
+```
+
+Create an empty Role
+
+```bash
+gcli role --create --name metalake_demo --role admin
+```
+
+Delete a Role
+
+```bash
+gcli role --delete --name metalake_demo --role admin
```

### Setting Metalake name

From fa9ccd29a908a51d7b45716b3313e7f0b9ff7b5a Mon Sep 17 00:00:00 2001
From: lsyulong
Date: Thu, 24 Oct 2024 14:59:36 +0800
Subject: [PATCH 002/123] [#5114] Improvement:use
incubating version in docs (#5207) ### What changes were proposed in this pull request? [#5114] Improvement:use incubating version in docs ### Why are the changes needed? Fix: #5114 ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? No need --------- Co-authored-by: yuqi --- docs/apache-hive-catalog.md | 28 +++---- docs/flink-connector/flink-catalog-hive.md | 16 ++-- docs/flink-connector/flink-connector.md | 64 +++++++-------- docs/hadoop-catalog.md | 14 ++-- docs/how-to-use-gvfs.md | 41 +++++----- docs/iceberg-rest-service.md | 81 +++++++++---------- docs/security/authorization-pushdown.md | 16 ++-- docs/security/how-to-authenticate.md | 28 +++---- docs/spark-connector/spark-catalog-iceberg.md | 24 +++--- .../spark-connector/spark-integration-test.md | 20 ++--- 10 files changed, 165 insertions(+), 167 deletions(-) diff --git a/docs/apache-hive-catalog.md b/docs/apache-hive-catalog.md index 53659355da5..732183b3d34 100644 --- a/docs/apache-hive-catalog.md +++ b/docs/apache-hive-catalog.md @@ -133,25 +133,25 @@ Since 0.6.0-incubating, the data types other than listed above are mapped to Gra Table properties supply or set metadata for the underlying Hive tables. The following table lists predefined table properties for a Hive table. Additionally, you can define your own key-value pair properties and transmit them to the underlying Hive database. -| Property Name | Description | Default Value | Required | Since version | -|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| -| `location` | The location for table storage, such as `/user/hive/warehouse/test_table`. | HMS uses the database location as the parent directory by default. | No | 0.2.0 | -| `table-type` | Type of the table. Valid values include `MANAGED_TABLE` and `EXTERNAL_TABLE`. | `MANAGED_TABLE` | No | 0.2.0 | -| `format` | The table file format. Valid values include `TEXTFILE`, `SEQUENCEFILE`, `RCFILE`, `ORC`, `PARQUET`, `AVRO`, `JSON`, `CSV`, and `REGEX`. | `TEXTFILE` | No | 0.2.0 | -| `input-format` | The input format class for the table, such as `org.apache.hadoop.hive.ql.io.orc.OrcInputFormat`. | The property `format` sets the default value `org.apache.hadoop.mapred.TextInputFormat` and can change it to a different default. | No | 0.2.0 | -| `output-format` | The output format class for the table, such as `org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat`. | The property `format` sets the default value `org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat` and can change it to a different default. | No | 0.2.0 | -| `serde-lib` | The serde library class for the table, such as `org.apache.hadoop.hive.ql.io.orc.OrcSerde`. | The property `format` sets the default value `org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe` and can change it to a different default. 
| No | 0.2.0 | +| Property Name | Description | Default Value | Required | Since version | +|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------------| +| `location` | The location for table storage, such as `/user/hive/warehouse/test_table`. | HMS uses the database location as the parent directory by default. | No | 0.2.0 | +| `table-type` | Type of the table. Valid values include `MANAGED_TABLE` and `EXTERNAL_TABLE`. | `MANAGED_TABLE` | No | 0.2.0 | +| `format` | The table file format. Valid values include `TEXTFILE`, `SEQUENCEFILE`, `RCFILE`, `ORC`, `PARQUET`, `AVRO`, `JSON`, `CSV`, and `REGEX`. | `TEXTFILE` | No | 0.2.0 | +| `input-format` | The input format class for the table, such as `org.apache.hadoop.hive.ql.io.orc.OrcInputFormat`. | The property `format` sets the default value `org.apache.hadoop.mapred.TextInputFormat` and can change it to a different default. | No | 0.2.0 | +| `output-format` | The output format class for the table, such as `org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat`. | The property `format` sets the default value `org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat` and can change it to a different default. | No | 0.2.0 | +| `serde-lib` | The serde library class for the table, such as `org.apache.hadoop.hive.ql.io.orc.OrcSerde`. | The property `format` sets the default value `org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe` and can change it to a different default. | No | 0.2.0 | | `serde.parameter.` | The prefix of the serde parameter, such as `"serde.parameter.orc.create.index" = "true"`, indicating `ORC` serde lib to create row indexes | (none) | No | 0.2.0 | Hive automatically adds and manages some reserved properties. Users aren't allowed to set these properties. -| Property Name | Description | Since Version | -|-------------------------|---------------------------------------------------|---------------| +| Property Name | Description | Since Version | +|-------------------------|-------------------------------------------------|---------------| | `comment` | Used to store a table comment. | 0.2.0 | -| `numFiles` | Used to store the number of files in the table. | 0.2.0 | -| `totalSize` | Used to store the total size of the table. | 0.2.0 | -| `EXTERNAL` | Indicates whether the table is external. | 0.2.0 | -| `transient_lastDdlTime` | Used to store the last DDL time of the table. | 0.2.0 | +| `numFiles` | Used to store the number of files in the table. | 0.2.0 | +| `totalSize` | Used to store the total size of the table. | 0.2.0 | +| `EXTERNAL` | Indicates whether the table is external. | 0.2.0 | +| `transient_lastDdlTime` | Used to store the last DDL time of the table. 
| 0.2.0 | ### Table indexes diff --git a/docs/flink-connector/flink-catalog-hive.md b/docs/flink-connector/flink-catalog-hive.md index 136dac3ed20..ae55817067d 100644 --- a/docs/flink-connector/flink-catalog-hive.md +++ b/docs/flink-connector/flink-catalog-hive.md @@ -11,7 +11,7 @@ With the Apache Gravitino Flink connector, accessing data or managing metadata i Supports most DDL and DML operations in Flink SQL, except such operations: -- Function operations +- Function operations - Partition operations - View operations - Querying UDF @@ -59,13 +59,13 @@ The configuration of Flink Hive Connector is the same with the original Flink Hi Gravitino catalog property names with the prefix `flink.bypass.` are passed to Flink Hive connector. For example, using `flink.bypass.hive-conf-dir` to pass the `hive-conf-dir` to the Flink Hive connector. The validated catalog properties are listed below. Any other properties with the prefix `flink.bypass.` in Gravitino Catalog will be ignored by Gravitino Flink Connector. -| Property name in Gravitino catalog properties | Flink Hive connector configuration | Description | Since Version | -|-----------------------------------------------|------------------------------------|-----------------------|---------------| -| `flink.bypass.default-database` | `default-database` | Hive default database | 0.6.0 | -| `flink.bypass.hive-conf-dir` | `hive-conf-dir` | Hive conf dir | 0.6.0 | -| `flink.bypass.hive-version` | `hive-version` | Hive version | 0.6.0 | -| `flink.bypass.hadoop-conf-dir` | `hadoop-conf-dir` | Hadoop conf dir | 0.6.0 | -| `metastore.uris` | `hive.metastore.uris` | Hive metastore uri | 0.6.0 | +| Property name in Gravitino catalog properties | Flink Hive connector configuration | Description | Since Version | +|-----------------------------------------------|------------------------------------|-----------------------|------------------| +| `flink.bypass.default-database` | `default-database` | Hive default database | 0.6.0-incubating | +| `flink.bypass.hive-conf-dir` | `hive-conf-dir` | Hive conf dir | 0.6.0-incubating | +| `flink.bypass.hive-version` | `hive-version` | Hive version | 0.6.0-incubating | +| `flink.bypass.hadoop-conf-dir` | `hadoop-conf-dir` | Hadoop conf dir | 0.6.0-incubating | +| `metastore.uris` | `hive.metastore.uris` | Hive metastore uri | 0.6.0-incubating | :::caution You can set other hadoop properties (with the prefix `hadoop.`, `dfs.`, `fs.`, `hive.`) in Gravitino Catalog properties. If so, it will override diff --git a/docs/flink-connector/flink-connector.md b/docs/flink-connector/flink-connector.md index 639dd0d682a..8c74a617064 100644 --- a/docs/flink-connector/flink-connector.md +++ b/docs/flink-connector/flink-connector.md @@ -7,7 +7,7 @@ license: "This software is licensed under the Apache License version 2." ## Overview -The Apache Gravitino Flink connector implements the [Catalog Store](https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/table/catalogs/#catalog-store) to manage the catalogs under Gravitino. +The Apache Gravitino Flink connector implements the [Catalog Store](https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/table/catalogs/#catalog-store) to manage the catalogs under Gravitino. This capability allows users to perform federation queries, accessing data from various catalogs through a unified interface and consistent access control. ## Capabilities @@ -26,11 +26,11 @@ This capability allows users to perform federation queries, accessing data from 1. 
[Build](../how-to-build.md) or [download](https://mvnrepository.com/artifact/org.apache.gravitino/gravitino-flink-connector-runtime-1.18) the Gravitino Flink connector runtime jar, and place it in the classpath of Flink.

2. Configure the Flink configuration to use the Gravitino flink connector.

| Property                                         | Type   | Default Value     | Description                                                            | Required | Since Version    |
|--------------------------------------------------|--------|-------------------|------------------------------------------------------------------------|----------|------------------|
| table.catalog-store.kind                         | string | generic_in_memory | The Catalog Store name; it should be set to `gravitino`.               | Yes      | 0.6.0-incubating |
| table.catalog-store.gravitino.gravitino.metalake | string | (none)            | The metalake name that the Flink connector uses to request Gravitino.  | Yes      | 0.6.0-incubating |
| table.catalog-store.gravitino.gravitino.uri      | string | (none)            | The URI of the Gravitino server address.                               | Yes      | 0.6.0-incubating |

Set the flink configuration in flink-conf.yaml.
```yaml

```

EnvironmentSettings.Builder builder = EnvironmentSettings.newInstance().withConfiguration(conf);
TableEnvironment tableEnv = TableEnvironment.create(builder.inBatchMode().build());
```

3. Execute the Flink SQL query.

Suppose there is only one hive catalog with the name `hive` in the metalake `test`.

```sql
USE hive;

SELECT * FROM hive_students;
```

The Gravitino Flink connector supports the following datatype mapping between Flink and Gravitino.
| Flink Type                       | Gravitino Type                | Since Version    |
|----------------------------------|-------------------------------|------------------|
| `array`                          | `list`                        | 0.6.0-incubating |
| `bigint`                         | `long`                        | 0.6.0-incubating |
| `binary`                         | `fixed`                       | 0.6.0-incubating |
| `boolean`                        | `boolean`                     | 0.6.0-incubating |
| `char`                           | `char`                        | 0.6.0-incubating |
| `date`                           | `date`                        | 0.6.0-incubating |
| `decimal`                        | `decimal`                     | 0.6.0-incubating |
| `double`                         | `double`                      | 0.6.0-incubating |
| `float`                          | `float`                       | 0.6.0-incubating |
| `integer`                        | `integer`                     | 0.6.0-incubating |
| `map`                            | `map`                         | 0.6.0-incubating |
| `null`                           | `null`                        | 0.6.0-incubating |
| `row`                            | `struct`                      | 0.6.0-incubating |
| `smallint`                       | `short`                       | 0.6.0-incubating |
| `time`                           | `time`                        | 0.6.0-incubating |
| `timestamp`                      | `timestamp without time zone` | 0.6.0-incubating |
| `timestamp without time zone`    | `timestamp without time zone` | 0.6.0-incubating |
| `timestamp with time zone`       | `timestamp with time zone`    | 0.6.0-incubating |
| `timestamp with local time zone` | `timestamp with time zone`    | 0.6.0-incubating |
| `timestamp_ltz`                  | `timestamp with time zone`    | 0.6.0-incubating |
| `tinyint`                        | `byte`                        | 0.6.0-incubating |
| `varbinary`                      | `binary`                      | 0.6.0-incubating |
| `varchar`                        | `string`                      | 0.6.0-incubating |

diff --git a/docs/hadoop-catalog.md b/docs/hadoop-catalog.md
index d28e6d93b04..4453cb317bf 100644
--- a/docs/hadoop-catalog.md
+++ b/docs/hadoop-catalog.md
@@ -47,7 +47,7 @@ The Hadoop catalog supports multi-level authentication to control access, allowi

- **Schema**: Inherits the authentication setting from the catalog if not explicitly set. For more information about schema settings, please refer to [Schema properties](#schema-properties).
- **Fileset**: Inherits the authentication setting from the schema if not explicitly set. For more information about fileset settings, please refer to [Fileset properties](#fileset-properties).

The default value of `authentication.impersonation-enable` is false; for catalogs it also defaults to false, while for
### Catalog operations @@ -82,12 +82,12 @@ Refer to [Schema operation](./manage-fileset-metadata-using-gravitino.md#schema- ### Fileset properties -| Property name | Description | Default value | Required | Since Version | -|----------------------------------------------------|--------------------------------------------------------------------------------------------------------|--------------------------|----------|-----------------| -| `authentication.impersonation-enable` | Whether to enable impersonation for the Hadoop catalog fileset. | The parent(schema) value | No | 0.6.0 | -| `authentication.type` | The type of authentication for Hadoop catalog fileset, currently we only support `kerberos`, `simple`. | The parent(schema) value | No | 0.6.0 | -| `authentication.kerberos.principal` | The principal of the Kerberos authentication for the fileset. | The parent(schema) value | No | 0.6.0 | -| `authentication.kerberos.keytab-uri` | The URI of The keytab for the Kerberos authentication for the fileset. | The parent(schema) value | No | 0.6.0 | +| Property name | Description | Default value | Required | Since Version | +|---------------------------------------|--------------------------------------------------------------------------------------------------------|--------------------------|----------|------------------| +| `authentication.impersonation-enable` | Whether to enable impersonation for the Hadoop catalog fileset. | The parent(schema) value | No | 0.6.0-incubating | +| `authentication.type` | The type of authentication for Hadoop catalog fileset, currently we only support `kerberos`, `simple`. | The parent(schema) value | No | 0.6.0-incubating | +| `authentication.kerberos.principal` | The principal of the Kerberos authentication for the fileset. | The parent(schema) value | No | 0.6.0-incubating | +| `authentication.kerberos.keytab-uri` | The URI of The keytab for the Kerberos authentication for the fileset. | The parent(schema) value | No | 0.6.0-incubating | ### Fileset operations diff --git a/docs/how-to-use-gvfs.md b/docs/how-to-use-gvfs.md index f366297375e..3a993e7083f 100644 --- a/docs/how-to-use-gvfs.md +++ b/docs/how-to-use-gvfs.md @@ -14,7 +14,7 @@ To use `fileset` managed by Gravitino, Gravitino provides a virtual file system the Gravitino Virtual File System (GVFS): * In Java, it's built on top of the Hadoop Compatible File System(HCFS) interface. * In Python, it's built on top of the [fsspec](https://filesystem-spec.readthedocs.io/en/stable/index.html) -interface. + interface. GVFS is a virtual layer that manages the files and directories in the fileset through a virtual path, without needing to understand the specific storage details of the fileset. You can access @@ -164,18 +164,18 @@ fs.getFileStatus(filesetPath); 1. Add the GVFS runtime jar to the Spark environment. - You can use `--packages` or `--jars` in the Spark submit shell to include the Gravitino Virtual - File System runtime jar, like so: + You can use `--packages` or `--jars` in the Spark submit shell to include the Gravitino Virtual + File System runtime jar, like so: ```shell ./${SPARK_HOME}/bin/spark-submit --packages org.apache.gravitino:filesystem-hadoop3-runtime:${version} ``` - If you want to include the Gravitino Virtual File System runtime jar in your Spark installation, add it to the `${SPARK_HOME}/jars/` folder. + If you want to include the Gravitino Virtual File System runtime jar in your Spark installation, add it to the `${SPARK_HOME}/jars/` folder. 2. 
Configure the Hadoop configuration when submitting the job. - You can configure in the shell command in this way: + You can configure in the shell command in this way: ```shell --conf spark.hadoop.fs.AbstractFileSystem.gvfs.impl=org.apache.gravitino.filesystem.hadoop.Gvfs @@ -186,7 +186,7 @@ fs.getFileStatus(filesetPath); 3. Perform operations on the fileset storage in your code. - Finally, you can access the fileset storage in your Spark program: + Finally, you can access the fileset storage in your Spark program: ```scala // Scala code @@ -206,9 +206,9 @@ For Tensorflow to support GVFS, you need to recompile the [tensorflow-io](https: 1. First, add a patch and recompile tensorflow-io. - You need to add a [patch](https://github.com/tensorflow/io/pull/1970) to support GVFS on - tensorflow-io. Then you can follow the [tutorial](https://github.com/tensorflow/io/blob/master/docs/development.md) - to recompile your code and release the tensorflow-io module. + You need to add a [patch](https://github.com/tensorflow/io/pull/1970) to support GVFS on + tensorflow-io. Then you can follow the [tutorial](https://github.com/tensorflow/io/blob/master/docs/development.md) + to recompile your code and release the tensorflow-io module. 2. Then you need to configure the Hadoop configuration. @@ -335,18 +335,17 @@ to recompile the native libraries like `libhdfs` and others, and completely repl ### Configuration -| Configuration item | Description | Default value | Required | Since version | -|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|-----------------------------------|---------------| -| `server_uri` | The Gravitino server uri, e.g. `http://localhost:8090`. | (none) | Yes | 0.6.0 | -| `metalake_name` | The metalake name which the fileset belongs to. | (none) | Yes | 0.6.0 | -| `cache_size` | The cache capacity of the Gravitino Virtual File System. | `20` | No | 0.6.0 | -| `cache_expired_time` | The value of time that the cache expires after accessing in the Gravitino Virtual File System. The value is in `seconds`. | `3600` | No | 0.6.0 | -| `auth_type` | The auth type to initialize the Gravitino client to use with the Gravitino Virtual File System. Currently supports `simple` and `oauth2` auth types. | `simple` | No | 0.6.0 | -| `oauth2_server_uri` | The auth server URI for the Gravitino client when using `oauth2` auth type. | (none) | Yes if you use `oauth2` auth type | 0.7.0 | -| `oauth2_credential` | The auth credential for the Gravitino client when using `oauth2` auth type. | (none) | Yes if you use `oauth2` auth type | 0.7.0 | -| `oauth2_path` | The auth server path for the Gravitino client when using `oauth2` auth type. Please remove the first slash `/` from the path, for example `oauth/token`. | (none) | Yes if you use `oauth2` auth type | 0.7.0 | -| `oauth2_scope` | The auth scope for the Gravitino client when using `oauth2` auth type with the Gravitino Virtual File System. | (none) | Yes if you use `oauth2` auth type | 0.7.0 | - +| Configuration item | Description | Default value | Required | Since version | +|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|-----------------------------------|------------------| +| `server_uri` | The Gravitino server uri, e.g. `http://localhost:8090`. 
| (none) | Yes | 0.6.0-incubating | +| `metalake_name` | The metalake name which the fileset belongs to. | (none) | Yes | 0.6.0-incubating | +| `cache_size` | The cache capacity of the Gravitino Virtual File System. | `20` | No | 0.6.0-incubating | +| `cache_expired_time` | The value of time that the cache expires after accessing in the Gravitino Virtual File System. The value is in `seconds`. | `3600` | No | 0.6.0-incubating | +| `auth_type` | The auth type to initialize the Gravitino client to use with the Gravitino Virtual File System. Currently supports `simple` and `oauth2` auth types. | `simple` | No | 0.6.0-incubating | +| `oauth2_server_uri` | The auth server URI for the Gravitino client when using `oauth2` auth type. | (none) | Yes if you use `oauth2` auth type | 0.7.0-incubating | +| `oauth2_credential` | The auth credential for the Gravitino client when using `oauth2` auth type. | (none) | Yes if you use `oauth2` auth type | 0.7.0-incubating | +| `oauth2_path` | The auth server path for the Gravitino client when using `oauth2` auth type. Please remove the first slash `/` from the path, for example `oauth/token`. | (none) | Yes if you use `oauth2` auth type | 0.7.0-incubating | +| `oauth2_scope` | The auth scope for the Gravitino client when using `oauth2` auth type with the Gravitino Virtual File System. | (none) | Yes if you use `oauth2` auth type | 0.7.0-incubating | You can configure these properties when obtaining the `Gravitino Virtual FileSystem` in Python like this: diff --git a/docs/iceberg-rest-service.md b/docs/iceberg-rest-service.md index 4217350dac9..ad44a20145d 100644 --- a/docs/iceberg-rest-service.md +++ b/docs/iceberg-rest-service.md @@ -27,7 +27,7 @@ There are three deployment scenarios for Gravitino Iceberg REST server: - A standalone server in a standalone Gravitino Iceberg REST server package. - A standalone server in the Gravitino server package. - An auxiliary service embedded in the Gravitino server. - + For detailed instructions on how to build and install the Gravitino server package, please refer to [How to build](./how-to-build.md) and [How to install](./how-to-install.md). To build the Gravitino Iceberg REST server package, use the command `./gradlew compileIcebergRESTServer -x test`. Alternatively, to create the corresponding compressed package in the distribution directory, use `./gradlew assembleIcebergRESTServer -x test`. The Gravitino Iceberg REST server package includes the following files: ```text @@ -46,7 +46,7 @@ For detailed instructions on how to build and install the Gravitino server packa ## Apache Gravitino Iceberg REST catalog server configuration -There are distinct configuration files for standalone and auxiliary server: `gravitino-iceberg-rest-server.conf` is used for the standalone server, while `gravitino.conf` is for the auxiliary server. Although the configuration files differ, the configuration items remain the same. +There are distinct configuration files for standalone and auxiliary server: `gravitino-iceberg-rest-server.conf` is used for the standalone server, while `gravitino.conf` is for the auxiliary server. Although the configuration files differ, the configuration items remain the same. Starting with version `0.6.0-incubating`, the prefix `gravitino.auxService.iceberg-rest.` for auxiliary server configurations has been deprecated. If both `gravitino.auxService.iceberg-rest.key` and `gravitino.iceberg-rest.key` are present, the latter will take precedence. 
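As an illustrative sketch of that precedence rule (the `httpPort` key is used only as an example; any Iceberg REST configuration key behaves the same way):

```text
# Both prefixes set the same key; the `gravitino.iceberg-rest.` value (9101) wins.
gravitino.auxService.iceberg-rest.httpPort = 9001
gravitino.iceberg-rest.httpPort = 9101
```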
The configurations listed below use the `gravitino.iceberg-rest.` prefix. @@ -102,13 +102,13 @@ The detailed configuration items are as follows: Gravitino Iceberg REST service supports using static access-key-id and secret-access-key to access S3 data. -| Configuration item | Description | Default value | Required | Since Version | -|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.aws.s3.S3FileIO` for S3. | (none) | No | 0.6.0 | -| `gravitino.iceberg-rest.s3-access-key-id` | The static access key ID used to access S3 data. | (none) | No | 0.6.0 | -| `gravitino.iceberg-rest.s3-secret-access-key` | The static secret access key used to access S3 data. | (none) | No | 0.6.0 | -| `gravitino.iceberg-rest.s3-endpoint` | An alternative endpoint of the S3 service, This could be used for S3FileIO with any s3-compatible object storage service that has a different endpoint, or access a private S3 endpoint in a virtual private cloud. | (none) | No | 0.6.0 | -| `gravitino.iceberg-rest.s3-region` | The region of the S3 service, like `us-west-2`. | (none) | No | 0.6.0 | +| Configuration item | Description | Default value | Required | Since Version | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.aws.s3.S3FileIO` for S3. | (none) | No | 0.6.0-incubating | +| `gravitino.iceberg-rest.s3-access-key-id` | The static access key ID used to access S3 data. | (none) | No | 0.6.0-incubating | +| `gravitino.iceberg-rest.s3-secret-access-key` | The static secret access key used to access S3 data. | (none) | No | 0.6.0-incubating | +| `gravitino.iceberg-rest.s3-endpoint` | An alternative endpoint of the S3 service, This could be used for S3FileIO with any s3-compatible object storage service that has a different endpoint, or access a private S3 endpoint in a virtual private cloud. | (none) | No | 0.6.0-incubating | +| `gravitino.iceberg-rest.s3-region` | The region of the S3 service, like `us-west-2`. | (none) | No | 0.6.0-incubating | For other Iceberg s3 properties not managed by Gravitino like `s3.sse.type`, you could config it directly by `gravitino.iceberg-rest.s3.sse.type`. @@ -120,12 +120,12 @@ To configure the JDBC catalog backend, set the `gravitino.iceberg-rest.warehouse Gravitino Iceberg REST service supports using static access-key-id and secret-access-key to access OSS data. -| Configuration item | Description | Default value | Required | Since Version | -|------------------------------------------------|-------------------------------------------------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.aliyun.oss.OSSFileIO` for OSS. 
| (none) | No | 0.6.0 | -| `gravitino.iceberg-rest.oss-access-key-id` | The static access key ID used to access OSS data. | (none) | No | 0.7.0 | -| `gravitino.iceberg-rest.oss-secret-access-key` | The static secret access key used to access OSS data. | (none) | No | 0.7.0 | -| `gravitino.iceberg-rest.oss-endpoint` | The endpoint of Aliyun OSS service. | (none) | No | 0.7.0 | +| Configuration item | Description | Default value | Required | Since Version | +|------------------------------------------------|-------------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.aliyun.oss.OSSFileIO` for OSS. | (none) | No | 0.6.0-incubating | +| `gravitino.iceberg-rest.oss-access-key-id` | The static access key ID used to access OSS data. | (none) | No | 0.7.0-incubating | +| `gravitino.iceberg-rest.oss-secret-access-key` | The static secret access key used to access OSS data. | (none) | No | 0.7.0-incubating | +| `gravitino.iceberg-rest.oss-endpoint` | The endpoint of Aliyun OSS service. | (none) | No | 0.7.0-incubating | For other Iceberg OSS properties not managed by Gravitino like `client.security-token`, you could config it directly by `gravitino.iceberg-rest.client.security-token`. @@ -137,9 +137,9 @@ Please set the `gravitino.iceberg-rest.warehouse` parameter to `oss://{bucket_na Supports using google credential file to access GCS data. -| Configuration item | Description | Default value | Required | Since Version | -|----------------------------------|----------------------------------------------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.io-impl` | The io implementation for `FileIO` in Iceberg, use `org.apache.iceberg.gcp.gcs.GCSFileIO` for GCS. | (none) | No | 0.6.0 | +| Configuration item | Description | Default value | Required | Since Version | +|----------------------------------|----------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.io-impl` | The io implementation for `FileIO` in Iceberg, use `org.apache.iceberg.gcp.gcs.GCSFileIO` for GCS. | (none) | No | 0.6.0-incubating | For other Iceberg GCS properties not managed by Gravitino like `gcs.project-id`, you could config it directly by `gravitino.iceberg-rest.gcs.project-id`. @@ -161,9 +161,9 @@ Builds with Hadoop 2.10.x. There may be compatibility issues when accessing Hado For other storages that are not managed by Gravitino directly, you can manage them through custom catalog properties. -| Configuration item | Description | Default value | Required | Since Version | -|----------------------------------|-----------------------------------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, please use the full qualified classname. 
| (none) | No | 0.6.0 | +| Configuration item | Description | Default value | Required | Since Version | +|----------------------------------|-----------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, please use the full qualified classname. | (none) | No | 0.6.0-incubating | To pass custom properties such as `security-token` to your custom `FileIO`, you can directly configure it by `gravitino.iceberg-rest.security-token`. `security-token` will be included in the properties when the initialize method of `FileIO` is invoked. @@ -206,10 +206,11 @@ You must download the corresponding JDBC driver to the `iceberg-rest-server/libs ::: #### Custom backend configuration -| Configuration item | Description | Default value | Required | Since Version | -|------------------------------------------------|---------------------------------------------------------------------------------------------------------------------|------------------|----------|---------------| -| `gravitino.iceberg-rest.catalog-backend` | The Catalog backend of the Gravitino Iceberg REST catalog service. Use the value **`custom`** for a Custom catalog. | `memory` | Yes | 0.2.0 | -| `gravitino.iceberg-rest.catalog-backend-impl` | The fully-qualified class name of a custom catalog implementation, only worked if `catalog-backend` is `custom`. | (none) | No | 0.7.0 | + +| Configuration item | Description | Default value | Required | Since Version | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.catalog-backend` | The Catalog backend of the Gravitino Iceberg REST catalog service. Use the value **`custom`** for a Custom catalog. | `memory` | Yes | 0.2.0 | +| `gravitino.iceberg-rest.catalog-backend-impl` | The fully-qualified class name of a custom catalog implementation, only worked if `catalog-backend` is `custom`. | (none) | No | 0.7.0-incubating | If you want to use a custom Iceberg Catalog as `catalog-backend`, you can add a corresponding jar file to the classpath and load a custom Iceberg Catalog implementation by specifying the `catalog-backend-impl` property. @@ -217,18 +218,17 @@ If you want to use a custom Iceberg Catalog as `catalog-backend`, you can add a You could access the view interface if using JDBC backend and enable `jdbc.schema-version` property. -| Configuration item | Description | Default value | Required | Since Version | -|-------------------------------------------------|--------------------------------------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.jdbc.schema-version` | The schema version of JDBC catalog backend, setting to `V1` if supporting view operations. | (none) | NO | 0.7.0 | - +| Configuration item | Description | Default value | Required | Since Version | +|----------------------------------------------|--------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.jdbc.schema-version` | The schema version of JDBC catalog backend, setting to `V1` if supporting view operations. 
| (none) | NO | 0.7.0-incubating | #### Multi catalog support The Gravitino Iceberg REST server supports multiple catalogs and offers a configuration-based catalog management system. -| Configuration item | Description | Default value | Required | Since Version | -|----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------|----------|---------------| -| `gravitino.iceberg-rest.catalog-provider` | Catalog provider class name, you can develop a class that implements `IcebergTableOpsProvider` and add the corresponding jar file to the Iceberg REST service classpath directory. | `config-based-provider` | No | 0.7.0 | +| Configuration item | Description | Default value | Required | Since Version | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|----------|------------------| +| `gravitino.iceberg-rest.catalog-provider` | Catalog provider class name, you can develop a class that implements `IcebergTableOpsProvider` and add the corresponding jar file to the Iceberg REST service classpath directory. | `config-based-provider` | No | 0.7.0-incubating | ##### Configuration based catalog provider @@ -273,11 +273,11 @@ You can access different catalogs by setting the `prefix` to the specific catalo When using a Gravitino server based catalog provider, you can leverage Gravitino to support dynamic catalog management for the Iceberg REST server. -| Configuration item | Description | Default value | Required | Since Version | -|--------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.gravitino-uri` | The uri of Gravitino server address, only worked if `catalog-provider` is `gravitino-based-provider`. | (none) | No | 0.7.0 | -| `gravitino.iceberg-rest.gravitino-metalake` | The metalake name that `gravitino-based-provider` used to request to Gravitino, only worked if `catalog-provider` is `gravitino-based-provider`. | (none) | No | 0.7.0 | -| `gravitino.iceberg-rest.catalog-cache-eviction-interval-ms` | Catalog cache eviction interval. | 3600000 | No | 0.7.0 | +| Configuration item | Description | Default value | Required | Since Version | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.gravitino-uri` | The uri of Gravitino server address, only worked if `catalog-provider` is `gravitino-based-provider`. | (none) | No | 0.7.0-incubating | +| `gravitino.iceberg-rest.gravitino-metalake` | The metalake name that `gravitino-based-provider` used to request to Gravitino, only worked if `catalog-provider` is `gravitino-based-provider`. | (none) | No | 0.7.0-incubating | +| `gravitino.iceberg-rest.catalog-cache-eviction-interval-ms` | Catalog cache eviction interval. 
| 3600000 | No | 0.7.0-incubating | ```text gravitino.iceberg-rest.catalog-cache-eviction-interval-ms = 300000 @@ -311,10 +311,9 @@ Gravitino provides a pluggable metrics store interface to store and delete Icebe ### Misc configurations -| Configuration item | Description | Default value | Required | Since Version | -|---------------------------------------------|--------------------------------------------------------------|---------------|----------|---------------| -| `gravitino.iceberg-rest.extension-packages` | Comma-separated list of Iceberg REST API packages to expand. | (none) | No | 0.7.0 | - +| Configuration item | Description | Default value | Required | Since Version | +|---------------------------------------------|--------------------------------------------------------------|---------------|----------|------------------| +| `gravitino.iceberg-rest.extension-packages` | Comma-separated list of Iceberg REST API packages to expand. | (none) | No | 0.7.0-incubating | ## Starting the Iceberg REST server diff --git a/docs/security/authorization-pushdown.md b/docs/security/authorization-pushdown.md index e521402f6e3..148e76b5f81 100644 --- a/docs/security/authorization-pushdown.md +++ b/docs/security/authorization-pushdown.md @@ -17,14 +17,14 @@ This module translates Gravitino's authorization model into the permission rules In order to use the Authorization Ranger Hive Plugin, you need to configure the following properties and [Apache Hive catalog properties](../apache-hive-catalog.md#catalog-properties): -| Property Name | Description | Default Value | Required | Since Version | -|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|---------------| -| `authorization-provider` | Providers to use to implement authorization plugin such as `ranger`. | (none) | No | 0.6.0 | -| `authorization.ranger.admin.url` | The Apache Ranger web URIs. | (none) | No | 0.6.0 | -| `authorization.ranger.auth.type` | The Apache Ranger authentication type `simple` or `kerberos`. | `simple` | No | 0.6.0 | -| `authorization.ranger.username` | The Apache Ranger admin web login username (auth type=simple), or kerberos principal(auth type=kerberos), Need have Ranger administrator permission. | (none) | No | 0.6.0 | -| `authorization.ranger.password` | The Apache Ranger admin web login user password (auth type=simple), or path of the keytab file(auth type=kerberos) | (none) | No | 0.6.0 | -| `authorization.ranger.service.name` | The Apache Ranger service name. | (none) | No | 0.6.0 | +| Property Name | Description | Default Value | Required | Since Version | +|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `authorization-provider` | Providers to use to implement authorization plugin such as `ranger`. | (none) | No | 0.6.0-incubating | +| `authorization.ranger.admin.url` | The Apache Ranger web URIs. | (none) | No | 0.6.0-incubating | +| `authorization.ranger.auth.type` | The Apache Ranger authentication type `simple` or `kerberos`. | `simple` | No | 0.6.0-incubating | +| `authorization.ranger.username` | The Apache Ranger admin web login username (auth type=simple), or kerberos principal(auth type=kerberos), Need have Ranger administrator permission. 
| (none) | No | 0.6.0-incubating | +| `authorization.ranger.password` | The Apache Ranger admin web login user password (auth type=simple), or path of the keytab file(auth type=kerberos) | (none) | No | 0.6.0-incubating | +| `authorization.ranger.service.name` | The Apache Ranger service name. | (none) | No | 0.6.0-incubating | Once you have used the correct configuration, you can perform authorization operations by calling Gravitino [authorization RESTful API](https://gravitino.apache.org/docs/latest/api/rest/grant-roles-to-a-user). diff --git a/docs/security/how-to-authenticate.md b/docs/security/how-to-authenticate.md index c98676350e4..61c2295f091 100644 --- a/docs/security/how-to-authenticate.md +++ b/docs/security/how-to-authenticate.md @@ -40,8 +40,8 @@ Gravitino only supports external OAuth 2.0 servers. To enable OAuth mode, users - First, users need to guarantee that the external correctly configured OAuth 2.0 server supports Bearer JWT. - Then, on the server side, users should set `gravitino.authenticators` as `oauth` and give -`gravitino.authenticator.oauth.defaultSignKey`, `gravitino.authenticator.oauth.serverUri` and -`gravitino.authenticator.oauth.tokenPath` a proper value. + `gravitino.authenticator.oauth.defaultSignKey`, `gravitino.authenticator.oauth.serverUri` and + `gravitino.authenticator.oauth.tokenPath` a proper value. - Next, for the client side, users can enable `OAuth` mode by the following code: ```java @@ -88,18 +88,18 @@ The URI must use the hostname of server instead of IP. ### Server configuration -| Configuration item | Description | Default value | Required | Since version | -|---------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------|--------------------------------------------|---------------| -| `gravitino.authenticator` | It is deprecated since Gravitino 0.6.0. Please use `gravitino.authenticators` instead. | `simple` | No | 0.3.0 | -| `gravitino.authenticators` | The authenticators which Gravitino uses, setting as `simple`,`oauth` or `kerberos`. Multiple authenticators are separated by commas. If a request is supported by multiple authenticators simultaneously, the first authenticator will be used by default. | `simple` | No | 0.6.0 | -| `gravitino.authenticator.oauth.serviceAudience` | The audience name when Gravitino uses OAuth as the authenticator. | `GravitinoServer` | No | 0.3.0 | -| `gravitino.authenticator.oauth.allowSkewSecs` | The JWT allows skew seconds when Gravitino uses OAuth as the authenticator. | `0` | No | 0.3.0 | -| `gravitino.authenticator.oauth.defaultSignKey` | The signing key of JWT when Gravitino uses OAuth as the authenticator. | (none) | Yes if use `oauth` as the authenticator | 0.3.0 | -| `gravitino.authenticator.oauth.signAlgorithmType` | The signature algorithm when Gravitino uses OAuth as the authenticator. | `RS256` | No | 0.3.0 | -| `gravitino.authenticator.oauth.serverUri` | The URI of the default OAuth server. | (none) | Yes if use `oauth` as the authenticator | 0.3.0 | -| `gravitino.authenticator.oauth.tokenPath` | The path for token of the default OAuth server. | (none) | Yes if use `oauth` as the authenticator | 0.3.0 | -| `gravitino.authenticator.kerberos.principal` | Indicates the Kerberos principal to be used for HTTP endpoint. 
Principal should start with `HTTP/`. | (none) | Yes if use `kerberos` as the authenticator | 0.4.0 |
| `gravitino.authenticator.kerberos.keytab` | Location of the keytab file with the credentials for the principal. | (none) | Yes if use `kerberos` as the authenticator | 0.4.0 |

The signature algorithms that Gravitino supports are as follows:

diff --git a/docs/spark-connector/spark-catalog-iceberg.md b/docs/spark-connector/spark-catalog-iceberg.md
index f0b1f2f6419..dca23db6a33 100644
--- a/docs/spark-connector/spark-catalog-iceberg.md
+++ b/docs/spark-connector/spark-catalog-iceberg.md
@@ -95,23 +95,23 @@ SELECT * FROM employee FOR SYSTEM_TIME AS OF '2024-05-27 01:01:00';
DESC EXTENDED employee;
```

For more details about `CALL`, please refer to the [Spark Procedures description](https://iceberg.apache.org/docs/1.5.2/spark-procedures/#spark-procedures) in the Iceberg official documentation.

## Catalog properties

The Gravitino Spark connector transforms the property names below, which are defined in catalog properties, into Spark Iceberg connector configuration.
-| Gravitino catalog property name | Spark Iceberg connector configuration | Description | Since Version | -|---------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| -| `catalog-backend` | `type` | Catalog backend type | 0.5.0 | -| `uri` | `uri` | Catalog backend uri | 0.5.0 | -| `warehouse` | `warehouse` | Catalog backend warehouse | 0.5.0 | -| `jdbc-user` | `jdbc.user` | JDBC user name | 0.5.0 | -| `jdbc-password` | `jdbc.password` | JDBC password | 0.5.0 | -| `io-impl` | `io-impl` | The io implementation for `FileIO` in Iceberg. | 0.6.0 | -| `s3-endpoint` | `s3.endpoint` | An alternative endpoint of the S3 service, This could be used for S3FileIO with any s3-compatible object storage service that has a different endpoint, or access a private S3 endpoint in a virtual private cloud. | 0.6.0 | -| `s3-region` | `client.region` | The region of the S3 service, like `us-west-2`. | 0.6.0 | -| `oss-endpoint` | `oss.endpoint` | The endpoint of Aliyun OSS service. | 0.7.0 | +| Gravitino catalog property name | Spark Iceberg connector configuration | Description | Since Version | +|---------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------| +| `catalog-backend` | `type` | Catalog backend type | 0.5.0 | +| `uri` | `uri` | Catalog backend uri | 0.5.0 | +| `warehouse` | `warehouse` | Catalog backend warehouse | 0.5.0 | +| `jdbc-user` | `jdbc.user` | JDBC user name | 0.5.0 | +| `jdbc-password` | `jdbc.password` | JDBC password | 0.5.0 | +| `io-impl` | `io-impl` | The io implementation for `FileIO` in Iceberg. | 0.6.0-incubating | +| `s3-endpoint` | `s3.endpoint` | An alternative endpoint of the S3 service, This could be used for S3FileIO with any s3-compatible object storage service that has a different endpoint, or access a private S3 endpoint in a virtual private cloud. | 0.6.0-incubating | +| `s3-region` | `client.region` | The region of the S3 service, like `us-west-2`. | 0.6.0-incubating | +| `oss-endpoint` | `oss.endpoint` | The endpoint of Aliyun OSS service. | 0.7.0-incubating | Gravitino catalog property names with the prefix `spark.bypass.` are passed to Spark Iceberg connector. For example, using `spark.bypass.clients` to pass the `clients` to the Spark Iceberg connector. diff --git a/docs/spark-connector/spark-integration-test.md b/docs/spark-connector/spark-integration-test.md index 35ad27b56ef..cba1c104dc5 100644 --- a/docs/spark-connector/spark-integration-test.md +++ b/docs/spark-connector/spark-integration-test.md @@ -7,7 +7,7 @@ license: "This software is licensed under the Apache License version 2." ## Overview -There are two types of integration tests in spark connector, normal integration test like `SparkXXCatalogIT`, and the golden file integration test. +There are two types of integration tests in spark connector, normal integration test like `SparkXXCatalogIT`, and the golden file integration test. 
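As a rough orientation, the two kinds are launched along these lines (a hedged sketch: the module path, test-class filter, and property flag are illustrative and may differ from the actual Gradle setup):

```bash
# Normal integration test: run a catalog IT class directly.
./gradlew :spark-connector:spark-3.4:test --tests "*SparkHiveCatalogIT*"

# Golden file integration test: opt in explicitly via a project property.
./gradlew :spark-connector:spark-common:test --tests "*SparkSQLRegressionTest*" -PenableSparkSQLITs
```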
## Normal integration test

## Golden file integration test

Golden file integration tests are mainly used to test the correctness of the SQL result

Please change the Spark version number if you want to test other Spark versions. If you want to change the test behaviour, please modify `spark-connector/spark-common/src/test/resources/spark-test.conf`.

| Configuration item                         | Description                                                                                                                                                                                    | Default value                                        | Required | Since Version    |
|--------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------|----------|------------------|
| `gravitino.spark.test.dir`                 | The Spark SQL test base dir, including `test-sqls` and `data`.                                                                                                                                 | `spark-connector/spark-common/src/test/resources/`  | No       | 0.6.0-incubating |
| `gravitino.spark.test.sqls`                | Specify the test SQLs: use a directory to specify a group of SQLs like `test-sqls/hive`, a file path to specify a single SQL like `test-sqls/hive/basic.sql`, and `,` to separate multiple parts | run all SQLs                                         | No       | 0.6.0-incubating |
| `gravitino.spark.test.generateGoldenFiles` | Whether to generate golden files, which are used to check the correctness of the SQL result                                                                                                   | false                                                | No       | 0.6.0-incubating |
| `gravitino.spark.test.metalake`            | The metalake name to run the test                                                                                                                                                              | `test`                                               | No       | 0.6.0-incubating |
| `gravitino.spark.test.setupEnv`            | Whether to set up the Gravitino and Hive environment                                                                                                                                           | `false`                                              | No       | 0.6.0-incubating |
| `gravitino.spark.test.uri`                 | Gravitino URI address, only available when `gravitino.spark.test.setupEnv` is false                                                                                                            | http://127.0.0.1:8090                                | No       | 0.6.0-incubating |
| `gravitino.spark.test.iceberg.warehouse`   | The warehouse location, only available when `gravitino.spark.test.setupEnv` is false                                                                                                           | hdfs://127.0.0.1:9000/user/hive/warehouse-spark-test | No       | 0.6.0-incubating |

The test SQL files are located in `spark-connector/spark-common/src/test/resources/` by default.
There are three directories: - `hive`, SQL tests for Hive catalog.

From 67ac7f9473b3c2781d32eb12e6d9e4d1ec4f5672 Mon Sep 17 00:00:00 2001 From: Liang Chun Date: Thu, 24 Oct 2024 01:03:28 -0700 Subject: [PATCH 003/123] [#5213] Improvement: Fix the flaky test (#5234)

### What changes were proposed in this pull request?

Increase the max_retries to prevent timeouts

### Why are the changes needed?

Fix: #5213

### Does this PR introduce _any_ user-facing change?

No, only the UT was changed.

### How was this patch tested?

`./gradlew :catalogs:catalog-hive:test`
--- .../org/apache/gravitino/catalog/hive/TestFetchFileUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/TestFetchFileUtils.java b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/TestFetchFileUtils.java index f949f724965..31904e669f0 100644 --- a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/TestFetchFileUtils.java +++ b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/TestFetchFileUtils.java @@ -30,7 +30,7 @@ public class TestFetchFileUtils {

 private static final Logger LOG = LoggerFactory.getLogger(TestFetchFileUtils.class);

- private static final int MAX_RETRIES = 3;
+ private static final int MAX_RETRIES = 8;
 private static final long INITIAL_RETRY_DELAY_MS = 1000;

 @Test

From 92d9fc473fc9c1bd26085014e395f771f164b4f0 Mon Sep 17 00:00:00 2001 From: Qian Xia Date: Thu, 24 Oct 2024 17:09:07 +0800 Subject: [PATCH 004/123] [#5168][#5169][#5170] Feature(web): Add ui support for creating, editing, viewing, and deleting fileset (#5223)

### What changes were proposed in this pull request?

Add UI support for creating, editing, viewing, and deleting filesets

### Why are the changes needed?

N/A

Fix: #5168 Fix: #5169 Fix: #5170

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

manually
--- .../rightContent/CreateFilesetDialog.js | 510 ++++++++++++++++++ .../metalake/rightContent/RightContent.js | 39 +- .../tabsContent/tableView/TableView.js | 50 +- web/web/src/lib/api/filesets/index.js | 20 +- web/web/src/lib/store/metalakes/index.js | 69 ++- 5 files changed, 682 insertions(+), 6 deletions(-) create mode 100644 web/web/src/app/metalakes/metalake/rightContent/CreateFilesetDialog.js

diff --git a/web/web/src/app/metalakes/metalake/rightContent/CreateFilesetDialog.js b/web/web/src/app/metalakes/metalake/rightContent/CreateFilesetDialog.js new file mode 100644 index 00000000000..cb19015458b --- /dev/null +++ b/web/web/src/app/metalakes/metalake/rightContent/CreateFilesetDialog.js @@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +'use client' + +import { useState, forwardRef, useEffect, Fragment } from 'react' + +import { + Box, + Grid, + Button, + Dialog, + TextField, + Typography, + DialogContent, + DialogActions, + IconButton, + Fade, + Select, + MenuItem, + InputLabel, + FormControl, + FormHelperText +} from '@mui/material' + +import Icon from '@/components/Icon' + +import { useAppDispatch } from '@/lib/hooks/useStore' +import { createFileset, updateFileset } from '@/lib/store/metalakes' + +import * as yup from 'yup' +import { useForm, Controller } from 'react-hook-form' +import { yupResolver } from '@hookform/resolvers/yup' + +import { groupBy } from 'lodash-es' +import { genUpdates } from '@/lib/utils' +import { nameRegex, nameRegexDesc, keyRegex } from '@/lib/utils/regex' +import { useSearchParams } from 'next/navigation' + +const defaultValues = { + name: '', + type: 'managed', + storageLocation: '', + comment: '', + propItems: [] +} + +const schema = yup.object().shape({ + name: yup.string().required().matches(nameRegex, nameRegexDesc), + type: yup.mixed().oneOf(['managed', 'external']).required(), + storageLocation: yup.string().when('type', { + is: 'external', + then: schema => schema.required(), + otherwise: schema => schema + }), + propItems: yup.array().of( + yup.object().shape({ + required: yup.boolean(), + key: yup.string().required(), + value: yup.string().when('required', { + is: true, + then: schema => schema.required() + }) + }) + ) +}) + +const Transition = forwardRef(function Transition(props, ref) { + return +}) + +const CreateFilesetDialog = props => { + const { open, setOpen, type = 'create', data = {} } = props + const searchParams = useSearchParams() + const metalake = searchParams.get('metalake') + const catalog = searchParams.get('catalog') + const catalogType = searchParams.get('type') + const schemaName = searchParams.get('schema') + const [innerProps, setInnerProps] = useState([]) + const dispatch = useAppDispatch() + const [cacheData, setCacheData] = useState() + + const { + control, + reset, + watch, + setValue, + getValues, + handleSubmit, + trigger, + formState: { errors } + } = useForm({ + defaultValues, + mode: 'all', + resolver: yupResolver(schema) + }) + + const handleFormChange = ({ index, event }) => { + let data = [...innerProps] + data[index][event.target.name] = event.target.value + + if (event.target.name === 'key') { + const invalidKey = !keyRegex.test(event.target.value) + data[index].invalid = invalidKey + } + + const nonEmptyKeys = data.filter(item => item.key.trim() !== '') + const grouped = groupBy(nonEmptyKeys, 'key') + const duplicateKeys = Object.keys(grouped).some(key => grouped[key].length > 1) + + if (duplicateKeys) { + data[index].hasDuplicateKey = duplicateKeys + } else { + data.forEach(it => (it.hasDuplicateKey = false)) + } + + setInnerProps(data) + setValue('propItems', data) + } + + const addFields = () => { + const duplicateKeys = innerProps + .filter(item => item.key.trim() !== '') + .some( + (item, index, filteredItems) => + filteredItems.findIndex(otherItem => otherItem !== item && otherItem.key.trim() === item.key.trim()) !== -1 + ) + + if (duplicateKeys) { + return + } + + let newField = { key: '', value: '', required: false } + + setInnerProps([...innerProps, newField]) + setValue('propItems', [...innerProps, newField]) + } + + const removeFields = index => { + let data = [...innerProps] + data.splice(index, 1) + setInnerProps(data) + setValue('propItems', data) + } + + const handleClose = () => { + reset() + setInnerProps([]) + 
setValue('propItems', []) + setOpen(false) + } + + const handleClickSubmit = e => { + e.preventDefault() + + return handleSubmit(onSubmit(getValues()), onError) + } + + const onSubmit = data => { + const duplicateKeys = innerProps + .filter(item => item.key.trim() !== '') + .some( + (item, index, filteredItems) => + filteredItems.findIndex(otherItem => otherItem !== item && otherItem.key.trim() === item.key.trim()) !== -1 + ) + + const invalidKeys = innerProps.some(i => i.invalid) + + if (duplicateKeys || invalidKeys) { + return + } + + trigger() + + schema + .validate(data) + .then(() => { + const properties = innerProps.reduce((acc, item) => { + acc[item.key] = item.value + + return acc + }, {}) + + const filesetData = { + name: data.name, + type: data.type, + storageLocation: data.storageLocation, + comment: data.comment, + properties + } + + if (type === 'create') { + dispatch(createFileset({ data: filesetData, metalake, catalog, type: catalogType, schema: schemaName })).then( + res => { + if (!res.payload?.err) { + handleClose() + } + } + ) + } else { + const reqData = { updates: genUpdates(cacheData, filesetData) } + + if (reqData.updates.length !== 0) { + dispatch( + updateFileset({ + metalake, + catalog, + type: catalogType, + schema: schemaName, + fileset: cacheData.name, + data: reqData + }) + ).then(res => { + if (!res.payload?.err) { + handleClose() + } + }) + } + } + }) + .catch(err => { + console.error('valid error', err) + }) + } + + const onError = errors => { + console.error('fields error', errors) + } + + useEffect(() => { + if (open && JSON.stringify(data) !== '{}') { + const { properties = {} } = data + + setCacheData(data) + setValue('name', data.name) + setValue('type', data.type) + setValue('storageLocation', data.storageLocation) + setValue('comment', data.comment) + + const propsItems = Object.entries(properties).map(([key, value]) => { + return { + key, + value + } + }) + + setInnerProps(propsItems) + setValue('propItems', propsItems) + } + }, [open, data, setValue, type]) + + return ( + +
handleClickSubmit(e)}> + `${theme.spacing(8)} !important`, + px: theme => [`${theme.spacing(5)} !important`, `${theme.spacing(15)} !important`], + pt: theme => [`${theme.spacing(8)} !important`, `${theme.spacing(12.5)} !important`] + }} + > + handleClose()} + sx={{ position: 'absolute', right: '1rem', top: '1rem' }} + > + + + + + {type === 'create' ? 'Create' : 'Edit'} Fileset + + + + + + + ( + + )} + /> + {errors.name && {errors.name.message}} + + + + + + + Type + + ( + + )} + /> + {errors.type && {errors.type.message}} + + + + + + ( + + )} + /> + {errors.storageLocation ? ( + {errors.storageLocation.message} + ) : ( + <> + + It is optional if the fileset is 'Managed' type and a storage location is already specified at the + parent catalog or schema level. + + + It becomes mandatory if the fileset type is 'External' or no storage location is defined at the + parent level. + + + )} + + + + + + ( + + )} + /> + + + + + + Properties + + {innerProps.map((item, index) => { + return ( + + + + + + + handleFormChange({ index, event })} + error={item.hasDuplicateKey || item.invalid || !item.key.trim()} + data-refer={`props-key-${index}`} + /> + + + handleFormChange({ index, event })} + data-refer={`props-value-${index}`} + data-prev-refer={`props-${item.key}`} + /> + + + {!(item.disabled || (item.key === 'location' && type === 'update')) ? ( + + removeFields(index)}> + + + + ) : ( + + )} + + + + {item.description} + + {item.hasDuplicateKey && ( + Key already exists + )} + {item.key && item.invalid && ( + + Invalid key, matches strings starting with a letter/underscore, followed by alphanumeric + characters, underscores, hyphens, or dots. + + )} + {!item.key.trim() && ( + Key is required field + )} + + + + ) + })} + + + + + + + + [`${theme.spacing(5)} !important`, `${theme.spacing(15)} !important`], + pb: theme => [`${theme.spacing(5)} !important`, `${theme.spacing(12.5)} !important`] + }} + > + + + +
+
+ )
+}
+
+export default CreateFilesetDialog
diff --git a/web/web/src/app/metalakes/metalake/rightContent/RightContent.js b/web/web/src/app/metalakes/metalake/rightContent/RightContent.js index 1706399ddc2..4dfd091a4b5 100644 --- a/web/web/src/app/metalakes/metalake/rightContent/RightContent.js +++ b/web/web/src/app/metalakes/metalake/rightContent/RightContent.js @@ -26,6 +26,7 @@ import Icon from '@/components/Icon'
 import MetalakePath from './MetalakePath'
 import CreateCatalogDialog from './CreateCatalogDialog'
 import CreateSchemaDialog from './CreateSchemaDialog'
+import CreateFilesetDialog from './CreateFilesetDialog'
 import TabsContent from './tabsContent/TabsContent'
 import { useSearchParams } from 'next/navigation'
 import { useAppSelector } from '@/lib/hooks/useStore'
@@ -33,9 +34,11 @@ const RightContent = () => {
 const [open, setOpen] = useState(false)
 const [openSchema, setOpenSchema] = useState(false)
+ const [openFileset, setOpenFileset] = useState(false)
 const searchParams = useSearchParams()
 const [isShowBtn, setBtnVisible] = useState(true)
 const [isShowSchemaBtn, setSchemaBtnVisible] = useState(false)
+ const [isShowFilesetBtn, setFilesetBtnVisible] = useState(false)
 const store = useAppSelector(state => state.metalakes)

 const handleCreateCatalog = () => {
@@ -46,15 +49,33 @@ const RightContent = () => {
 setOpenSchema(true)
 }

+ const handleCreateFileset = () => {
+ setOpenFileset(true)
+ }
+
 useEffect(() => {
 const paramsSize = [...searchParams.keys()].length

 const isCatalogList = paramsSize == 1 && searchParams.get('metalake')
 setBtnVisible(isCatalogList)

+ const isFilesetList =
+ paramsSize == 4 &&
+ searchParams.has('metalake') &&
+ searchParams.has('catalog') &&
+ searchParams.get('type') === 'fileset' &&
+ searchParams.has('schema')
+ setFilesetBtnVisible(isFilesetList)
+
 if (store.catalogs.length) {
 const currentCatalog = store.catalogs.filter(ca => ca.name === searchParams.get('catalog'))[0]

- const isHideSchemaAction = ['lakehouse-hudi', 'kafka'].includes(currentCatalog?.provider) && paramsSize == 3
- setSchemaBtnVisible(!isHideSchemaAction && !isCatalogList)
+
+ const isSchemaList =
+ paramsSize == 3 &&
+ searchParams.has('metalake') &&
+ searchParams.has('catalog') &&
+ searchParams.has('type') &&
+ !['lakehouse-hudi', 'kafka'].includes(currentCatalog?.provider)
+ setSchemaBtnVisible(isSchemaList)
 }
 }, [searchParams, store.catalogs, store.catalogs.length])

@@ -105,6 +126,20 @@ const RightContent = () => {
 )}
+ {isShowFilesetBtn && (
+
+
+
+ )}

diff --git a/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js b/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js index cdc94c776df..cf8cc3bafef 100644 --- a/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js +++ b/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js @@ -41,14 +41,16 @@ import DetailsDrawer from '@/components/DetailsDrawer'
 import ConfirmDeleteDialog from '@/components/ConfirmDeleteDialog'
 import CreateCatalogDialog from '../../CreateCatalogDialog'
 import CreateSchemaDialog from '../../CreateSchemaDialog'
+import CreateFilesetDialog from '../../CreateFilesetDialog'
 import { useAppSelector, useAppDispatch } from '@/lib/hooks/useStore'
-import { deleteCatalog, deleteSchema } from '@/lib/store/metalakes'
+import { deleteCatalog, deleteFileset, deleteSchema } from '@/lib/store/metalakes'
 import { to } from '@/lib/utils'
 import {
getCatalogDetailsApi } from '@/lib/api/catalogs' import { getSchemaDetailsApi } from '@/lib/api/schemas' import { useSearchParams } from 'next/navigation' +import { getFilesetDetailsApi } from '@/lib/api/filesets' const fonts = Inconsolata({ subsets: ['latin'] }) @@ -76,6 +78,13 @@ const TableView = () => { const metalake = searchParams.get('metalake') || '' const catalog = searchParams.get('catalog') || '' const type = searchParams.get('type') || '' + const schema = searchParams.get('schema') || '' + + const isKafkaSchema = + paramsSize == 3 && + searchParams.has('metalake') && + searchParams.has('catalog') && + searchParams.get('type') === 'messaging' const defaultPaginationConfig = { pageSize: 10, page: 0 } const pageSizeOptions = [10, 25, 50] @@ -91,6 +100,7 @@ const TableView = () => { const [openConfirmDelete, setOpenConfirmDelete] = useState(false) const [openDialog, setOpenDialog] = useState(false) const [openSchemaDialog, setOpenSchemaDialog] = useState(false) + const [openFilesetDialog, setOpenFilesetDialog] = useState(false) const [dialogData, setDialogData] = useState({}) const [dialogType, setDialogType] = useState('create') const [isHideSchemaEdit, setIsHideSchemaEdit] = useState(true) @@ -463,6 +473,15 @@ const TableView = () => { setOpenDrawer(true) break } + case 'fileset': { + const [err, res] = await to(getFilesetDetailsApi({ metalake, catalog, schema, fileset: row.name })) + if (err || !res) { + throw new Error(err) + } + + setDrawerData(res.fileset) + setOpenDrawer(true) + } default: return } @@ -498,6 +517,18 @@ const TableView = () => { } break } + case 'fileset': { + if (metalake && catalog && schema) { + const [err, res] = await to(getFilesetDetailsApi({ metalake, catalog, schema, fileset: data.row?.name })) + if (err || !res) { + throw new Error(err) + } + + setDialogType('update') + setDialogData(res.fileset) + setOpenFilesetDialog(true) + } + } default: return } @@ -522,6 +553,9 @@ const TableView = () => { case 'schema': dispatch(deleteSchema({ metalake, catalog, type, schema: confirmCacheData.name })) break + case 'fileset': + dispatch(deleteFileset({ metalake, catalog, type, schema, fileset: confirmCacheData.name })) + break default: break } @@ -533,7 +567,12 @@ const TableView = () => { const checkColumns = () => { if ( (paramsSize == 1 && searchParams.has('metalake')) || - (paramsSize == 3 && searchParams.has('metalake') && searchParams.has('catalog') && searchParams.has('type')) + (paramsSize == 3 && searchParams.has('metalake') && searchParams.has('catalog') && searchParams.has('type')) || + (paramsSize == 4 && + searchParams.has('metalake') && + searchParams.has('catalog') && + searchParams.get('type') === 'fileset' && + searchParams.has('schema')) ) { return actionsColumns } else if (paramsSize == 5 && searchParams.has('table')) { @@ -580,6 +619,13 @@ const TableView = () => { + + ) } diff --git a/web/web/src/lib/api/filesets/index.js b/web/web/src/lib/api/filesets/index.js index 81f05488fac..bae492a11de 100644 --- a/web/web/src/lib/api/filesets/index.js +++ b/web/web/src/lib/api/filesets/index.js @@ -27,7 +27,13 @@ const Apis = { GET_DETAIL: ({ metalake, catalog, schema, fileset }) => `/api/metalakes/${encodeURIComponent(metalake)}/catalogs/${encodeURIComponent( catalog - )}/schemas/${encodeURIComponent(schema)}/filesets/${encodeURIComponent(fileset)}` + )}/schemas/${encodeURIComponent(schema)}/filesets/${encodeURIComponent(fileset)}`, + CREATE: ({ metalake, catalog, schema }) => + 
`/api/metalakes/${encodeURIComponent(metalake)}/catalogs/${encodeURIComponent(catalog)}/schemas/${encodeURIComponent(schema)}/filesets`,
+ UPDATE: ({ metalake, catalog, schema, fileset }) =>
+ `/api/metalakes/${encodeURIComponent(metalake)}/catalogs/${encodeURIComponent(catalog)}/schemas/${encodeURIComponent(schema)}/filesets/${encodeURIComponent(fileset)}`,
+ DELETE: ({ metalake, catalog, schema, fileset }) =>
+ `/api/metalakes/${encodeURIComponent(metalake)}/catalogs/${encodeURIComponent(catalog)}/schemas/${encodeURIComponent(schema)}/filesets/${encodeURIComponent(fileset)}`
 }

 export const getFilesetsApi = params => {
@@ -41,3 +47,15 @@ export const getFilesetDetailsApi = ({ metalake, catalog, schema, fileset }) =>
 url: `${Apis.GET_DETAIL({ metalake, catalog, schema, fileset })}`
 })
 }
+
+export const createFilesetApi = ({ metalake, catalog, schema, data }) => {
+ return defHttp.post({ url: `${Apis.CREATE({ metalake, catalog, schema })}`, data })
+}
+
+export const updateFilesetApi = ({ metalake, catalog, schema, fileset, data }) => {
+ return defHttp.put({ url: `${Apis.UPDATE({ metalake, catalog, schema, fileset })}`, data })
+}
+
+export const deleteFilesetApi = ({ metalake, catalog, schema, fileset }) => {
+ return defHttp.delete({ url: `${Apis.DELETE({ metalake, catalog, schema, fileset })}` })
+}
diff --git a/web/web/src/lib/store/metalakes/index.js b/web/web/src/lib/store/metalakes/index.js index 7c58e80e4cc..445d2838d3f 100644 --- a/web/web/src/lib/store/metalakes/index.js +++ b/web/web/src/lib/store/metalakes/index.js @@ -47,7 +47,13 @@ import { deleteSchemaApi } from '@/lib/api/schemas'
 import { getTablesApi, getTableDetailsApi } from '@/lib/api/tables'
-import { getFilesetsApi, getFilesetDetailsApi } from '@/lib/api/filesets'
+import {
+ getFilesetsApi,
+ getFilesetDetailsApi,
+ createFilesetApi,
+ updateFilesetApi,
+ deleteFilesetApi
+} from '@/lib/api/filesets'
 import { getTopicsApi, getTopicDetailsApi } from '@/lib/api/topics'

 export const fetchMetalakes = createAsyncThunk('appMetalakes/fetchMetalakes', async (params, { getState }) => {
@@ -885,6 +891,67 @@ export const getFilesetDetails = createAsyncThunk(
 }
 )

+export const createFileset = createAsyncThunk(
+ 'appMetalakes/createFileset',
+ async ({ data, metalake, catalog, type, schema }, { dispatch }) => {
+ dispatch(setTableLoading(true))
+ const [err, res] = await to(createFilesetApi({ data, metalake, catalog, schema }))
+ dispatch(setTableLoading(false))
+
+ if (err || !res) {
+ return { err: true }
+ }
+
+ const { fileset: filesetItem } = res
+
+ const filesetData = {
+ ...filesetItem,
+ node: 'fileset',
+ id: `{{${metalake}}}{{${catalog}}}{{${type}}}{{${schema}}}{{${filesetItem.name}}}`,
+ key: `{{${metalake}}}{{${catalog}}}{{${type}}}{{${schema}}}{{${filesetItem.name}}}`,
+ path: `?${new URLSearchParams({ metalake, catalog, type, schema, fileset: filesetItem.name }).toString()}`,
+ name: filesetItem.name,
+ title: filesetItem.name,
+ tables: [],
+ children: []
+ }
+
+ dispatch(fetchFilesets({ metalake, catalog, schema, type, init: true }))
+
+ return filesetData
+ }
+)
+
+export const updateFileset = createAsyncThunk(
+ 'appMetalakes/updateFileset',
+ async ({ metalake, catalog, type, schema, fileset, data }, { dispatch }) => {
+ const [err, res] = await to(updateFilesetApi({ metalake, catalog, schema, fileset, data }))
+ if (err || !res) {
+ return { err: true }
+ }
+ dispatch(fetchFilesets({ metalake, catalog, type, schema, init: true }))
+
+ return res.fileset
+ }
+)
+
+export const deleteFileset =
createAsyncThunk(
+ 'appMetalakes/deleteFileset',
+ async ({ metalake, catalog, type, schema, fileset }, { dispatch }) => {
+ dispatch(setTableLoading(true))
+ const [err, res] = await to(deleteFilesetApi({ metalake, catalog, schema, fileset }))
+ dispatch(setTableLoading(false))
+
+ if (err || !res) {
+ throw new Error(err)
+ }
+
+ dispatch(fetchFilesets({ metalake, catalog, type, schema, page: 'schemas', init: true }))
+
+ return res
+ }
+)
+
 export const fetchTopics = createAsyncThunk(
 'appMetalakes/fetchTopics',
 async ({ init, page, metalake, catalog, schema }, { getState, dispatch }) => {

From 13c46253115be18d40072e4c8fe53550ada581d2 Mon Sep 17 00:00:00 2001 From: Qi Yu Date: Thu, 24 Oct 2024 19:04:49 +0800 Subject: [PATCH 005/123] [#5221] feat(python-client): Support OSS for fileset python client (#5225)

### What changes were proposed in this pull request?

Add support for the Aliyun OSS Python client.

### Why are the changes needed?

It is needed to support Aliyun OSS in the fileset Python client.

Fix: #5221

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

Tested locally. Apply the corresponding change to `test_gvfs_with_oss.py` and run `./gradlew :clients:client-python:test -PskipDockerTests=false`
--- .../gravitino/filesystem/gvfs.py | 78 ++++ .../gravitino/filesystem/gvfs_config.py | 4 + clients/client-python/requirements.txt | 3 +- .../tests/integration/test_gvfs_with_oss.py | 353 ++++++++++++++++++ 4 files changed, 437 insertions(+), 1 deletion(-) create mode 100644 clients/client-python/tests/integration/test_gvfs_with_oss.py

diff --git a/clients/client-python/gravitino/filesystem/gvfs.py b/clients/client-python/gravitino/filesystem/gvfs.py index a9201a83326..8176c325a81 100644 --- a/clients/client-python/gravitino/filesystem/gvfs.py +++ b/clients/client-python/gravitino/filesystem/gvfs.py @@ -49,6 +49,7 @@ class StorageType(Enum):
 LOCAL = "file"
 GCS = "gs"
 S3A = "s3a"
+ OSS = "oss"

 class FilesetContextPair:
@@ -318,6 +319,7 @@ def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs):
 StorageType.HDFS,
 StorageType.GCS,
 StorageType.S3A,
+ StorageType.OSS,
 ]:
 src_context_pair.filesystem().mv(
 self._strip_storage_protocol(storage_type, src_actual_path),
@@ -567,6 +569,14 @@ def _convert_actual_path(
 or storage_location.startswith(f"{StorageType.S3A.value}://")
 ):
 actual_prefix = infer_storage_options(storage_location)["path"]
+ elif storage_location.startswith(f"{StorageType.OSS.value}:/"):
+ ops = infer_storage_options(storage_location)
+ if "host" not in ops or "path" not in ops:
+ raise GravitinoRuntimeException(
+ f"Storage location:{storage_location} is not supported now."
+ )
+
+ actual_prefix = ops["host"] + ops["path"]
 elif storage_location.startswith(f"{StorageType.LOCAL.value}:/"):
 actual_prefix = storage_location[len(f"{StorageType.LOCAL.value}:") :]
 else:
@@ -733,6 +743,8 @@ def _recognize_storage_type(path: str):
 return StorageType.GCS
 if path.startswith(f"{StorageType.S3A.value}://"):
 return StorageType.S3A
+ if path.startswith(f"{StorageType.OSS.value}://"):
+ return StorageType.OSS
 raise GravitinoRuntimeException(
 f"Storage type doesn't support now. Path:{path}"
 )
@@ -756,12 +768,46 @@ def _strip_storage_protocol(storage_type: StorageType, path: str):
 :param storage_type: The storage type
 :param path: The path
 :return: The stripped path
+
+ We handle OSS differently from S3 and GCS because OSS behaves differently when the path keeps its protocol prefix.
+ Please see the following example:
+
+ ```
+ >> oss = context_pair.filesystem()
+ >> oss.ls('oss://bucket-xiaoyu/test_gvfs_catalog678/test_gvfs_schema/test_gvfs_fileset/test_ls')
+ DEBUG:ossfs:Get directory listing page for bucket-xiaoyu/test_gvfs_catalog678/
+ test_gvfs_schema/test_gvfs_fileset
+ DEBUG:ossfs:CALL: ObjectIterator - () - {'prefix': 'test_gvfs_catalog678/test_gvfs_schema
+ /test_gvfs_fileset/', 'delimiter': '/'}
+ []
+ >> oss.ls('bucket-xiaoyu/test_gvfs_catalog678/test_gvfs_schema/test_gvfs_fileset/test_ls')
+ DEBUG:ossfs:Get directory listing page for bucket-xiaoyu/test_gvfs_catalog678/test_gvfs_schema
+ /test_gvfs_fileset/test_ls
+ DEBUG:ossfs:CALL: ObjectIterator - () - {'prefix': 'test_gvfs_catalog678/test_gvfs_schema
+ /test_gvfs_fileset/test_ls/', 'delimiter': '/'}
+ [{'name': 'bucket-xiaoyu/test_gvfs_catalog678/test_gvfs_schema/test_gvfs_fileset/test_ls
+ /test.file', 'type': 'file', 'size': 0, 'LastModified': 1729754793,
+ 'Size': 0, 'Key': 'bucket-xiaoyu/test_gvfs_catalog678/test_gvfs_schema/
+ test_gvfs_fileset/test_ls/test.file'}]
+
+ ```
+
+ As the example above shows, if we do not remove the protocol (paths starting with oss://),
+ `oss.ls` always returns an empty array; once the protocol is removed,
+ it produces the correct result as expected.
 """
 if storage_type in (StorageType.HDFS, StorageType.GCS, StorageType.S3A):
 return path
 if storage_type == StorageType.LOCAL:
 return path[len(f"{StorageType.LOCAL.value}:") :]

+ # OSS behaves differently from S3 and GCS: if we do not remove the
+ # protocol, it will always return an empty array.
+ if storage_type == StorageType.OSS:
+ if path.startswith(f"{StorageType.OSS.value}://"):
+ return path[len(f"{StorageType.OSS.value}://") :]
+ return path
+
 raise GravitinoRuntimeException(
 f"Storage type:{storage_type} doesn't support now."
 )
@@ -835,6 +881,8 @@ def _get_filesystem(self, actual_file_location: str):
 fs = self._get_gcs_filesystem()
 elif storage_type == StorageType.S3A:
 fs = self._get_s3_filesystem()
+ elif storage_type == StorageType.OSS:
+ fs = self._get_oss_filesystem()
 else:
 raise GravitinoRuntimeException(
 f"Storage type: `{storage_type}` doesn't support now."
 )
@@ -887,5 +935,35 @@ def _get_s3_filesystem(self):
 endpoint_url=aws_endpoint_url,
 )

+ def _get_oss_filesystem(self):
+ # get 'oss_access_key' from the OSS options; if the key is not found, throw an exception
+ oss_access_key_id = self._options.get(GVFSConfig.GVFS_FILESYSTEM_OSS_ACCESS_KEY)
+ if oss_access_key_id is None:
+ raise GravitinoRuntimeException(
+ "OSS access key id is not found in the options."
+ )
+
+ # get 'oss_secret_key' from the OSS options; if the key is not found, throw an exception
+ oss_secret_access_key = self._options.get(
+ GVFSConfig.GVFS_FILESYSTEM_OSS_SECRET_KEY
+ )
+ if oss_secret_access_key is None:
+ raise GravitinoRuntimeException(
+ "OSS secret access key is not found in the options."
+ )
+
+ # get 'oss_endpoint' from the OSS options; if the key is not found, throw an exception
+ oss_endpoint_url = self._options.get(GVFSConfig.GVFS_FILESYSTEM_OSS_ENDPOINT)
+ if oss_endpoint_url is None:
+ raise GravitinoRuntimeException(
+ "OSS endpoint url is not found in the options."
+ ) + + return importlib.import_module("ossfs").OSSFileSystem( + key=oss_access_key_id, + secret=oss_secret_access_key, + endpoint=oss_endpoint_url, + ) + fsspec.register_implementation(PROTOCOL_NAME, GravitinoVirtualFileSystem) diff --git a/clients/client-python/gravitino/filesystem/gvfs_config.py b/clients/client-python/gravitino/filesystem/gvfs_config.py index 7ffacdb095d..00ae8c6419e 100644 --- a/clients/client-python/gravitino/filesystem/gvfs_config.py +++ b/clients/client-python/gravitino/filesystem/gvfs_config.py @@ -37,3 +37,7 @@ class GVFSConfig: GVFS_FILESYSTEM_S3_ACCESS_KEY = "s3_access_key" GVFS_FILESYSTEM_S3_SECRET_KEY = "s3_secret_key" GVFS_FILESYSTEM_S3_ENDPOINT = "s3_endpoint" + + GVFS_FILESYSTEM_OSS_ACCESS_KEY = "oss_access_key" + GVFS_FILESYSTEM_OSS_SECRET_KEY = "oss_secret_key" + GVFS_FILESYSTEM_OSS_ENDPOINT = "oss_endpoint" diff --git a/clients/client-python/requirements.txt b/clients/client-python/requirements.txt index 1d0f4fadd5d..8eebd572770 100644 --- a/clients/client-python/requirements.txt +++ b/clients/client-python/requirements.txt @@ -24,4 +24,5 @@ fsspec==2024.3.1 pyarrow==15.0.2 cachetools==5.3.3 gcsfs==2024.3.1 -s3fs==2024.3.1 \ No newline at end of file +s3fs==2024.3.1 +ossfs==2023.12.0 \ No newline at end of file diff --git a/clients/client-python/tests/integration/test_gvfs_with_oss.py b/clients/client-python/tests/integration/test_gvfs_with_oss.py new file mode 100644 index 00000000000..95b385ea925 --- /dev/null +++ b/clients/client-python/tests/integration/test_gvfs_with_oss.py @@ -0,0 +1,353 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+import logging
+import os
+from random import randint
+import unittest
+
+
+from ossfs import OSSFileSystem
+
+from tests.integration.test_gvfs_with_hdfs import TestGvfsWithHDFS
+from gravitino import (
+ gvfs,
+ GravitinoClient,
+ Catalog,
+ Fileset,
+)
+from gravitino.exceptions.base import GravitinoRuntimeException
+from gravitino.filesystem.gvfs_config import GVFSConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+@unittest.skip("This test requires an OSS service account")
+class TestGvfsWithOSS(TestGvfsWithHDFS):
+ # Before running this test, please make sure aliyun-bundle-x.jar has been
+ # copied to the $GRAVITINO_HOME/catalogs/hadoop/libs/ directory
+ oss_access_key = "your_access_key"
+ oss_secret_key = "your_secret_key"
+ oss_endpoint = "your_endpoint"
+ bucket_name = "your_bucket_name"
+
+ metalake_name: str = "TestGvfsWithOSS_metalake" + str(randint(1, 10000))
+
+ def setUp(self):
+ self.options = {
+ f"{GVFSConfig.GVFS_FILESYSTEM_OSS_ACCESS_KEY}": self.oss_access_key,
+ f"{GVFSConfig.GVFS_FILESYSTEM_OSS_SECRET_KEY}": self.oss_secret_key,
+ f"{GVFSConfig.GVFS_FILESYSTEM_OSS_ENDPOINT}": self.oss_endpoint,
+ }
+
+ def tearDown(self):
+ self.options = {}
+
+ @classmethod
+ def setUpClass(cls):
+ cls._get_gravitino_home()
+
+ cls.hadoop_conf_path = f"{cls.gravitino_home}/catalogs/hadoop/conf/hadoop.conf"
+ # restart the server
+ cls.restart_server()
+ # create entity
+ cls._init_test_entities()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls._clean_test_data()
+ # reset the server conf in case other ITs like HDFS have changed it and failed
+ # to reset it
+ cls._reset_conf(cls.config, cls.hadoop_conf_path)
+ # restart server
+ cls.restart_server()
+
+ # clear all config in the conf_path
+ @classmethod
+ def _reset_conf(cls, config, conf_path):
+ logger.info("Reset %s.", conf_path)
+ if not os.path.exists(conf_path):
+ raise GravitinoRuntimeException(f"Conf file is not found at `{conf_path}`.")
+ filtered_lines = []
+ with open(conf_path, mode="r", encoding="utf-8") as file:
+ origin_lines = file.readlines()
+
+ for line in origin_lines:
+ line = line.strip()
+ if line.startswith("#"):
+ # append annotations directly
+ filtered_lines.append(line + "\n")
+
+ with open(conf_path, mode="w", encoding="utf-8") as file:
+ for line in filtered_lines:
+ file.write(line)
+
+ @classmethod
+ def _init_test_entities(cls):
+ cls.gravitino_admin_client.create_metalake(
+ name=cls.metalake_name, comment="", properties={}
+ )
+ cls.gravitino_client = GravitinoClient(
+ uri="http://localhost:8090", metalake_name=cls.metalake_name
+ )
+
+ cls.config = {}
+ cls.conf = {}
+ catalog = cls.gravitino_client.create_catalog(
+ name=cls.catalog_name,
+ catalog_type=Catalog.Type.FILESET,
+ provider=cls.catalog_provider,
+ comment="",
+ properties={
+ "filesystem-providers": "oss",
+ "gravitino.bypass.fs.oss.accessKeyId": cls.oss_access_key,
+ "gravitino.bypass.fs.oss.accessKeySecret": cls.oss_secret_key,
+ "gravitino.bypass.fs.oss.endpoint": cls.oss_endpoint,
+ "gravitino.bypass.fs.oss.impl": "org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem",
+ },
+ )
+ catalog.as_schemas().create_schema(
+ schema_name=cls.schema_name, comment="", properties={}
+ )
+
+ cls.fileset_storage_location: str = (
+ f"oss://{cls.bucket_name}/{cls.catalog_name}/{cls.schema_name}/{cls.fileset_name}"
+ )
+ cls.fileset_gvfs_location = (
+ f"gvfs://fileset/{cls.catalog_name}/{cls.schema_name}/{cls.fileset_name}"
+ )
+ catalog.as_fileset_catalog().create_fileset(
+ ident=cls.fileset_ident,
+ fileset_type=Fileset.Type.MANAGED,
+
comment=cls.fileset_comment, + storage_location=cls.fileset_storage_location, + properties=cls.fileset_properties, + ) + + cls.fs = OSSFileSystem( + key=cls.oss_access_key, + secret=cls.oss_secret_key, + endpoint=cls.oss_endpoint, + ) + + def check_mkdir(self, gvfs_dir, actual_dir, gvfs_instance): + # OSS will not create a directory, so the directory will not exist. + self.fs.mkdir(actual_dir) + self.assertFalse(self.fs.exists(actual_dir)) + self.assertFalse(gvfs_instance.exists(gvfs_dir)) + + def check_makedirs(self, gvfs_dir, actual_dir, gvfs_instance): + self.fs.makedirs(actual_dir) + self.assertFalse(self.fs.exists(actual_dir)) + self.assertFalse(gvfs_instance.exists(gvfs_dir)) + + def test_modified(self): + modified_dir = self.fileset_gvfs_location + "/test_modified" + modified_actual_dir = self.fileset_storage_location + "/test_modified" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + + self.check_mkdir(modified_dir, modified_actual_dir, fs) + # S3 only supports getting the `object` modify time, so the modified time will be None + # if it's a directory. + # >>> gcs.mkdir('example_qazwsx/catalog/schema/fileset3') + # >>> r = gcs.modified('example_qazwsx/catalog/schema/fileset3') + # >>> print(r) + # None + # self.assertIsNone(fs.modified(modified_dir)) + + # create a file under the dir 'modified_dir'. + file_path = modified_dir + "/test.txt" + fs.touch(file_path) + self.assertTrue(fs.exists(file_path)) + self.assertIsNotNone(fs.modified(file_path)) + + def test_rm(self): + rm_dir = self.fileset_gvfs_location + "/test_rm" + rm_actual_dir = self.fileset_storage_location + "/test_rm" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + self.check_mkdir(rm_dir, rm_actual_dir, fs) + + rm_file = self.fileset_gvfs_location + "/test_rm/test.file" + rm_actual_file = self.fileset_storage_location + "/test_rm/test.file" + fs.touch(rm_file) + self.assertTrue(self.fs.exists(rm_actual_file)) + self.assertTrue(fs.exists(rm_file)) + + # test delete file + fs.rm(rm_file) + self.assertFalse(fs.exists(rm_file)) + + # test delete dir with recursive = false + rm_new_file = self.fileset_gvfs_location + "/test_rm/test_new.file" + rm_new_actual_file = self.fileset_storage_location + "/test_rm/test_new.file" + self.fs.touch(rm_new_actual_file) + self.assertTrue(self.fs.exists(rm_new_actual_file)) + self.assertTrue(fs.exists(rm_new_file)) + + def test_rmdir(self): + rmdir_dir = self.fileset_gvfs_location + "/test_rmdir" + rmdir_actual_dir = self.fileset_storage_location + "/test_rmdir" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + self.check_mkdir(rmdir_dir, rmdir_actual_dir, fs) + + rmdir_file = self.fileset_gvfs_location + "/test_rmdir/test.file" + rmdir_actual_file = self.fileset_storage_location + "/test_rmdir/test.file" + self.fs.touch(rmdir_actual_file) + self.assertTrue(self.fs.exists(rmdir_actual_file)) + self.assertTrue(fs.exists(rmdir_file)) + + fs.rm_file(rmdir_file) + + def test_mkdir(self): + mkdir_dir = self.fileset_gvfs_location + "/test_mkdir" + mkdir_actual_dir = self.fileset_storage_location + "/test_mkdir" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + + # 
it actually takes no effect. + self.check_mkdir(mkdir_dir, mkdir_actual_dir, fs) + + # check whether it will automatically create the bucket if 'create_parents' + # is set to True. + new_bucket = self.bucket_name + "1" + mkdir_dir = mkdir_dir.replace(self.bucket_name, new_bucket) + mkdir_actual_dir = mkdir_actual_dir.replace(self.bucket_name, new_bucket) + fs.mkdir(mkdir_dir, create_parents=True) + + with self.assertRaises(FileNotFoundError): + self.fs.exists(mkdir_actual_dir) + + self.assertFalse(fs.exists(mkdir_dir)) + + with self.assertRaises(FileNotFoundError): + self.fs.exists("oss://" + new_bucket) + + def test_makedirs(self): + mkdir_dir = self.fileset_gvfs_location + "/test_mkdir" + mkdir_actual_dir = self.fileset_storage_location + "/test_mkdir" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + + # it actually takes no effect. + self.check_mkdir(mkdir_dir, mkdir_actual_dir, fs) + + # check whether it will automatically create the bucket if 'create_parents' + # is set to True. + new_bucket = self.bucket_name + "1" + mkdir_dir = mkdir_dir.replace(self.bucket_name, new_bucket) + mkdir_actual_dir = mkdir_actual_dir.replace(self.bucket_name, new_bucket) + + # it takes no effect. + fs.makedirs(mkdir_dir) + + with self.assertRaises(FileNotFoundError): + self.fs.exists(mkdir_actual_dir) + + self.assertFalse(fs.exists(mkdir_dir)) + with self.assertRaises(FileNotFoundError): + self.fs.exists("oss://" + new_bucket) + + def test_rm_file(self): + rm_file_dir = self.fileset_gvfs_location + "/test_rm_file" + rm_file_actual_dir = self.fileset_storage_location + "/test_rm_file" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + self.check_mkdir(rm_file_dir, rm_file_actual_dir, fs) + + rm_file_file = self.fileset_gvfs_location + "/test_rm_file/test.file" + rm_file_actual_file = self.fileset_storage_location + "/test_rm_file/test.file" + self.fs.touch(rm_file_actual_file) + self.assertTrue(self.fs.exists(rm_file_actual_file)) + self.assertTrue(fs.exists(rm_file_file)) + + # test delete file + fs.rm_file(rm_file_file) + self.assertFalse(fs.exists(rm_file_file)) + + # test delete dir + fs.rm_file(rm_file_dir) + + def test_info(self): + info_dir = self.fileset_gvfs_location + "/test_info" + info_actual_dir = self.fileset_storage_location + "/test_info" + fs = gvfs.GravitinoVirtualFileSystem( + server_uri="http://localhost:8090", + metalake_name=self.metalake_name, + options=self.options, + **self.conf, + ) + + self.check_mkdir(info_dir, info_actual_dir, fs) + + info_file = self.fileset_gvfs_location + "/test_info/test.file" + info_actual_file = self.fileset_storage_location + "/test_info/test.file" + self.fs.touch(info_actual_file) + self.assertTrue(self.fs.exists(info_actual_file)) + + ## OSS info has different behavior than S3 info. For OSS info, the name of the + ## directory will have a trailing slash if it's a directory and the path + # does not end with a slash, while S3 info will not have a trailing + # slash if it's a directory. 
+ + # >> > oss.info('bucket-xiaoyu/lisi') + # {'name': 'bucket-xiaoyu/lisi/', 'type': 'directory', + # 'size': 0, 'Size': 0, 'Key': 'bucket-xiaoyu/lisi/'} + # >> > oss.info('bucket-xiaoyu/lisi/') + # {'name': 'bucket-xiaoyu/lisi', 'size': 0, + # 'type': 'directory', 'Size': 0, + # 'Key': 'bucket-xiaoyu/lisi' + + # >> > s3.info('paimon-bucket/lisi'); + # {'name': 'paimon-bucket/lisi', 'type': 'directory', 'size': 0, + # 'StorageClass': 'DIRECTORY'} + # >> > s3.info('paimon-bucket/lisi/'); + # {'name': 'paimon-bucket/lisi', 'type': 'directory', 'size': 0, + # 'StorageClass': 'DIRECTORY'} + + dir_info = fs.info(info_dir) + self.assertEqual(dir_info["name"][:-1], info_dir[len("gvfs://") :]) + + file_info = fs.info(info_file) + self.assertEqual(file_info["name"], info_file[len("gvfs://") :]) From a89a1ca2de011994e59d5cd59f37dd6b2d76f049 Mon Sep 17 00:00:00 2001 From: Jerry Shao Date: Fri, 25 Oct 2024 08:24:33 +0800 Subject: [PATCH 006/123] [#4599][#4809] feat(core): Add tag support for column (#5186) ### What changes were proposed in this pull request? This PR proposes to add tag support for columns. ### Why are the changes needed? With this PR, user could add and remove tags in the column level. Fix: #4599 Fix: #4809 ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? Add new UT and IT to test. --------- Co-authored-by: Qi Yu --- .../java/org/apache/gravitino/rel/Column.java | 9 + .../apache/gravitino/TestMetadataObjects.java | 87 ++++++++ .../integration/test/HudiCatalogHMSIT.java | 53 +++-- .../test/CatalogIcebergBaseIT.java | 14 +- .../integration/test/CatalogPaimonBaseIT.java | 22 +- .../paimon/utils/TestCatalogUtils.java | 2 + .../gravitino/client/GenericColumn.java | 128 +++++++++++ .../gravitino/client/RelationalTable.java | 13 +- .../gravitino/client/TestGenericTag.java | 5 + .../client/TestRelationalCatalog.java | 20 +- .../gravitino/client/TestSupportTags.java | 37 ++++ .../client/integration/test/TagIT.java | 98 +++++++++ .../relational/mapper/TableColumnMapper.java | 9 + .../mapper/TableColumnSQLProviderFactory.java | 9 + .../base/TableColumnBaseSQLProvider.java | 28 +++ .../service/MetadataObjectService.java | 202 ++++++++++-------- .../service/TableColumnMetaService.java | 34 +++ .../org/apache/gravitino/tag/TagManager.java | 1 - .../gravitino/utils/MetadataObjectUtil.java | 11 +- .../gravitino/utils/NameIdentifierUtil.java | 33 +++ .../apache/gravitino/utils/NamespaceUtil.java | 26 +++ .../service/TestTableColumnMetaService.java | 110 ++++++++++ .../apache/gravitino/tag/TestTagManager.java | 91 +++++++- .../utils/TestMetadataObjectUtil.java | 12 +- .../utils/TestNameIdentifierUtil.java | 12 +- docs/manage-tags-in-gravitino.md | 6 +- docs/open-api/openapi.yaml | 1 + docs/open-api/tags.yaml | 1 + 28 files changed, 929 insertions(+), 145 deletions(-) create mode 100644 api/src/test/java/org/apache/gravitino/TestMetadataObjects.java create mode 100644 clients/client-java/src/main/java/org/apache/gravitino/client/GenericColumn.java diff --git a/api/src/main/java/org/apache/gravitino/rel/Column.java b/api/src/main/java/org/apache/gravitino/rel/Column.java index 650f5748f1d..e508970fa50 100644 --- a/api/src/main/java/org/apache/gravitino/rel/Column.java +++ b/api/src/main/java/org/apache/gravitino/rel/Column.java @@ -27,6 +27,7 @@ import org.apache.gravitino.rel.expressions.Expression; import org.apache.gravitino.rel.expressions.FunctionExpression; import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.tag.SupportsTags; /** * An 
interface representing a column of a {@link Table}. It defines basic properties of a column, @@ -71,6 +72,14 @@ public interface Column { */ Expression defaultValue(); + /** + * @return the {@link SupportsTags} if the column supports tag operations. + * @throws UnsupportedOperationException if the column does not support tag operations. + */ + default SupportsTags supportsTags() { + throw new UnsupportedOperationException("Column does not support tag operations."); + } + /** * Create a {@link Column} instance. * diff --git a/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java b/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java new file mode 100644 index 00000000000..bab5c5833fe --- /dev/null +++ b/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino; + +import com.google.common.collect.Lists; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class TestMetadataObjects { + + @Test + public void testColumnObject() { + MetadataObject columnObject = + MetadataObjects.of("catalog.schema.table", "c1", MetadataObject.Type.COLUMN); + Assertions.assertEquals("catalog.schema.table", columnObject.parent()); + Assertions.assertEquals("c1", columnObject.name()); + Assertions.assertEquals(MetadataObject.Type.COLUMN, columnObject.type()); + Assertions.assertEquals("catalog.schema.table.c1", columnObject.fullName()); + + MetadataObject columnObject2 = + MetadataObjects.of( + Lists.newArrayList("catalog", "schema", "table", "c2"), MetadataObject.Type.COLUMN); + Assertions.assertEquals("catalog.schema.table", columnObject2.parent()); + Assertions.assertEquals("c2", columnObject2.name()); + Assertions.assertEquals(MetadataObject.Type.COLUMN, columnObject2.type()); + Assertions.assertEquals("catalog.schema.table.c2", columnObject2.fullName()); + + MetadataObject columnObject3 = + MetadataObjects.parse("catalog.schema.table.c3", MetadataObject.Type.COLUMN); + Assertions.assertEquals("catalog.schema.table", columnObject3.parent()); + Assertions.assertEquals("c3", columnObject3.name()); + Assertions.assertEquals(MetadataObject.Type.COLUMN, columnObject3.type()); + Assertions.assertEquals("catalog.schema.table.c3", columnObject3.fullName()); + + // Test parent + MetadataObject parent = MetadataObjects.parent(columnObject); + Assertions.assertEquals("catalog.schema.table", parent.fullName()); + Assertions.assertEquals("catalog.schema", parent.parent()); + Assertions.assertEquals("table", parent.name()); + Assertions.assertEquals(MetadataObject.Type.TABLE, parent.type()); + + // Test incomplete name + Assertions.assertThrows( + IllegalArgumentException.class, + () -> 
MetadataObjects.parse("c1", MetadataObject.Type.COLUMN)); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> MetadataObjects.parse("catalog", MetadataObject.Type.COLUMN)); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> MetadataObjects.parse("catalog.schema", MetadataObject.Type.COLUMN)); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> MetadataObjects.parse("catalog.schema.table", MetadataObject.Type.COLUMN)); + + // Test incomplete name list + Assertions.assertThrows( + IllegalArgumentException.class, + () -> MetadataObjects.of(Lists.newArrayList("catalog"), MetadataObject.Type.COLUMN)); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + MetadataObjects.of( + Lists.newArrayList("catalog", "schema"), MetadataObject.Type.COLUMN)); + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + MetadataObjects.of( + Lists.newArrayList("catalog", "schema", "table"), MetadataObject.Type.COLUMN)); + } +} diff --git a/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java b/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java index dcc7e1ad916..9fc1c81b5fd 100644 --- a/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java +++ b/catalogs/catalog-lakehouse-hudi/src/test/java/org/apache/gravitino/catalog/lakehouse/hudi/integration/test/HudiCatalogHMSIT.java @@ -291,77 +291,77 @@ private void assertColumns(Table table) { Column[] columns = table.columns(); Assertions.assertEquals(11, columns.length); if (table.name().endsWith("_rt") || table.name().endsWith("_ro")) { - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_commit_time") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[0]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_commit_seqno") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[1]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_record_key") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[2]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_partition_path") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[3]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_file_name") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[4]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("ts") .withDataType(Types.LongType.get()) .withComment("") .build(), columns[5]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("uuid") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[6]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("rider") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[7]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("driver") .withDataType(Types.StringType.get()) .withComment("") .build(), columns[8]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("fare") .withDataType(Types.DoubleType.get()) .withComment("") .build(), columns[9]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("city") 
.withDataType(Types.StringType.get()) @@ -369,57 +369,66 @@ private void assertColumns(Table table) { .build(), columns[10]); } else { - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_commit_time") .withDataType(Types.StringType.get()) .build(), columns[0]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_commit_seqno") .withDataType(Types.StringType.get()) .build(), columns[1]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_record_key") .withDataType(Types.StringType.get()) .build(), columns[2]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_partition_path") .withDataType(Types.StringType.get()) .build(), columns[3]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder() .withName("_hoodie_file_name") .withDataType(Types.StringType.get()) .build(), columns[4]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder().withName("ts").withDataType(Types.LongType.get()).build(), columns[5]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder().withName("uuid").withDataType(Types.StringType.get()).build(), columns[6]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder().withName("rider").withDataType(Types.StringType.get()).build(), columns[7]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder().withName("driver").withDataType(Types.StringType.get()).build(), columns[8]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder().withName("fare").withDataType(Types.DoubleType.get()).build(), columns[9]); - Assertions.assertEquals( + assertColumn( ColumnDTO.builder().withName("city").withDataType(Types.StringType.get()).build(), columns[10]); } } + private void assertColumn(ColumnDTO columnDTO, Column column) { + Assertions.assertEquals(columnDTO.name(), column.name()); + Assertions.assertEquals(columnDTO.dataType(), column.dataType()); + Assertions.assertEquals(columnDTO.comment(), column.comment()); + Assertions.assertEquals(columnDTO.nullable(), column.nullable()); + Assertions.assertEquals(columnDTO.autoIncrement(), column.autoIncrement()); + Assertions.assertEquals(columnDTO.defaultValue(), column.defaultValue()); + } + private static void createHudiTables() { sparkSession = SparkSession.builder() diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java index 7c5d93362f6..57598dd2435 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergBaseIT.java @@ -51,7 +51,6 @@ import org.apache.gravitino.catalog.lakehouse.iceberg.IcebergTable; import org.apache.gravitino.catalog.lakehouse.iceberg.ops.IcebergCatalogWrapperHelper; import org.apache.gravitino.client.GravitinoMetalake; -import org.apache.gravitino.dto.util.DTOConverters; import org.apache.gravitino.exceptions.NoSuchSchemaException; import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; import org.apache.gravitino.exceptions.TableAlreadyExistsException; @@ -417,7 +416,7 @@ void testCreateAndLoadIcebergTable() { Assertions.assertEquals(createdTable.columns().length, 
columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), createdTable.columns()[i]); + assertColumn(columns[i], createdTable.columns()[i]); } // TODO add partitioning and sort order check @@ -434,7 +433,7 @@ void testCreateAndLoadIcebergTable() { } Assertions.assertEquals(loadTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), loadTable.columns()[i]); + assertColumn(columns[i], loadTable.columns()[i]); } Assertions.assertEquals(partitioning.length, loadTable.partitioning().length); @@ -1257,4 +1256,13 @@ protected static void assertionsTableInfo( Assertions.assertEquals(entry.getValue(), table.properties().get(entry.getKey())); } } + + protected void assertColumn(Column expectedColumn, Column actualColumn) { + Assertions.assertEquals(expectedColumn.name(), actualColumn.name()); + Assertions.assertEquals(expectedColumn.dataType(), actualColumn.dataType()); + Assertions.assertEquals(expectedColumn.comment(), actualColumn.comment()); + Assertions.assertEquals(expectedColumn.nullable(), actualColumn.nullable()); + Assertions.assertEquals(expectedColumn.autoIncrement(), actualColumn.autoIncrement()); + Assertions.assertEquals(expectedColumn.defaultValue(), actualColumn.defaultValue()); + } } diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java index 668cd404e91..ea1e8debce0 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonBaseIT.java @@ -46,7 +46,6 @@ import org.apache.gravitino.catalog.lakehouse.paimon.ops.PaimonBackendCatalogWrapper; import org.apache.gravitino.catalog.lakehouse.paimon.utils.CatalogUtils; import org.apache.gravitino.client.GravitinoMetalake; -import org.apache.gravitino.dto.util.DTOConverters; import org.apache.gravitino.exceptions.NoSuchSchemaException; import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; import org.apache.gravitino.exceptions.TableAlreadyExistsException; @@ -256,7 +255,7 @@ void testCreateAndLoadPaimonTable() Assertions.assertEquals(createdTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), createdTable.columns()[i]); + assertColumn(columns[i], createdTable.columns()[i]); } Table loadTable = tableCatalog.loadTable(tableIdentifier); @@ -269,7 +268,7 @@ void testCreateAndLoadPaimonTable() } Assertions.assertEquals(loadTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), loadTable.columns()[i]); + assertColumn(columns[i], loadTable.columns()[i]); } // catalog load check @@ -346,7 +345,7 @@ void testCreateAndLoadPaimonPartitionedTable() Assertions.assertEquals(createdTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), createdTable.columns()[i]); + assertColumn(columns[i], createdTable.columns()[i]); } Table loadTable = tableCatalog.loadTable(tableIdentifier); @@ -374,7 
+373,7 @@ void testCreateAndLoadPaimonPartitionedTable() Assertions.assertArrayEquals(partitionKeys, loadedPartitionKeys); Assertions.assertEquals(loadTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), loadTable.columns()[i]); + assertColumn(columns[i], loadTable.columns()[i]); } // catalog load check @@ -459,7 +458,7 @@ void testCreateAndLoadPaimonPrimaryKeyTable() } Assertions.assertEquals(createdTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), createdTable.columns()[i]); + assertColumn(columns[i], createdTable.columns()[i]); } Table loadTable = tableCatalog.loadTable(tableIdentifier); @@ -488,7 +487,7 @@ void testCreateAndLoadPaimonPrimaryKeyTable() } Assertions.assertEquals(loadTable.columns().length, columns.length); for (int i = 0; i < columns.length; i++) { - Assertions.assertEquals(DTOConverters.toDTO(columns[i]), loadTable.columns()[i]); + assertColumn(columns[i], loadTable.columns()[i]); } // catalog load check @@ -969,4 +968,13 @@ protected void initSparkEnv() { .enableHiveSupport() .getOrCreate(); } + + protected void assertColumn(Column expectedColumn, Column actualColumn) { + Assertions.assertEquals(expectedColumn.name(), actualColumn.name()); + Assertions.assertEquals(expectedColumn.dataType(), actualColumn.dataType()); + Assertions.assertEquals(expectedColumn.comment(), actualColumn.comment()); + Assertions.assertEquals(expectedColumn.nullable(), actualColumn.nullable()); + Assertions.assertEquals(expectedColumn.autoIncrement(), actualColumn.autoIncrement()); + Assertions.assertEquals(expectedColumn.defaultValue(), actualColumn.defaultValue()); + } } diff --git a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java index d1b50d52073..c81ae830e6e 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/utils/TestCatalogUtils.java @@ -36,9 +36,11 @@ import org.apache.paimon.factories.FactoryException; import org.apache.paimon.hive.HiveCatalog; import org.apache.paimon.jdbc.JdbcCatalog; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; /** Tests for {@link org.apache.gravitino.catalog.lakehouse.paimon.utils.CatalogUtils}. */ +@Tag("gravitino-docker-test") public class TestCatalogUtils { @Test diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/GenericColumn.java b/clients/client-java/src/main/java/org/apache/gravitino/client/GenericColumn.java new file mode 100644 index 00000000000..aacf022e958 --- /dev/null +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/GenericColumn.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.client; + +import com.google.common.collect.Lists; +import java.util.Objects; +import org.apache.gravitino.MetadataObject; +import org.apache.gravitino.MetadataObjects; +import org.apache.gravitino.exceptions.NoSuchTagException; +import org.apache.gravitino.exceptions.TagAlreadyAssociatedException; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.expressions.Expression; +import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.tag.SupportsTags; +import org.apache.gravitino.tag.Tag; + +/** Represents a generic column. */ +public class GenericColumn implements Column, SupportsTags { + + private final Column internalColumn; + + private final MetadataObjectTagOperations objectTagOperations; + + GenericColumn( + Column column, + RESTClient restClient, + String metalake, + String catalog, + String schema, + String table) { + this.internalColumn = column; + MetadataObject columnObject = + MetadataObjects.of( + Lists.newArrayList(catalog, schema, table, internalColumn.name()), + MetadataObject.Type.COLUMN); + this.objectTagOperations = new MetadataObjectTagOperations(metalake, columnObject, restClient); + } + + @Override + public SupportsTags supportsTags() { + return this; + } + + @Override + public String[] listTags() { + return objectTagOperations.listTags(); + } + + @Override + public Tag[] listTagsInfo() { + return objectTagOperations.listTagsInfo(); + } + + @Override + public Tag getTag(String name) throws NoSuchTagException { + return objectTagOperations.getTag(name); + } + + @Override + public String[] associateTags(String[] tagsToAdd, String[] tagsToRemove) + throws TagAlreadyAssociatedException { + return objectTagOperations.associateTags(tagsToAdd, tagsToRemove); + } + + @Override + public String name() { + return internalColumn.name(); + } + + @Override + public Type dataType() { + return internalColumn.dataType(); + } + + @Override + public String comment() { + return internalColumn.comment(); + } + + @Override + public boolean nullable() { + return internalColumn.nullable(); + } + + @Override + public boolean autoIncrement() { + return internalColumn.autoIncrement(); + } + + @Override + public Expression defaultValue() { + return internalColumn.defaultValue(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof GenericColumn)) { + return false; + } + + GenericColumn column = (GenericColumn) obj; + return Objects.equals(internalColumn, column.internalColumn); + } + + @Override + public int hashCode() { + return internalColumn.hashCode(); + } +} diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/RelationalTable.java b/clients/client-java/src/main/java/org/apache/gravitino/client/RelationalTable.java index 83634295f95..e2ace7de278 100644 --- a/clients/client-java/src/main/java/org/apache/gravitino/client/RelationalTable.java +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/RelationalTable.java @@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import 
com.google.common.base.Joiner; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -113,7 +114,17 @@ public String name() { /** @return the columns of the table. */ @Override public Column[] columns() { - return table.columns(); + return Arrays.stream(table.columns()) + .map( + c -> + new GenericColumn( + c, + restClient, + namespace.level(0), + namespace.level(1), + namespace.level(2), + name())) + .toArray(Column[]::new); } /** @return the partitioning of the table. */ diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericTag.java b/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericTag.java index 0e86e9bf649..20463ddb867 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericTag.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericTag.java @@ -100,6 +100,11 @@ public void testAssociatedObjects() throws JsonProcessingException { .withParent("catalog1.schema1") .withName("table1") .withType(MetadataObject.Type.TABLE) + .build(), + MetadataObjectDTO.builder() + .withParent("catalog1.schema1.table1") + .withName("column1") + .withType(MetadataObject.Type.COLUMN) .build() }; diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/TestRelationalCatalog.java b/clients/client-java/src/test/java/org/apache/gravitino/client/TestRelationalCatalog.java index 01d17271ef3..c5e36247bbf 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/TestRelationalCatalog.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/TestRelationalCatalog.java @@ -36,6 +36,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Map; +import java.util.stream.IntStream; import org.apache.gravitino.Catalog; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; @@ -727,9 +728,22 @@ private void assertTableEquals(Table expected, Table actual) { Assertions.assertEquals(expected.name(), actual.name()); Assertions.assertEquals(expected.comment(), actual.comment()); Assertions.assertEquals(expected.properties(), actual.properties()); - - Assertions.assertArrayEquals(expected.columns(), actual.columns()); - + Assertions.assertEquals(expected.columns().length, actual.columns().length); + IntStream.range(0, expected.columns().length) + .forEach( + i -> { + Assertions.assertEquals(expected.columns()[i].name(), actual.columns()[i].name()); + Assertions.assertEquals( + expected.columns()[i].dataType(), actual.columns()[i].dataType()); + Assertions.assertEquals( + expected.columns()[i].comment(), actual.columns()[i].comment()); + Assertions.assertEquals( + expected.columns()[i].nullable(), actual.columns()[i].nullable()); + Assertions.assertEquals( + expected.columns()[i].autoIncrement(), actual.columns()[i].autoIncrement()); + Assertions.assertEquals( + expected.columns()[i].defaultValue(), actual.columns()[i].defaultValue()); + }); Assertions.assertArrayEquals(expected.partitioning(), actual.partitioning()); } diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportTags.java b/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportTags.java index a80fb324659..3d903a972c7 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportTags.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportTags.java @@ -46,6 +46,7 @@ import 
org.apache.gravitino.exceptions.NotFoundException; import org.apache.gravitino.file.Fileset; import org.apache.gravitino.messaging.Topic; +import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.Table; import org.apache.gravitino.rel.types.Types; import org.apache.gravitino.tag.SupportsTags; @@ -69,6 +70,8 @@ public class TestSupportTags extends TestBase { private static Table relationalTable; + private static Column genericColumn; + private static Fileset genericFileset; private static Topic genericTopic; @@ -141,6 +144,8 @@ public static void setUp() throws Exception { .build(), client.restClient()); + genericColumn = relationalTable.columns()[0]; + genericFileset = new GenericFileset( FilesetDTO.builder() @@ -195,6 +200,14 @@ public void testListTagsForTable() throws JsonProcessingException { MetadataObjects.of("catalog1.schema1", relationalTable.name(), MetadataObject.Type.TABLE)); } + @Test + public void testListTagsForColumn() throws JsonProcessingException { + testListTags( + genericColumn.supportsTags(), + MetadataObjects.of( + "catalog1.schema1.table1", genericColumn.name(), MetadataObject.Type.COLUMN)); + } + @Test public void testListTagsForFileset() throws JsonProcessingException { testListTags( @@ -238,6 +251,14 @@ public void testListTagsInfoForTable() throws JsonProcessingException { MetadataObjects.of("catalog1.schema1", relationalTable.name(), MetadataObject.Type.TABLE)); } + @Test + public void testListTagsInfoForColumn() throws JsonProcessingException { + testListTagsInfo( + genericColumn.supportsTags(), + MetadataObjects.of( + "catalog1.schema1.table1", genericColumn.name(), MetadataObject.Type.COLUMN)); + } + @Test public void testListTagsInfoForFileset() throws JsonProcessingException { testListTagsInfo( @@ -281,6 +302,14 @@ public void testGetTagForTable() throws JsonProcessingException { MetadataObjects.of("catalog1.schema1", relationalTable.name(), MetadataObject.Type.TABLE)); } + @Test + public void testGetTagForColumn() throws JsonProcessingException { + testGetTag( + genericColumn.supportsTags(), + MetadataObjects.of( + "catalog1.schema1.table1", genericColumn.name(), MetadataObject.Type.COLUMN)); + } + @Test public void testGetTagForFileset() throws JsonProcessingException { testGetTag( @@ -324,6 +353,14 @@ public void testAssociateTagsForTable() throws JsonProcessingException { MetadataObjects.of("catalog1.schema1", relationalTable.name(), MetadataObject.Type.TABLE)); } + @Test + public void testAssociateTagsForColumn() throws JsonProcessingException { + testAssociateTags( + genericColumn.supportsTags(), + MetadataObjects.of( + "catalog1.schema1.table1", genericColumn.name(), MetadataObject.Type.COLUMN)); + } + @Test public void testAssociateTagsForFileset() throws JsonProcessingException { testAssociateTags( diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java index dc82dfd67df..847b6253ff4 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java @@ -60,6 +60,8 @@ public class TagIT extends BaseIT { private static Schema schema; private static Table table; + private static Column column; + @BeforeAll public void setUp() { containerSuite.startHiveContainer(); @@ -105,6 +107,7 @@ public void setUp() { }, "comment", Collections.emptyMap()); + column = 
Arrays.stream(table.columns()).filter(c -> c.name().equals("col1")).findFirst().get();
   }
 
   @AfterAll
@@ -508,4 +511,99 @@ public void testAssociateTagsToTable() {
     Assertions.assertEquals(
         MetadataObject.Type.TABLE, tag3.associatedObjects().objects()[0].type());
   }
+
+  @Test
+  public void testAssociateTagsToColumn() {
+    Tag tag1 =
+        metalake.createTag(
+            GravitinoITUtils.genRandomName("tag_it_column_tag1"),
+            "comment1",
+            Collections.emptyMap());
+    Tag tag2 =
+        metalake.createTag(
+            GravitinoITUtils.genRandomName("tag_it_column_tag2"),
+            "comment2",
+            Collections.emptyMap());
+    Tag tag3 =
+        metalake.createTag(
+            GravitinoITUtils.genRandomName("tag_it_column_tag3"),
+            "comment3",
+            Collections.emptyMap());
+    Tag tag4 =
+        metalake.createTag(
+            GravitinoITUtils.genRandomName("tag_it_column_tag4"),
+            "comment4",
+            Collections.emptyMap());
+
+    // Associate tags to catalog
+    relationalCatalog.supportsTags().associateTags(new String[] {tag1.name()}, null);
+
+    // Associate tags to schema
+    schema.supportsTags().associateTags(new String[] {tag2.name()}, null);
+
+    // Associate tags to table
+    table.supportsTags().associateTags(new String[] {tag3.name()}, null);
+
+    // Associate tags to column
+    String[] tags = column.supportsTags().associateTags(new String[] {tag4.name()}, null);
+
+    Assertions.assertEquals(1, tags.length);
+    Set<String> tagNames = Sets.newHashSet(tags);
+    Assertions.assertTrue(tagNames.contains(tag4.name()));
+
+    // Test list associated tags for column
+    String[] tags1 = column.supportsTags().listTags();
+    Assertions.assertEquals(4, tags1.length);
+    Set<String> tagNames1 = Sets.newHashSet(tags1);
+    Assertions.assertTrue(tagNames1.contains(tag1.name()));
+    Assertions.assertTrue(tagNames1.contains(tag2.name()));
+    Assertions.assertTrue(tagNames1.contains(tag3.name()));
+    Assertions.assertTrue(tagNames1.contains(tag4.name()));
+
+    // Test list associated tags with details for column
+    Tag[] tags2 = column.supportsTags().listTagsInfo();
+    Assertions.assertEquals(4, tags2.length);
+
+    Set<Tag> nonInheritedTags =
+        Arrays.stream(tags2).filter(tag -> !tag.inherited().get()).collect(Collectors.toSet());
+    Set<Tag> inheritedTags =
+        Arrays.stream(tags2).filter(tag -> tag.inherited().get()).collect(Collectors.toSet());
+
+    Assertions.assertEquals(1, nonInheritedTags.size());
+    Assertions.assertEquals(3, inheritedTags.size());
+    Assertions.assertTrue(nonInheritedTags.contains(tag4));
+    Assertions.assertTrue(inheritedTags.contains(tag1));
+    Assertions.assertTrue(inheritedTags.contains(tag2));
+    Assertions.assertTrue(inheritedTags.contains(tag3));
+
+    // Test get associated tag for column
+    Tag resultTag1 = column.supportsTags().getTag(tag1.name());
+    Assertions.assertEquals(tag1, resultTag1);
+    Assertions.assertTrue(resultTag1.inherited().get());
+
+    Tag resultTag2 = column.supportsTags().getTag(tag2.name());
+    Assertions.assertEquals(tag2, resultTag2);
+    Assertions.assertTrue(resultTag2.inherited().get());
+
+    Tag resultTag3 = column.supportsTags().getTag(tag3.name());
+    Assertions.assertEquals(tag3, resultTag3);
+    Assertions.assertTrue(resultTag3.inherited().get());
+
+    Tag resultTag4 = column.supportsTags().getTag(tag4.name());
+    Assertions.assertEquals(tag4, resultTag4);
+    Assertions.assertFalse(resultTag4.inherited().get());
+
+    // Test get objects associated with tag
+    Assertions.assertEquals(1, tag1.associatedObjects().count());
+    Assertions.assertEquals(relationalCatalog.name(), tag1.associatedObjects().objects()[0].name());
+
+    Assertions.assertEquals(1, tag2.associatedObjects().count());
+ 
Assertions.assertEquals(schema.name(), tag2.associatedObjects().objects()[0].name()); + + Assertions.assertEquals(1, tag3.associatedObjects().count()); + Assertions.assertEquals(table.name(), tag3.associatedObjects().objects()[0].name()); + + Assertions.assertEquals(1, tag4.associatedObjects().count()); + Assertions.assertEquals(column.name(), tag4.associatedObjects().objects()[0].name()); + } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java index 2214d8fd359..87b38ea482c 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnMapper.java @@ -62,4 +62,13 @@ List listColumnPOsByTableIdAndVersion( method = "deleteColumnPOsByLegacyTimeline") Integer deleteColumnPOsByLegacyTimeline( @Param("legacyTimeline") Long legacyTimeline, @Param("limit") int limit); + + @SelectProvider( + type = TableColumnSQLProviderFactory.class, + method = "selectColumnIdByTableIdAndName") + Long selectColumnIdByTableIdAndName( + @Param("tableId") Long tableId, @Param("columnName") String name); + + @SelectProvider(type = TableColumnSQLProviderFactory.class, method = "selectColumnPOById") + ColumnPO selectColumnPOById(@Param("columnId") Long columnId); } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java index f85cf72d837..11f0d5419f1 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/TableColumnSQLProviderFactory.java @@ -81,4 +81,13 @@ public static String softDeleteColumnsByCatalogId(@Param("catalogId") Long catal public static String softDeleteColumnsBySchemaId(@Param("schemaId") Long schemaId) { return getProvider().softDeleteColumnsBySchemaId(schemaId); } + + public static String selectColumnIdByTableIdAndName( + @Param("tableId") Long tableId, @Param("columnName") String name) { + return getProvider().selectColumnIdByTableIdAndName(tableId, name); + } + + public static String selectColumnPOById(@Param("columnId") Long columnId) { + return getProvider().selectColumnPOById(columnId); + } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java index cdc32425b6f..d6154c907b6 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TableColumnBaseSQLProvider.java @@ -103,4 +103,32 @@ public String deleteColumnPOsByLegacyTimeline( + TableColumnMapper.COLUMN_TABLE_NAME + " WHERE deleted_at > 0 AND deleted_at < #{legacyTimeline} LIMIT #{limit}"; } + + public String selectColumnIdByTableIdAndName( + @Param("tableId") Long tableId, @Param("columnName") String name) { + return "SELECT" + + " CASE" + + " WHEN column_op_type = 3 THEN NULL" + + " ELSE column_id" + + " END" + + " FROM " + + TableColumnMapper.COLUMN_TABLE_NAME + + " WHERE table_id = #{tableId} AND column_name = #{columnName} AND deleted_at = 0" + + " ORDER BY 
table_version DESC LIMIT 1"; + } + + public String selectColumnPOById(@Param("columnId") Long columnId) { + return "SELECT column_id AS columnId, column_name AS columnName," + + " column_position AS columnPosition, metalake_id AS metalakeId, catalog_id AS catalogId," + + " schema_id AS schemaId, table_id AS tableId," + + " table_version AS tableVersion, column_type AS columnType," + + " column_comment AS columnComment, column_nullable AS nullable," + + " column_auto_increment AS autoIncrement," + + " column_default_value AS defaultValue, column_op_type AS columnOpType," + + " deleted_at AS deletedAt, audit_info AS auditInfo" + + " FROM " + + TableColumnMapper.COLUMN_TABLE_NAME + + " WHERE column_id = #{columnId} AND deleted_at = 0" + + " ORDER BY table_version DESC LIMIT 1"; + } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java index 0ee28d02973..c32759af56f 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java @@ -24,6 +24,7 @@ import javax.annotation.Nullable; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.storage.relational.po.CatalogPO; +import org.apache.gravitino.storage.relational.po.ColumnPO; import org.apache.gravitino.storage.relational.po.FilesetPO; import org.apache.gravitino.storage.relational.po.MetalakePO; import org.apache.gravitino.storage.relational.po.SchemaPO; @@ -69,101 +70,130 @@ public static long getMetadataObjectId( return FilesetMetaService.getInstance().getFilesetIdBySchemaIdAndName(schemaId, names.get(2)); } else if (type == MetadataObject.Type.TOPIC) { return TopicMetaService.getInstance().getTopicIdBySchemaIdAndName(schemaId, names.get(2)); - } else if (type == MetadataObject.Type.TABLE) { - return TableMetaService.getInstance().getTableIdBySchemaIdAndName(schemaId, names.get(2)); } - throw new IllegalArgumentException(String.format("Doesn't support the type %s", type)); - } - - // Metadata object may be null because the metadata object can be deleted asynchronously. 
- @Nullable - public static String getMetadataObjectFullName(String type, long metadataObjectId) { - MetadataObject.Type metadatatype = MetadataObject.Type.valueOf(type); - if (metadatatype == MetadataObject.Type.METALAKE) { - MetalakePO metalakePO = MetalakeMetaService.getInstance().getMetalakePOById(metadataObjectId); - if (metalakePO == null) { - return null; - } - - return metalakePO.getMetalakeName(); - } - - if (metadatatype == MetadataObject.Type.CATALOG) { - return getCatalogFullName(metadataObjectId); - } - - if (metadatatype == MetadataObject.Type.SCHEMA) { - return getSchemaFullName(metadataObjectId); - } - - if (metadatatype == MetadataObject.Type.TABLE) { - TablePO tablePO = TableMetaService.getInstance().getTablePOById(metadataObjectId); - if (tablePO == null) { - return null; - } - - String schemaName = getSchemaFullName(tablePO.getSchemaId()); - if (schemaName == null) { - return null; - } - - return DOT_JOINER.join(schemaName, tablePO.getTableName()); - } - - if (metadatatype == MetadataObject.Type.TOPIC) { - TopicPO topicPO = TopicMetaService.getInstance().getTopicPOById(metadataObjectId); - if (topicPO == null) { - return null; - } - - String schemaName = getSchemaFullName(topicPO.getSchemaId()); - if (schemaName == null) { - return null; - } - - return DOT_JOINER.join(schemaName, topicPO.getTopicName()); + long tableId = + TableMetaService.getInstance().getTableIdBySchemaIdAndName(schemaId, names.get(2)); + if (type == MetadataObject.Type.TABLE) { + return tableId; } - if (metadatatype == MetadataObject.Type.FILESET) { - FilesetPO filesetPO = FilesetMetaService.getInstance().getFilesetPOById(metadataObjectId); - if (filesetPO == null) { - return null; - } - - String schemaName = getSchemaFullName(filesetPO.getSchemaId()); - if (schemaName == null) { - return null; - } - - return DOT_JOINER.join(schemaName, filesetPO.getFilesetName()); + if (type == MetadataObject.Type.COLUMN) { + return TableColumnMetaService.getInstance() + .getColumnIdByTableIdAndName(tableId, names.get(3)); } - throw new IllegalArgumentException(String.format("Doesn't support the type %s", metadatatype)); - } - - @Nullable - private static String getCatalogFullName(Long entityId) { - CatalogPO catalogPO = CatalogMetaService.getInstance().getCatalogPOById(entityId); - if (catalogPO == null) { - return null; - } - return catalogPO.getCatalogName(); + throw new IllegalArgumentException(String.format("Doesn't support the type %s", type)); } + // Metadata object may be null because the metadata object can be deleted asynchronously. @Nullable - private static String getSchemaFullName(Long entityId) { - SchemaPO schemaPO = SchemaMetaService.getInstance().getSchemaPOById(entityId); - - if (schemaPO == null) { - return null; - } - - String catalogName = getCatalogFullName(schemaPO.getCatalogId()); - if (catalogName == null) { - return null; - } + public static String getMetadataObjectFullName(String type, long metadataObjectId) { + MetadataObject.Type metadataType = MetadataObject.Type.valueOf(type); + String fullName = null; + long objectId = metadataObjectId; + + do { + switch (metadataType) { + case METALAKE: + MetalakePO metalakePO = MetalakeMetaService.getInstance().getMetalakePOById(objectId); + if (metalakePO != null) { + fullName = metalakePO.getMetalakeName(); + metadataType = null; + } else { + return null; + } + break; + + case CATALOG: + CatalogPO catalogPO = CatalogMetaService.getInstance().getCatalogPOById(objectId); + if (catalogPO != null) { + fullName = + fullName != null + ? 
DOT_JOINER.join(catalogPO.getCatalogName(), fullName) + : catalogPO.getCatalogName(); + metadataType = null; + } else { + return null; + } + break; + + case SCHEMA: + SchemaPO schemaPO = SchemaMetaService.getInstance().getSchemaPOById(objectId); + if (schemaPO != null) { + fullName = + fullName != null + ? DOT_JOINER.join(schemaPO.getSchemaName(), fullName) + : schemaPO.getSchemaName(); + objectId = schemaPO.getCatalogId(); + metadataType = MetadataObject.Type.CATALOG; + } else { + return null; + } + break; + + case TABLE: + TablePO tablePO = TableMetaService.getInstance().getTablePOById(objectId); + if (tablePO != null) { + fullName = + fullName != null + ? DOT_JOINER.join(tablePO.getTableName(), fullName) + : tablePO.getTableName(); + objectId = tablePO.getSchemaId(); + metadataType = MetadataObject.Type.SCHEMA; + } else { + return null; + } + break; + + case TOPIC: + TopicPO topicPO = TopicMetaService.getInstance().getTopicPOById(objectId); + if (topicPO != null) { + fullName = + fullName != null + ? DOT_JOINER.join(topicPO.getTopicName(), fullName) + : topicPO.getTopicName(); + objectId = topicPO.getSchemaId(); + metadataType = MetadataObject.Type.SCHEMA; + } else { + return null; + } + break; + + case FILESET: + FilesetPO filesetPO = FilesetMetaService.getInstance().getFilesetPOById(objectId); + if (filesetPO != null) { + fullName = + fullName != null + ? DOT_JOINER.join(filesetPO.getFilesetName(), fullName) + : filesetPO.getFilesetName(); + objectId = filesetPO.getSchemaId(); + metadataType = MetadataObject.Type.SCHEMA; + } else { + return null; + } + break; + + case COLUMN: + ColumnPO columnPO = TableColumnMetaService.getInstance().getColumnPOById(objectId); + if (columnPO != null) { + fullName = + fullName != null + ? DOT_JOINER.join(columnPO.getColumnName(), fullName) + : columnPO.getColumnName(); + objectId = columnPO.getTableId(); + metadataType = MetadataObject.Type.TABLE; + } else { + return null; + } + break; + + default: + throw new IllegalArgumentException( + String.format("Doesn't support the type %s", metadataType)); + } + } while (metadataType != null); - return DOT_JOINER.join(catalogName, schemaPO.getSchemaName()); + return fullName; } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java index f881602bc53..9e2b3530d00 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableColumnMetaService.java @@ -21,9 +21,12 @@ import com.google.common.collect.Lists; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; +import org.apache.gravitino.Entity; +import org.apache.gravitino.exceptions.NoSuchEntityException; import org.apache.gravitino.meta.ColumnEntity; import org.apache.gravitino.meta.TableEntity; import org.apache.gravitino.storage.relational.mapper.TableColumnMapper; @@ -54,6 +57,37 @@ List getColumnsByTableIdAndVersion(Long tableId, Long version) { .collect(Collectors.toList()); } + Long getColumnIdByTableIdAndName(Long tableId, String columnName) { + Long columnId = + SessionUtils.getWithoutCommit( + TableColumnMapper.class, + mapper -> mapper.selectColumnIdByTableIdAndName(tableId, columnName)); + + if (columnId == null) { + throw new NoSuchEntityException( + 
NoSuchEntityException.NO_SUCH_ENTITY_MESSAGE, + Entity.EntityType.COLUMN.name().toLowerCase(Locale.ROOT), + columnName); + } + + return columnId; + } + + ColumnPO getColumnPOById(Long columnId) { + ColumnPO columnPO = + SessionUtils.getWithoutCommit( + TableColumnMapper.class, mapper -> mapper.selectColumnPOById(columnId)); + + if (columnPO == null || columnPO.getColumnOpType() == ColumnPO.ColumnOpType.DELETE.value()) { + throw new NoSuchEntityException( + NoSuchEntityException.NO_SUCH_ENTITY_MESSAGE, + Entity.EntityType.COLUMN.name().toLowerCase(Locale.ROOT), + columnId.toString()); + } + + return columnPO; + } + void insertColumnPOs(TablePO tablePO, List columnEntities) { List columnPOs = POConverters.initializeColumnPOs(tablePO, columnEntities, ColumnPO.ColumnOpType.CREATE); diff --git a/core/src/main/java/org/apache/gravitino/tag/TagManager.java b/core/src/main/java/org/apache/gravitino/tag/TagManager.java index 1b1626de00e..c5d41adb008 100644 --- a/core/src/main/java/org/apache/gravitino/tag/TagManager.java +++ b/core/src/main/java/org/apache/gravitino/tag/TagManager.java @@ -298,7 +298,6 @@ public String[] associateTagsForMetadataObject( throws NoSuchMetadataObjectException, TagAlreadyAssociatedException { Preconditions.checkArgument( !metadataObject.type().equals(MetadataObject.Type.METALAKE) - && !metadataObject.type().equals(MetadataObject.Type.COLUMN) && !metadataObject.type().equals(MetadataObject.Type.ROLE), "Cannot associate tags for unsupported metadata object type %s", metadataObject.type()); diff --git a/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java b/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java index 014ae3a1819..da9f4129a30 100644 --- a/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java +++ b/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java @@ -93,12 +93,9 @@ public static NameIdentifier toEntityIdent(String metalakeName, MetadataObject m case TABLE: case TOPIC: case FILESET: + case COLUMN: String fullName = DOT.join(metalakeName, metadataObject.fullName()); return NameIdentifier.parse(fullName); - case COLUMN: - throw new IllegalArgumentException( - "Cannot convert column metadata object to entity identifier: " - + metadataObject.fullName()); default: throw new IllegalArgumentException( "Unknown metadata object type: " + metadataObject.type()); @@ -150,6 +147,12 @@ public static void checkMetadataObject(String metalake, MetadataObject object) { check(env.tableDispatcher().tableExists(identifier), exceptionToThrowSupplier); break; + case COLUMN: + NameIdentifierUtil.checkColumn(identifier); + NameIdentifier tableIdent = NameIdentifier.of(identifier.namespace().levels()); + check(env.tableDispatcher().tableExists(tableIdent), exceptionToThrowSupplier); + break; + case TOPIC: NameIdentifierUtil.checkTopic(identifier); check(env.topicDispatcher().topicExists(identifier), exceptionToThrowSupplier); diff --git a/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java b/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java index 30f560102d4..550fef967de 100644 --- a/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java +++ b/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java @@ -93,6 +93,22 @@ public static NameIdentifier ofTable( return NameIdentifier.of(metalake, catalog, schema, table); } + /** + * Create the column {@link NameIdentifier} with the given metalake, catalog, schema, table and + * column name. 
+ * + * @param metalake The metalake name + * @param catalog The catalog name + * @param schema The schema name + * @param table The table name + * @param column The column name + * @return The created column {@link NameIdentifier} + */ + public static NameIdentifier ofColumn( + String metalake, String catalog, String schema, String table, String column) { + return NameIdentifier.of(metalake, catalog, schema, table, column); + } + /** * Create the fileset {@link NameIdentifier} with the given metalake, catalog, schema and fileset * name. @@ -196,6 +212,17 @@ public static void checkTable(NameIdentifier ident) { NamespaceUtil.checkTable(ident.namespace()); } + /** + * Check the given {@link NameIdentifier} is a column identifier. Throw an {@link + * IllegalNameIdentifierException} if it's not. + * + * @param ident The column {@link NameIdentifier} to check. + */ + public static void checkColumn(NameIdentifier ident) { + NameIdentifier.check(ident != null, "Column identifier must not be null"); + NamespaceUtil.checkColumn(ident.namespace()); + } + /** * Check the given {@link NameIdentifier} is a fileset identifier. Throw an {@link * IllegalNameIdentifierException} if it's not. @@ -266,6 +293,12 @@ public static MetadataObject toMetadataObject( String tableParent = dot.join(ident.namespace().level(1), ident.namespace().level(2)); return MetadataObjects.of(tableParent, ident.name(), MetadataObject.Type.TABLE); + case COLUMN: + checkColumn(ident); + Namespace columnNs = ident.namespace(); + String columnParent = dot.join(columnNs.level(1), columnNs.level(2), columnNs.level(3)); + return MetadataObjects.of(columnParent, ident.name(), MetadataObject.Type.COLUMN); + case FILESET: checkFileset(ident); String filesetParent = dot.join(ident.namespace().level(1), ident.namespace().level(2)); diff --git a/core/src/main/java/org/apache/gravitino/utils/NamespaceUtil.java b/core/src/main/java/org/apache/gravitino/utils/NamespaceUtil.java index 2c353b07b5f..c24015bb330 100644 --- a/core/src/main/java/org/apache/gravitino/utils/NamespaceUtil.java +++ b/core/src/main/java/org/apache/gravitino/utils/NamespaceUtil.java @@ -70,6 +70,19 @@ public static Namespace ofTable(String metalake, String catalog, String schema) return Namespace.of(metalake, catalog, schema); } + /** + * Create a namespace for column. + * + * @param metalake The metalake name + * @param catalog The catalog name + * @param schema The schema name + * @param table The table name + * @return A namespace for column + */ + public static Namespace ofColumn(String metalake, String catalog, String schema, String table) { + return Namespace.of(metalake, catalog, schema, table); + } + /** * Create a namespace for fileset. * @@ -146,6 +159,19 @@ public static void checkTable(Namespace namespace) { namespace); } + /** + * Check if the given column namespace is legal, throw an {@link IllegalNamespaceException} if + * it's illegal. + * + * @param namespace The column namespace + */ + public static void checkColumn(Namespace namespace) { + check( + namespace != null && namespace.length() == 4, + "Column namespace must be non-null and have 4 levels, the input namespace is %s", + namespace); + } + /** * Check if the given fileset namespace is legal, throw an {@link IllegalNamespaceException} if * it's illegal. 
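Taken together, the identifier and namespace utilities added above pin down how a column is addressed: a column identifier has five parts, and its namespace is the four-level prefix `metalake.catalog.schema.table`. The following is an editorial sketch (not part of the patch; all names are placeholders) showing how these helpers compose:

```java
import org.apache.gravitino.Entity;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.NameIdentifier;
import org.apache.gravitino.utils.NameIdentifierUtil;

public class ColumnIdentifierSketch {
  public static void main(String[] args) {
    // A column identifier has five parts; checkColumn() validates that its
    // namespace has exactly four levels (metalake.catalog.schema.table).
    NameIdentifier ident =
        NameIdentifierUtil.ofColumn("metalake1", "catalog1", "schema1", "table1", "column1");

    // toMetadataObject() drops the metalake level: the resulting COLUMN
    // object has parent "catalog1.schema1.table1" and name "column1".
    MetadataObject columnObject =
        NameIdentifierUtil.toMetadataObject(ident, Entity.EntityType.COLUMN);

    System.out.println(columnObject.fullName()); // catalog1.schema1.table1.column1
    System.out.println(columnObject.type()); // COLUMN
  }
}
```

This is exactly the round trip that the updated `TestNameIdentifierUtil` and `TestMetadataObjectUtil` tests below assert.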
diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java index 8d61d357cc7..30eb6bda65e 100644 --- a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java +++ b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestTableColumnMetaService.java @@ -37,6 +37,7 @@ import org.apache.gravitino.rel.types.Types; import org.apache.gravitino.storage.RandomIdGenerator; import org.apache.gravitino.storage.relational.TestJDBCBackend; +import org.apache.gravitino.storage.relational.po.ColumnPO; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.testcontainers.shaded.com.google.common.collect.Lists; @@ -420,6 +421,115 @@ public void testDeleteMetalake() throws IOException { () -> TableMetaService.getInstance().getTableByIdentifier(retrievedTable.nameIdentifier())); } + @Test + public void testGetColumnIdAndPO() throws IOException { + String catalogName = "catalog1"; + String schemaName = "schema1"; + createParentEntities(METALAKE_NAME, catalogName, schemaName); + + // Create a table entity with column + ColumnEntity column = + ColumnEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("column1") + .withPosition(0) + .withComment("comment1") + .withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withDefaultValue(Literals.integerLiteral(1)) + .withAuditInfo(auditInfo) + .build(); + + TableEntity createdTable = + TableEntity.builder() + .withId(RandomIdGenerator.INSTANCE.nextId()) + .withName("table1") + .withNamespace(Namespace.of(METALAKE_NAME, catalogName, schemaName)) + .withColumns(Lists.newArrayList(column)) + .withAuditInfo(auditInfo) + .build(); + + TableMetaService.getInstance().insertTable(createdTable, false); + + TableEntity retrievedTable = + TableMetaService.getInstance().getTableByIdentifier(createdTable.nameIdentifier()); + Assertions.assertEquals(1, retrievedTable.columns().size()); + Assertions.assertEquals(column.id(), retrievedTable.columns().get(0).id()); + + Long columnId = + TableColumnMetaService.getInstance() + .getColumnIdByTableIdAndName(retrievedTable.id(), column.name()); + Assertions.assertEquals(column.id(), columnId); + + ColumnPO retrievedColumn = TableColumnMetaService.getInstance().getColumnPOById(column.id()); + Assertions.assertEquals(column.id(), retrievedColumn.getColumnId()); + Assertions.assertEquals(column.name(), retrievedColumn.getColumnName()); + Assertions.assertEquals(column.position(), retrievedColumn.getColumnPosition()); + Assertions.assertEquals(column.comment(), retrievedColumn.getColumnComment()); + Assertions.assertEquals( + ColumnPO.ColumnOpType.CREATE.value(), retrievedColumn.getColumnOpType()); + + // Update the column name + ColumnEntity updatedColumn = + ColumnEntity.builder() + .withId(column.id()) + .withName("column1_updated") + .withPosition(column.position()) + .withComment(column.comment()) + .withDataType(column.dataType()) + .withNullable(column.nullable()) + .withAutoIncrement(column.autoIncrement()) + .withDefaultValue(column.defaultValue()) + .withAuditInfo(auditInfo) + .build(); + + TableEntity updatedTable = + TableEntity.builder() + .withId(retrievedTable.id()) + .withName(retrievedTable.name()) + .withNamespace(retrievedTable.namespace()) + .withColumns(Lists.newArrayList(updatedColumn)) + 
.withAuditInfo(retrievedTable.auditInfo())
+            .build();
+
+    Function<TableEntity, TableEntity> updater = oldTable -> updatedTable;
+    TableMetaService.getInstance().updateTable(retrievedTable.nameIdentifier(), updater);
+
+    Long updatedColumnId =
+        TableColumnMetaService.getInstance()
+            .getColumnIdByTableIdAndName(retrievedTable.id(), updatedColumn.name());
+    Assertions.assertEquals(updatedColumn.id(), updatedColumnId);
+
+    ColumnPO updatedColumnPO =
+        TableColumnMetaService.getInstance().getColumnPOById(updatedColumn.id());
+    Assertions.assertEquals(updatedColumn.id(), updatedColumnPO.getColumnId());
+    Assertions.assertEquals(updatedColumn.name(), updatedColumnPO.getColumnName());
+
+    // Delete the column
+    TableEntity updatedTable2 =
+        TableEntity.builder()
+            .withId(retrievedTable.id())
+            .withName(retrievedTable.name())
+            .withNamespace(retrievedTable.namespace())
+            .withColumns(Lists.newArrayList())
+            .withAuditInfo(retrievedTable.auditInfo())
+            .build();
+
+    Function<TableEntity, TableEntity> updater2 = oldTable -> updatedTable2;
+    TableMetaService.getInstance().updateTable(retrievedTable.nameIdentifier(), updater2);
+
+    Assertions.assertThrows(
+        NoSuchEntityException.class,
+        () ->
+            TableColumnMetaService.getInstance()
+                .getColumnIdByTableIdAndName(retrievedTable.id(), updatedColumn.name()));
+
+    Assertions.assertThrows(
+        NoSuchEntityException.class,
+        () -> TableColumnMetaService.getInstance().getColumnPOById(updatedColumn.id()));
+  }
+
   private void compareTwoColumns(
       List<ColumnEntity> expectedColumns, List<ColumnEntity> actualColumns) {
     Assertions.assertEquals(expectedColumns.size(), actualColumns.size());
diff --git a/core/src/test/java/org/apache/gravitino/tag/TestTagManager.java b/core/src/test/java/org/apache/gravitino/tag/TestTagManager.java
index 82ed55eed2a..27b4fa84b6c 100644
--- a/core/src/test/java/org/apache/gravitino/tag/TestTagManager.java
+++ b/core/src/test/java/org/apache/gravitino/tag/TestTagManager.java
@@ -37,6 +37,7 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
 import java.io.File;
 import java.io.IOException;
 import java.time.Instant;
@@ -67,10 +68,12 @@
 import org.apache.gravitino.meta.AuditInfo;
 import org.apache.gravitino.meta.BaseMetalake;
 import org.apache.gravitino.meta.CatalogEntity;
+import org.apache.gravitino.meta.ColumnEntity;
 import org.apache.gravitino.meta.SchemaEntity;
 import org.apache.gravitino.meta.SchemaVersion;
 import org.apache.gravitino.meta.TableEntity;
 import org.apache.gravitino.metalake.MetalakeDispatcher;
+import org.apache.gravitino.rel.types.Types;
 import org.apache.gravitino.storage.IdGenerator;
 import org.apache.gravitino.storage.RandomIdGenerator;
 import org.apache.gravitino.utils.NameIdentifierUtil;
@@ -96,6 +99,9 @@ public class TestTagManager {
   private static final String SCHEMA = "schema_for_tag_test";
 
   private static final String TABLE = "table_for_tag_test";
+
+  private static final String COLUMN = "column_for_tag_test";
+
   private static final MetalakeDispatcher metalakeDispatcher = mock(MetalakeDispatcher.class);
   private static final CatalogDispatcher catalogDispatcher = mock(CatalogDispatcher.class);
   private static final SchemaDispatcher schemaDispatcher = mock(SchemaDispatcher.class);
@@ -166,10 +172,23 @@ public static void setUp() throws IOException, IllegalAccessException {
             .build();
     entityStore.put(schema, false /* overwritten */);
 
+    ColumnEntity column =
+        ColumnEntity.builder()
+            .withId(idGenerator.nextId())
+            .withName(COLUMN)
+            .withPosition(0)
+            .withComment("Test column")
.withDataType(Types.IntegerType.get()) + .withNullable(true) + .withAutoIncrement(false) + .withAuditInfo(audit) + .build(); + TableEntity table = TableEntity.builder() .withId(idGenerator.nextId()) .withName(TABLE) + .withColumns(Lists.newArrayList(column)) .withNamespace(Namespace.of(METALAKE, CATALOG, SCHEMA)) .withAuditInfo(audit) .build(); @@ -219,6 +238,13 @@ public void cleanUp() { String[] tableTags = tagManager.listTagsForMetadataObject(METALAKE, tableObject); tagManager.associateTagsForMetadataObject(METALAKE, tableObject, null, tableTags); + MetadataObject columnObject = + NameIdentifierUtil.toMetadataObject( + NameIdentifierUtil.ofColumn(METALAKE, CATALOG, SCHEMA, TABLE, COLUMN), + Entity.EntityType.COLUMN); + String[] columnTags = tagManager.listTagsForMetadataObject(METALAKE, columnObject); + tagManager.associateTagsForMetadataObject(METALAKE, columnObject, null, columnTags); + Arrays.stream(tagManager.listTags(METALAKE)).forEach(n -> tagManager.deleteTag(METALAKE, n)); } @@ -439,6 +465,37 @@ public void testAssociateTagsForMetadataObject() { Assertions.assertEquals(2, tags6.length); Assertions.assertEquals(ImmutableSet.of("tag1", "tag3"), ImmutableSet.copyOf(tags6)); + + // Test associate and disassociate same tags for column + MetadataObject columnObject = + NameIdentifierUtil.toMetadataObject( + NameIdentifierUtil.ofColumn(METALAKE, CATALOG, SCHEMA, TABLE, COLUMN), + Entity.EntityType.COLUMN); + + String[] tagsToAdd3 = new String[] {tag1.name()}; + String[] tags7 = + tagManager.associateTagsForMetadataObject(METALAKE, columnObject, tagsToAdd3, null); + + Assertions.assertEquals(1, tags7.length); + Assertions.assertEquals(ImmutableSet.of("tag1"), ImmutableSet.copyOf(tags7)); + + // Test associate and disassociate tags for column + String[] tagsToRemove2 = new String[] {tag1.name()}; + String[] tags8 = + tagManager.associateTagsForMetadataObject(METALAKE, columnObject, null, tagsToRemove2); + + Assertions.assertEquals(0, tags8.length); + Assertions.assertEquals(ImmutableSet.of(), ImmutableSet.copyOf(tags8)); + + // Test associate and disassociate same tags for column + String[] tagsToAdd4 = new String[] {tag2.name(), tag3.name()}; + String[] tagsToRemove3 = new String[] {tag2.name()}; + String[] tags9 = + tagManager.associateTagsForMetadataObject( + METALAKE, columnObject, tagsToAdd4, tagsToRemove3); + + Assertions.assertEquals(1, tags9.length); + Assertions.assertEquals(ImmutableSet.of("tag3"), ImmutableSet.copyOf(tags9)); } @Test @@ -456,6 +513,10 @@ public void testListMetadataObjectsForTag() { MetadataObject tableObject = NameIdentifierUtil.toMetadataObject( NameIdentifierUtil.ofTable(METALAKE, CATALOG, SCHEMA, TABLE), Entity.EntityType.TABLE); + MetadataObject columnObject = + NameIdentifierUtil.toMetadataObject( + NameIdentifierUtil.ofColumn(METALAKE, CATALOG, SCHEMA, TABLE, COLUMN), + Entity.EntityType.COLUMN); tagManager.associateTagsForMetadataObject( METALAKE, catalogObject, new String[] {tag1.name(), tag2.name(), tag3.name()}, null); @@ -463,11 +524,14 @@ public void testListMetadataObjectsForTag() { METALAKE, schemaObject, new String[] {tag1.name(), tag2.name()}, null); tagManager.associateTagsForMetadataObject( METALAKE, tableObject, new String[] {tag1.name()}, null); + tagManager.associateTagsForMetadataObject( + METALAKE, columnObject, new String[] {tag1.name()}, null); MetadataObject[] objects = tagManager.listMetadataObjectsForTag(METALAKE, tag1.name()); - Assertions.assertEquals(3, objects.length); + Assertions.assertEquals(4, objects.length); 
Assertions.assertEquals( - ImmutableSet.of(catalogObject, schemaObject, tableObject), ImmutableSet.copyOf(objects)); + ImmutableSet.of(catalogObject, schemaObject, tableObject, columnObject), + ImmutableSet.copyOf(objects)); MetadataObject[] objects1 = tagManager.listMetadataObjectsForTag(METALAKE, tag2.name()); Assertions.assertEquals(2, objects1.length); @@ -504,6 +568,10 @@ public void testListTagsForMetadataObject() { MetadataObject tableObject = NameIdentifierUtil.toMetadataObject( NameIdentifierUtil.ofTable(METALAKE, CATALOG, SCHEMA, TABLE), Entity.EntityType.TABLE); + MetadataObject columnObject = + NameIdentifierUtil.toMetadataObject( + NameIdentifierUtil.ofColumn(METALAKE, CATALOG, SCHEMA, TABLE, COLUMN), + Entity.EntityType.COLUMN); tagManager.associateTagsForMetadataObject( METALAKE, catalogObject, new String[] {tag1.name(), tag2.name(), tag3.name()}, null); @@ -511,6 +579,8 @@ public void testListTagsForMetadataObject() { METALAKE, schemaObject, new String[] {tag1.name(), tag2.name()}, null); tagManager.associateTagsForMetadataObject( METALAKE, tableObject, new String[] {tag1.name()}, null); + tagManager.associateTagsForMetadataObject( + METALAKE, columnObject, new String[] {tag1.name()}, null); String[] tags = tagManager.listTagsForMetadataObject(METALAKE, catalogObject); Assertions.assertEquals(3, tags.length); @@ -536,6 +606,14 @@ public void testListTagsForMetadataObject() { Assertions.assertEquals(1, tagsInfo2.length); Assertions.assertEquals(ImmutableSet.of(tag1), ImmutableSet.copyOf(tagsInfo2)); + String[] tags3 = tagManager.listTagsForMetadataObject(METALAKE, columnObject); + Assertions.assertEquals(1, tags3.length); + Assertions.assertEquals(ImmutableSet.of("tag1"), ImmutableSet.copyOf(tags3)); + + Tag[] tagsInfo3 = tagManager.listTagsInfoForMetadataObject(METALAKE, columnObject); + Assertions.assertEquals(1, tagsInfo3.length); + Assertions.assertEquals(ImmutableSet.of(tag1), ImmutableSet.copyOf(tagsInfo3)); + // List tags for non-existent metadata object MetadataObject nonExistentObject = NameIdentifierUtil.toMetadataObject( @@ -564,6 +642,10 @@ public void testGetTagForMetadataObject() { MetadataObject tableObject = NameIdentifierUtil.toMetadataObject( NameIdentifierUtil.ofTable(METALAKE, CATALOG, SCHEMA, TABLE), Entity.EntityType.TABLE); + MetadataObject columnObject = + NameIdentifierUtil.toMetadataObject( + NameIdentifierUtil.ofColumn(METALAKE, CATALOG, SCHEMA, TABLE, COLUMN), + Entity.EntityType.COLUMN); tagManager.associateTagsForMetadataObject( METALAKE, catalogObject, new String[] {tag1.name(), tag2.name(), tag3.name()}, null); @@ -571,6 +653,8 @@ public void testGetTagForMetadataObject() { METALAKE, schemaObject, new String[] {tag1.name(), tag2.name()}, null); tagManager.associateTagsForMetadataObject( METALAKE, tableObject, new String[] {tag1.name()}, null); + tagManager.associateTagsForMetadataObject( + METALAKE, columnObject, new String[] {tag1.name()}, null); Tag result = tagManager.getTagForMetadataObject(METALAKE, catalogObject, tag1.name()); Assertions.assertEquals(tag1, result); @@ -584,6 +668,9 @@ public void testGetTagForMetadataObject() { Tag result3 = tagManager.getTagForMetadataObject(METALAKE, catalogObject, tag3.name()); Assertions.assertEquals(tag3, result3); + Tag result4 = tagManager.getTagForMetadataObject(METALAKE, tableObject, tag1.name()); + Assertions.assertEquals(tag1, result4); + // Test get non-existent tag for metadata object Throwable e = Assertions.assertThrows( diff --git 
a/core/src/test/java/org/apache/gravitino/utils/TestMetadataObjectUtil.java b/core/src/test/java/org/apache/gravitino/utils/TestMetadataObjectUtil.java
index 1de30d16fda..c5a2818669a 100644
--- a/core/src/test/java/org/apache/gravitino/utils/TestMetadataObjectUtil.java
+++ b/core/src/test/java/org/apache/gravitino/utils/TestMetadataObjectUtil.java
@@ -113,12 +113,10 @@ public void testToEntityIdent() {
             "metalake", MetadataObjects.of("catalog.schema", "fileset", MetadataObject.Type.FILESET)));
 
-    Assertions.assertThrows(
-        IllegalArgumentException.class,
-        () ->
-            MetadataObjectUtil.toEntityIdent(
-                "metalake",
-                MetadataObjects.of("catalog.schema.table", "column", MetadataObject.Type.COLUMN)),
-        "Cannot convert column metadata object to entity identifier: catalog.schema.table.column");
+    Assertions.assertEquals(
+        NameIdentifier.of("metalake", "catalog", "schema", "table", "column"),
+        MetadataObjectUtil.toEntityIdent(
+            "metalake",
+            MetadataObjects.of("catalog.schema.table", "column", MetadataObject.Type.COLUMN)));
   }
 }
diff --git a/core/src/test/java/org/apache/gravitino/utils/TestNameIdentifierUtil.java b/core/src/test/java/org/apache/gravitino/utils/TestNameIdentifierUtil.java
index 964f910ba39..2eca30351b0 100644
--- a/core/src/test/java/org/apache/gravitino/utils/TestNameIdentifierUtil.java
+++ b/core/src/test/java/org/apache/gravitino/utils/TestNameIdentifierUtil.java
@@ -104,12 +104,12 @@ public void testToMetadataObject() {
     assertEquals(
         filesetObject, NameIdentifierUtil.toMetadataObject(fileset, Entity.EntityType.FILESET));
 
-    // test column
-    Throwable e =
-        assertThrows(
-            IllegalArgumentException.class,
-            () -> NameIdentifierUtil.toMetadataObject(fileset, Entity.EntityType.COLUMN));
-    assertTrue(e.getMessage().contains("Entity type COLUMN is not supported"));
+    NameIdentifier column =
+        NameIdentifier.of("metalake1", "catalog1", "schema1", "table1", "column1");
+    MetadataObject columnObject =
+        MetadataObjects.parse("catalog1.schema1.table1.column1", MetadataObject.Type.COLUMN);
+    assertEquals(
+        columnObject, NameIdentifierUtil.toMetadataObject(column, Entity.EntityType.COLUMN));
 
     // test null
     Throwable e1 =
diff --git a/docs/manage-tags-in-gravitino.md b/docs/manage-tags-in-gravitino.md
index ac088a7c2df..4163ca89d2c 100644
--- a/docs/manage-tags-in-gravitino.md
+++ b/docs/manage-tags-in-gravitino.md
@@ -26,9 +26,9 @@ the future versions.
    `COLUMN`, `FILESET`, `TOPIC`, etc. A metadata object is identified by a `type` and a
    dot-separated `name`. For example, a `CATALOG` object has a name "catalog1" with type
    "CATALOG", a `SCHEMA` object has a name "catalog1.schema1" with type "SCHEMA", a `TABLE`
-   object has a name "catalog1.schema1.table1" with type "TABLE".
-2. Currently, only `CATALOG`, `SCHEMA`, `TABLE`, `FILESET`, `TOPIC` objects can be tagged, tagging
-   on `COLUMN` will be supported in the future.
+   object has a name "catalog1.schema1.table1" with type "TABLE", a `COLUMN` object has a name
+   "catalog1.schema1.table1.column1" with type "COLUMN".
+2. Currently, `CATALOG`, `SCHEMA`, `TABLE`, `FILESET`, `TOPIC`, and `COLUMN` objects can be tagged.
 3. Tags in Gravitino are inheritable, so listing tags of a metadata object will also list the
    tags of its parent metadata objects. For example, listing tags of a `Table` will also list
    the tags of its parent `Schema` and `Catalog`.
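To make the documentation change concrete, here is a hedged client-side sketch of column tagging, modeled on the `TagIT` test earlier in this patch. It is editorial rather than part of the diff: the server URL, the metalake/catalog/schema/table names, and the pre-created `pii` tag are all assumptions.

```java
import java.util.Arrays;
import org.apache.gravitino.NameIdentifier;
import org.apache.gravitino.client.GravitinoClient;
import org.apache.gravitino.rel.Column;
import org.apache.gravitino.rel.Table;
import org.apache.gravitino.tag.Tag;

public class ColumnTagSketch {
  public static void main(String[] args) {
    // Assumes a running Gravitino server and an existing metalake, catalog,
    // schema, and table, plus a tag named "pii" created beforehand via createTag().
    GravitinoClient client =
        GravitinoClient.builder("http://localhost:8090").withMetalake("metalake_demo").build();

    Table table =
        client
            .loadCatalog("catalog_demo")
            .asTableCatalog()
            .loadTable(NameIdentifier.of("schema_demo", "table_demo"));

    // With this patch, the columns of a loaded table are GenericColumn
    // instances that expose supportsTags().
    Column column =
        Arrays.stream(table.columns()).filter(c -> c.name().equals("col1")).findFirst().get();

    // Attach the tag directly to the column.
    column.supportsTags().associateTags(new String[] {"pii"}, null);

    // listTagsInfo() also returns tags inherited from the table, schema, and
    // catalog; Tag.inherited() tells direct and inherited tags apart.
    for (Tag tag : column.supportsTags().listTagsInfo()) {
      System.out.println(tag.name() + " inherited=" + tag.inherited().get());
    }
  }
}
```

The same association is available over HTTP through the tag endpoints extended in the OpenAPI spec below.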
diff --git a/docs/open-api/openapi.yaml b/docs/open-api/openapi.yaml
index 0b16270c126..24bc0f2ce00 100644
--- a/docs/open-api/openapi.yaml
+++ b/docs/open-api/openapi.yaml
@@ -453,6 +453,7 @@ components:
           - "CATALOG"
           - "SCHEMA"
           - "TABLE"
+          - "COLUMN"
           - "FILESET"
           - "TOPIC"
           - "ROLE"
diff --git a/docs/open-api/tags.yaml b/docs/open-api/tags.yaml
index 42d45c2a1d7..7b8deef2520 100644
--- a/docs/open-api/tags.yaml
+++ b/docs/open-api/tags.yaml
@@ -213,6 +213,7 @@ paths:
       tags:
         - tag
       summary: Associate tags with metadata object
+      description: Associate and disassociate tags with a metadata object. Note that the supported metadata object types are CATALOG, SCHEMA, TABLE, FILESET, TOPIC, and COLUMN.
       operationId: associateTags
       requestBody:
         content:

From 3c0997b95efc1c248c5a73ce71c74c135a6ade13 Mon Sep 17 00:00:00 2001
From: mchades
Date: Fri, 25 Oct 2024 11:00:05 +0800
Subject: [PATCH 007/123] [#5237][#5240] fix(metalake): fix list metalakes
 missing in-use property (#5241)

### What changes were proposed in this pull request?

- fix list metalakes missing the in-use property
- fix an NPE when properties are null

### Why are the changes needed?

Listing metalakes shows metalake details by default, so the in-use
property needs to be included in the result.

Fix: #5240
Fix: #5237

### Does this PR introduce _any_ user-facing change?

No

### How was this patch tested?

Tests added
---
 .../exceptions/NonEmptyCatalogException.java  |  37 +++++
 .../exceptions/NonEmptyMetalakeException.java |  37 +++++
 .../exceptions/NonEmptySchemaException.java   |   2 +-
 .../gravitino/client/ErrorHandlers.java       |  11 ++
 .../gravitino/client/GravitinoMetalake.java   |   2 +-
 .../client/integration/test/MetalakeIT.java   | 132 +++++++++++++++++-
 .../dto/requests/MetalakeSetRequest.java      |   2 +
 .../gravitino/catalog/CatalogManager.java     |   6 +-
 .../gravitino/metalake/MetalakeManager.java   |  35 ++++-
 .../metalake/MetalakeNormalizeDispatcher.java |  23 +--
 .../metalake/TestMetalakeManager.java         |   6 +-
 .../server/web/rest/ExceptionHandlers.java    |   8 ++
 12 files changed, 263 insertions(+), 38 deletions(-)
 create mode 100644 api/src/main/java/org/apache/gravitino/exceptions/NonEmptyCatalogException.java
 create mode 100644 api/src/main/java/org/apache/gravitino/exceptions/NonEmptyMetalakeException.java

diff --git a/api/src/main/java/org/apache/gravitino/exceptions/NonEmptyCatalogException.java b/api/src/main/java/org/apache/gravitino/exceptions/NonEmptyCatalogException.java
new file mode 100644
index 00000000000..30c62a8fda2
--- /dev/null
+++ b/api/src/main/java/org/apache/gravitino/exceptions/NonEmptyCatalogException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.apache.gravitino.exceptions; + +import com.google.errorprone.annotations.FormatMethod; +import com.google.errorprone.annotations.FormatString; + +/** Exception thrown when a catalog is not empty. */ +public class NonEmptyCatalogException extends GravitinoRuntimeException { + + /** + * Constructs a new exception with the specified detail message. + * + * @param message the detail message. + * @param args the arguments to the message. + */ + @FormatMethod + public NonEmptyCatalogException(@FormatString String message, Object... args) { + super(message, args); + } +} diff --git a/api/src/main/java/org/apache/gravitino/exceptions/NonEmptyMetalakeException.java b/api/src/main/java/org/apache/gravitino/exceptions/NonEmptyMetalakeException.java new file mode 100644 index 00000000000..4e9cf8cf9bd --- /dev/null +++ b/api/src/main/java/org/apache/gravitino/exceptions/NonEmptyMetalakeException.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.exceptions; + +import com.google.errorprone.annotations.FormatMethod; +import com.google.errorprone.annotations.FormatString; + +/** Exception thrown when a metalake is not empty. */ +public class NonEmptyMetalakeException extends GravitinoRuntimeException { + + /** + * Constructs a new exception with the specified detail message. + * + * @param message the detail message. + * @param args the arguments to the message. + */ + @FormatMethod + public NonEmptyMetalakeException(@FormatString String message, Object... args) { + super(message, args); + } +} diff --git a/api/src/main/java/org/apache/gravitino/exceptions/NonEmptySchemaException.java b/api/src/main/java/org/apache/gravitino/exceptions/NonEmptySchemaException.java index 846fd541e97..c1f8c29e5ce 100644 --- a/api/src/main/java/org/apache/gravitino/exceptions/NonEmptySchemaException.java +++ b/api/src/main/java/org/apache/gravitino/exceptions/NonEmptySchemaException.java @@ -21,7 +21,7 @@ import com.google.errorprone.annotations.FormatMethod; import com.google.errorprone.annotations.FormatString; -/** Exception thrown when a namespace is not empty. */ +/** Exception thrown when a schema is not empty. */ public class NonEmptySchemaException extends GravitinoRuntimeException { /** * Constructs a new exception with the specified detail message. 
diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java b/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java index 3cc8df1d242..3dcf6672ae6 100644 --- a/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java @@ -53,6 +53,8 @@ import org.apache.gravitino.exceptions.NoSuchTagException; import org.apache.gravitino.exceptions.NoSuchTopicException; import org.apache.gravitino.exceptions.NoSuchUserException; +import org.apache.gravitino.exceptions.NonEmptyCatalogException; +import org.apache.gravitino.exceptions.NonEmptyMetalakeException; import org.apache.gravitino.exceptions.NonEmptySchemaException; import org.apache.gravitino.exceptions.NotFoundException; import org.apache.gravitino.exceptions.NotInUseException; @@ -465,6 +467,9 @@ public void accept(ErrorResponse errorResponse) { throw new NotInUseException(errorMessage); } + case ErrorConstants.NON_EMPTY_CODE: + throw new NonEmptyCatalogException(errorMessage); + default: super.accept(errorResponse); } @@ -496,6 +501,12 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.IN_USE_CODE: throw new MetalakeInUseException(errorMessage); + case ErrorConstants.NOT_IN_USE_CODE: + throw new MetalakeNotInUseException(errorMessage); + + case ErrorConstants.NON_EMPTY_CODE: + throw new NonEmptyMetalakeException(errorMessage); + default: super.accept(errorResponse); } diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/GravitinoMetalake.java b/clients/client-java/src/main/java/org/apache/gravitino/client/GravitinoMetalake.java index 47f42d3ad22..1e0f7c63fb0 100644 --- a/clients/client-java/src/main/java/org/apache/gravitino/client/GravitinoMetalake.java +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/GravitinoMetalake.java @@ -383,7 +383,7 @@ public SupportsRoles supportsRoles() { return this; } - /* + /** * List all the tag names under a metalake. * * @return A list of the tag names under the current metalake. 
diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java index 2922154f319..3a650646416 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java @@ -18,44 +18,54 @@ */ package org.apache.gravitino.client.integration.test; +import static org.apache.gravitino.Metalake.PROPERTY_IN_USE; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import com.google.common.collect.ImmutableMap; import java.util.ArrayList; import java.util.Collections; import java.util.Map; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.CatalogChange; import org.apache.gravitino.MetalakeChange; import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.Namespace; +import org.apache.gravitino.SchemaChange; +import org.apache.gravitino.SupportsSchemas; import org.apache.gravitino.auth.AuthConstants; import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.exceptions.IllegalNameIdentifierException; import org.apache.gravitino.exceptions.MetalakeAlreadyExistsException; +import org.apache.gravitino.exceptions.MetalakeInUseException; +import org.apache.gravitino.exceptions.MetalakeNotInUseException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NonEmptyMetalakeException; +import org.apache.gravitino.file.FilesetCatalog; +import org.apache.gravitino.file.FilesetChange; import org.apache.gravitino.integration.test.util.BaseIT; +import org.apache.gravitino.integration.test.util.GravitinoITUtils; import org.apache.gravitino.utils.RandomNameUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class MetalakeIT extends BaseIT { public static String metalakeNameA = RandomNameUtils.genRandomName("metalakeA"); public static String metalakeNameB = RandomNameUtils.genRandomName("metalakeB"); @BeforeEach - private void start() { + public void start() { // Just in case clean up is needed due to a test failure dropMetalakes(); } @AfterEach - private void stop() { + public void stop() { dropMetalakes(); } @@ -88,6 +98,7 @@ public void testLoadMetalake() { client.createMetalake(metalakeNameA, "metalake A comment", Collections.emptyMap()); GravitinoMetalake metaLakeA = client.loadMetalake(metalakeNameA); assertEquals(metaLakeA.name(), metalakeNameA); + assertEquals("true", metaLakeA.properties().get(PROPERTY_IN_USE)); // metalake does not exist NameIdentifier noexist = NameIdentifier.of(metalakeNameB); @@ -150,6 +161,7 @@ public void testCreateMetalake() { assertEquals(metalakeNameA, metalake.name()); assertEquals("metalake A comment", metalake.comment()); assertEquals(AuthConstants.ANONYMOUS_USER, metalake.auditInfo().creator()); + assertEquals("true", metalake.properties().get(PROPERTY_IN_USE)); // Test metalake name 
already exists Map emptyMap = Collections.emptyMap(); @@ -210,11 +222,119 @@ public void testUpdateMetalakeWithNullableComment() { assertTrue(client.dropMetalake(metalakeNameA, true)); } + @Test + public void testMetalakeAvailable() { + String metalakeName = RandomNameUtils.genRandomName("test_metalake"); + client.createMetalake(metalakeName, null, null); + // test load metalake + GravitinoMetalake metalake = client.loadMetalake(metalakeName); + Assertions.assertEquals(metalakeName, metalake.name()); + Assertions.assertEquals("true", metalake.properties().get(PROPERTY_IN_USE)); + + // test list metalakes + GravitinoMetalake[] metalakes = client.listMetalakes(); + Assertions.assertEquals(1, metalakes.length); + Assertions.assertEquals(metalakeName, metalakes[0].name()); + Assertions.assertEquals("true", metalakes[0].properties().get(PROPERTY_IN_USE)); + + Exception exception = + assertThrows(MetalakeInUseException.class, () -> client.dropMetalake(metalakeName)); + Assertions.assertTrue(exception.getMessage().contains("please disable it first")); + + // create a catalog under the metalake + String catalogName = GravitinoITUtils.genRandomName("test_catalog"); + Catalog catalog = + metalake.createCatalog( + catalogName, Catalog.Type.FILESET, "hadoop", "catalog comment", ImmutableMap.of()); + Assertions.assertEquals("true", catalog.properties().get(Catalog.PROPERTY_IN_USE)); + + Assertions.assertDoesNotThrow(() -> client.disableMetalake(metalakeName)); + GravitinoMetalake loadedMetalake = client.loadMetalake(metalakeName); + Assertions.assertEquals("false", loadedMetalake.properties().get(PROPERTY_IN_USE)); + + exception = + assertThrows( + MetalakeNotInUseException.class, + () -> client.alterMetalake(metalakeName, MetalakeChange.updateComment("new comment"))); + Assertions.assertTrue(exception.getMessage().contains("please enable it first")); + + // test catalog operations under non-in-use metalake + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> + loadedMetalake.createCatalog( + catalogName, Catalog.Type.FILESET, "dummy", null, Collections.emptyMap())); + Assertions.assertThrows(MetalakeNotInUseException.class, loadedMetalake::listCatalogs); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> loadedMetalake.loadCatalog(catalogName)); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> loadedMetalake.dropCatalog(catalogName)); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> loadedMetalake.alterCatalog(catalogName, CatalogChange.rename("dummy"))); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> loadedMetalake.disableCatalog(catalogName)); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> loadedMetalake.enableCatalog(catalogName)); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> + loadedMetalake.testConnection( + catalogName, Catalog.Type.FILESET, "dummy", null, Collections.emptyMap())); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> loadedMetalake.catalogExists(catalogName)); + + // test schema operations under non-in-use metalake + SupportsSchemas schemaOps = catalog.asSchemas(); + Assertions.assertThrows(MetalakeNotInUseException.class, schemaOps::listSchemas); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> schemaOps.createSchema("dummy", null, null)); + Assertions.assertThrows(MetalakeNotInUseException.class, () -> schemaOps.loadSchema("dummy")); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> 
schemaOps.alterSchema("dummy", SchemaChange.removeProperty("dummy"))); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> schemaOps.dropSchema("dummy", false)); + + // test fileset operations under non-in-use catalog + FilesetCatalog filesetOps = catalog.asFilesetCatalog(); + Assertions.assertThrows( + MetalakeNotInUseException.class, () -> filesetOps.listFilesets(Namespace.of("dummy"))); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> filesetOps.loadFileset(NameIdentifier.of("dummy", "dummy"))); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> + filesetOps.createFileset(NameIdentifier.of("dummy", "dummy"), null, null, null, null)); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> filesetOps.dropFileset(NameIdentifier.of("dummy", "dummy"))); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> filesetOps.getFileLocation(NameIdentifier.of("dummy", "dummy"), "dummy")); + Assertions.assertThrows( + MetalakeNotInUseException.class, + () -> + filesetOps.alterFileset( + NameIdentifier.of("dummy", "dummy"), FilesetChange.removeComment())); + + Assertions.assertThrows( + NonEmptyMetalakeException.class, () -> client.dropMetalake(metalakeName)); + + Assertions.assertDoesNotThrow(() -> client.enableMetalake(metalakeName)); + Assertions.assertTrue(loadedMetalake.dropCatalog(catalogName, true)); + + Assertions.assertDoesNotThrow(() -> client.disableMetalake(metalakeName)); + Assertions.assertTrue(client.dropMetalake(metalakeName)); + Assertions.assertFalse(client.dropMetalake(metalakeName)); + } + public void dropMetalakes() { GravitinoMetalake[] metaLakes = client.listMetalakes(); for (GravitinoMetalake metalake : metaLakes) { assertDoesNotThrow(() -> client.disableMetalake(metalake.name())); - assertTrue(client.dropMetalake(metalake.name())); + assertTrue(client.dropMetalake(metalake.name(), true)); } // Reload metadata from backend to check if the drop operations are applied diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeSetRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeSetRequest.java index be75b2d4a21..f87abef25eb 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeSetRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeSetRequest.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.dto.requests; +import com.fasterxml.jackson.annotation.JsonProperty; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.ToString; @@ -29,6 +30,7 @@ @ToString public class MetalakeSetRequest implements RESTRequest { + @JsonProperty("inUse") private final boolean inUse; /** Default constructor for MetalakeSetRequest. 
*/ diff --git a/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java b/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java index 6af759c6142..ed300e45c7e 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java +++ b/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java @@ -87,6 +87,7 @@ import org.apache.gravitino.exceptions.NoSuchCatalogException; import org.apache.gravitino.exceptions.NoSuchEntityException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NonEmptyCatalogException; import org.apache.gravitino.exceptions.NonEmptyEntityException; import org.apache.gravitino.file.FilesetCatalog; import org.apache.gravitino.messaging.TopicCatalog; @@ -629,6 +630,9 @@ public Catalog alterCatalog(NameIdentifier ident, CatalogChange... changes) @Override public boolean dropCatalog(NameIdentifier ident, boolean force) throws NonEmptyEntityException, CatalogInUseException { + NameIdentifier metalakeIdent = NameIdentifier.of(ident.namespace().levels()); + checkMetalake(metalakeIdent, store); + try { boolean catalogInUse = catalogInUse(store, ident); if (catalogInUse && !force) { @@ -646,7 +650,7 @@ public boolean dropCatalog(NameIdentifier ident, boolean force) if (!schemas.isEmpty() && !force) { // the Kafka catalog is special, it includes a default schema if (!catalogEntity.getProvider().equals("kafka") || schemas.size() > 1) { - throw new NonEmptyEntityException( + throw new NonEmptyCatalogException( "Catalog %s has schemas, please drop them first or use force option", ident); } } diff --git a/core/src/main/java/org/apache/gravitino/metalake/MetalakeManager.java b/core/src/main/java/org/apache/gravitino/metalake/MetalakeManager.java index b8c9f77f18c..0239526e5ee 100644 --- a/core/src/main/java/org/apache/gravitino/metalake/MetalakeManager.java +++ b/core/src/main/java/org/apache/gravitino/metalake/MetalakeManager.java @@ -23,6 +23,7 @@ import com.google.common.collect.Maps; import java.io.IOException; import java.time.Instant; +import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.gravitino.Entity.EntityType; @@ -38,6 +39,7 @@ import org.apache.gravitino.exceptions.NoSuchEntityException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; import org.apache.gravitino.exceptions.NonEmptyEntityException; +import org.apache.gravitino.exceptions.NonEmptyMetalakeException; import org.apache.gravitino.meta.AuditInfo; import org.apache.gravitino.meta.BaseMetalake; import org.apache.gravitino.meta.CatalogEntity; @@ -120,9 +122,9 @@ public static boolean metalakeInUse(EntityStore store, NameIdentifier ident) @Override public BaseMetalake[] listMetalakes() { try { - return store - .list(Namespace.empty(), BaseMetalake.class, EntityType.METALAKE) - .toArray(new BaseMetalake[0]); + return store.list(Namespace.empty(), BaseMetalake.class, EntityType.METALAKE).stream() + .map(this::newMetalakeWithResolvedProperties) + .toArray(BaseMetalake[]::new); } catch (IOException ioe) { LOG.error("Listing Metalakes failed due to storage issues.", ioe); throw new RuntimeException(ioe); @@ -140,7 +142,8 @@ public BaseMetalake[] listMetalakes() { @Override public BaseMetalake loadMetalake(NameIdentifier ident) throws NoSuchMetalakeException { try { - return store.get(ident, EntityType.METALAKE, BaseMetalake.class); + return newMetalakeWithResolvedProperties( + store.get(ident, EntityType.METALAKE, BaseMetalake.class)); } catch (NoSuchEntityException e) { 
LOG.warn("Metalake {} does not exist", ident, e); throw new NoSuchMetalakeException(METALAKE_DOES_NOT_EXIST_MSG, ident); @@ -150,6 +153,28 @@ public BaseMetalake loadMetalake(NameIdentifier ident) throws NoSuchMetalakeExce } } + private BaseMetalake newMetalakeWithResolvedProperties(BaseMetalake metalakeEntity) { + Map newProps = + metalakeEntity.properties() == null + ? new HashMap<>() + : new HashMap<>(metalakeEntity.properties()); + newProps + .entrySet() + .removeIf(e -> metalakeEntity.propertiesMetadata().isHiddenProperty(e.getKey())); + newProps.putIfAbsent( + PROPERTY_IN_USE, + metalakeEntity.propertiesMetadata().getDefaultValue(PROPERTY_IN_USE).toString()); + + return BaseMetalake.builder() + .withId(metalakeEntity.id()) + .withName(metalakeEntity.name()) + .withComment(metalakeEntity.comment()) + .withProperties(newProps) + .withVersion(metalakeEntity.getVersion()) + .withAuditInfo(metalakeEntity.auditInfo()) + .build(); + } + /** * Creates a new Metalake. * @@ -253,7 +278,7 @@ public boolean dropMetalake(NameIdentifier ident, boolean force) List catalogEntities = store.list(Namespace.of(ident.name()), CatalogEntity.class, EntityType.CATALOG); if (!catalogEntities.isEmpty() && !force) { - throw new NonEmptyEntityException( + throw new NonEmptyMetalakeException( "Metalake %s has catalogs, please drop them first or use force option", ident); } diff --git a/core/src/main/java/org/apache/gravitino/metalake/MetalakeNormalizeDispatcher.java b/core/src/main/java/org/apache/gravitino/metalake/MetalakeNormalizeDispatcher.java index dbc9d6bdc29..e32f6e9460a 100644 --- a/core/src/main/java/org/apache/gravitino/metalake/MetalakeNormalizeDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/metalake/MetalakeNormalizeDispatcher.java @@ -19,7 +19,6 @@ package org.apache.gravitino.metalake; import static org.apache.gravitino.Entity.SYSTEM_METALAKE_RESERVED_NAME; -import static org.apache.gravitino.Metalake.PROPERTY_IN_USE; import static org.apache.gravitino.catalog.PropertiesMetadataHelpers.validatePropertyForAlter; import static org.apache.gravitino.catalog.PropertiesMetadataHelpers.validatePropertyForCreate; import static org.apache.gravitino.meta.BaseMetalake.PROPERTIES_METADATA; @@ -37,7 +36,6 @@ import org.apache.gravitino.exceptions.MetalakeInUseException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; import org.apache.gravitino.exceptions.NonEmptyEntityException; -import org.apache.gravitino.meta.BaseMetalake; public class MetalakeNormalizeDispatcher implements MetalakeDispatcher { private static final Set RESERVED_WORDS = ImmutableSet.of(SYSTEM_METALAKE_RESERVED_NAME); @@ -66,7 +64,7 @@ public Metalake[] listMetalakes() { @Override public Metalake loadMetalake(NameIdentifier ident) throws NoSuchMetalakeException { - return newMetalakeWithResolvedProperties((BaseMetalake) dispatcher.loadMetalake(ident)); + return dispatcher.loadMetalake(ident); } @Override @@ -132,25 +130,6 @@ private void validateMetalakeName(String name) { } } - private BaseMetalake newMetalakeWithResolvedProperties(BaseMetalake metalakeEntity) { - Map newProps = Maps.newHashMap(metalakeEntity.properties()); - newProps - .entrySet() - .removeIf(e -> metalakeEntity.propertiesMetadata().isHiddenProperty(e.getKey())); - newProps.putIfAbsent( - PROPERTY_IN_USE, - metalakeEntity.propertiesMetadata().getDefaultValue(PROPERTY_IN_USE).toString()); - - return BaseMetalake.builder() - .withId(metalakeEntity.id()) - .withName(metalakeEntity.name()) - .withComment(metalakeEntity.comment()) - 
.withProperties(newProps) - .withVersion(metalakeEntity.getVersion()) - .withAuditInfo(metalakeEntity.auditInfo()) - .build(); - } - private Pair, Map> getMetalakeAlterProperty( MetalakeChange... metalakeChanges) { Map upserts = Maps.newHashMap(); diff --git a/core/src/test/java/org/apache/gravitino/metalake/TestMetalakeManager.java b/core/src/test/java/org/apache/gravitino/metalake/TestMetalakeManager.java index d21db2ce5ca..0d50c7fb767 100644 --- a/core/src/test/java/org/apache/gravitino/metalake/TestMetalakeManager.java +++ b/core/src/test/java/org/apache/gravitino/metalake/TestMetalakeManager.java @@ -90,8 +90,10 @@ public void testListMetalakes() { NameIdentifier ident2 = NameIdentifier.of("test12"); Map props = ImmutableMap.of("key1", "value1"); - BaseMetalake metalake1 = metalakeManager.createMetalake(ident1, "comment", props); - BaseMetalake metalake2 = metalakeManager.createMetalake(ident2, "comment", props); + metalakeManager.createMetalake(ident1, "comment", props); + BaseMetalake metalake1 = metalakeManager.loadMetalake(ident1); + metalakeManager.createMetalake(ident2, "comment", props); + BaseMetalake metalake2 = metalakeManager.loadMetalake(ident2); Set metalakes = Sets.newHashSet(metalakeManager.listMetalakes()); Assertions.assertTrue(metalakes.contains(metalake1)); diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java b/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java index 284a07b849f..8d1ba85657e 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/ExceptionHandlers.java @@ -33,6 +33,8 @@ import org.apache.gravitino.exceptions.MetalakeInUseException; import org.apache.gravitino.exceptions.MetalakeNotInUseException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NonEmptyCatalogException; +import org.apache.gravitino.exceptions.NonEmptyMetalakeException; import org.apache.gravitino.exceptions.NonEmptySchemaException; import org.apache.gravitino.exceptions.NotFoundException; import org.apache.gravitino.exceptions.NotInUseException; @@ -317,6 +319,9 @@ public Response handle(OperationType op, String catalog, String metalake, Except } else if (e instanceof InUseException) { return Utils.inUse(errorMsg, e); + } else if (e instanceof NonEmptyCatalogException) { + return Utils.nonEmpty(errorMsg, e); + } else { return super.handle(op, catalog, metalake, e); } @@ -354,6 +359,9 @@ public Response handle(OperationType op, String metalake, String parent, Excepti } else if (e instanceof MetalakeInUseException) { return Utils.inUse(errorMsg, e); + } else if (e instanceof NonEmptyMetalakeException) { + return Utils.nonEmpty(errorMsg, e); + } else { return super.handle(op, metalake, parent, e); } From 128e51645c1c567bd8c7190999de480e9b20a118 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:18:24 +0800 Subject: [PATCH 008/123] build(deps): bump next from 14.2.3 to 14.2.10 in /web/web (#5246) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [next](https://github.com/vercel/next.js) from 14.2.3 to 14.2.10.
Release notes

Sourced from next's releases.

v14.2.10

> [!NOTE]
> This release is backporting bug fixes. It does not include all pending features/changes on canary.

Core Changes

Credits

Huge thanks to @huozhi and @ijjk for helping!

v14.2.9

> [!NOTE]
> This release is backporting bug fixes. It does not include all pending features/changes on canary.

Core Changes

- Revert "Fix esm property def in flight loader (#66990)" (#69749)
- Disable experimental.optimizeServer by default to fix failed server action (#69788)
- Fix middleware fallback: false case (#69799)
- Fix status code for /_not-found route (#64058) (#69808)
- Fix metadata prop merging (#69807)
- create-next-app: fix font file corruption when using import alias (#69806)

Credits

Huge thanks to @huozhi, @ztanner, @ijjk, and @lubieowoce for helping!

v14.2.8

What's Changed

> [!NOTE]
> This release is backporting bug fixes and minor improvements. It does not include all pending features/changes on canary.

Support esmExternals in app directory

- Support esm externals in app router (#65041)
- Turbopack: Allow client components from foreign code in app routes (#64751)
- Turbopack: add support for esm externals in app dir (#64918)
- other related PRs: #66990 #66727 #66286 #65519

Reading cookies set in middleware in components and actions

- initialize ALS with cookies in middleware (#65008)
- fix middleware cookie initialization (#65820)
- ensure cookies set in middleware can be read in a server action (#67924)
- fix: merged middleware cookies should preserve options (#67956)

... (truncated)

Commits
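A reviewer wanting to confirm the bump locally could run something like the following. This is a sketch, not part of the PR: it assumes a working pnpm toolchain and that `web/web` is the package root, as the diff below indicates.

```bash
# Sketch: verify the dependency bump locally; assumes pnpm is installed.
cd web/web
pnpm install      # re-resolve node_modules against the updated lockfile
pnpm list next    # should report next 14.2.10
```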
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/web/package.json | 2 +- web/web/pnpm-lock.yaml | 620 +++++++++++++++++++++-------------------- 2 files changed, 321 insertions(+), 301 deletions(-) diff --git a/web/web/package.json b/web/web/package.json index d0537804b96..816caf8059e 100644 --- a/web/web/package.json +++ b/web/web/package.json @@ -33,7 +33,7 @@ "clsx": "^2.1.1", "dayjs": "^1.11.11", "lodash-es": "^4.17.21", - "next": "14.2.3", + "next": "14.2.10", "nprogress": "^0.2.0", "qs": "^6.12.2", "react": "^18.3.1", diff --git a/web/web/pnpm-lock.yaml b/web/web/pnpm-lock.yaml index 02efeb4dc2a..4c3c1fa8689 100644 --- a/web/web/pnpm-lock.yaml +++ b/web/web/pnpm-lock.yaml @@ -16,31 +16,31 @@ importers: version: 11.11.4(@types/react@18.3.3)(react@18.3.1) '@emotion/styled': specifier: ^11.11.5 - version: 11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1) + version: 11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) '@hookform/resolvers': specifier: ^3.7.0 - version: 3.7.0(react-hook-form@7.52.1) + version: 3.7.0(react-hook-form@7.52.1(react@18.3.1)) '@mui/icons-material': specifier: ^5.15.21 - version: 5.15.21(@mui/material@5.15.21)(@types/react@18.3.3)(react@18.3.1) + version: 5.15.21(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) '@mui/lab': specifier: 5.0.0-alpha.170 - version: 5.0.0-alpha.170(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@mui/material@5.15.21)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) + version: 5.0.0-alpha.170(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mui/material': specifier: ^5.15.21 - version: 5.15.21(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) + version: 5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mui/x-data-grid': specifier: ^6.20.3 - version: 6.20.3(@mui/material@5.15.21)(@mui/system@5.15.20)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) + version: 6.20.3(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@mui/system@5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) 
'@mui/x-tree-view': specifier: ^6.17.0 - version: 6.17.0(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@mui/material@5.15.21)(@mui/system@5.15.20)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) + version: 6.17.0(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@mui/system@5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@reduxjs/toolkit': specifier: ^1.9.7 - version: 1.9.7(react-redux@8.1.3)(react@18.3.1) + version: 1.9.7(react-redux@8.1.3(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(redux@4.2.1))(react@18.3.1) antd: specifier: ^5.19.0 - version: 5.19.0(react-dom@18.3.1)(react@18.3.1) + version: 5.19.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) axios: specifier: ^1.7.2 version: 1.7.2 @@ -57,8 +57,8 @@ importers: specifier: ^4.17.21 version: 4.17.21 next: - specifier: 14.2.3 - version: 14.2.3(react-dom@18.3.1)(react@18.3.1) + specifier: 14.2.10 + version: 14.2.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nprogress: specifier: ^0.2.0 version: 0.2.0 @@ -76,13 +76,13 @@ importers: version: 7.52.1(react@18.3.1) react-hot-toast: specifier: ^2.4.1 - version: 2.4.1(csstype@3.1.3)(react-dom@18.3.1)(react@18.3.1) + version: 2.4.1(csstype@3.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react-redux: specifier: ^8.1.3 - version: 8.1.3(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)(redux@4.2.1) + version: 8.1.3(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(redux@4.2.1) react-use: specifier: ^17.5.0 - version: 17.5.0(react-dom@18.3.1)(react@18.3.1) + version: 17.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) yup: specifier: ^1.4.0 version: 1.4.0 @@ -672,62 +672,62 @@ packages: '@next/bundle-analyzer@14.2.4': resolution: {integrity: sha512-ydSDikSgGhYmBlnvzS4tgdGyn40SCFI9uWDldbkRSwXS60tg4WBJR4qJoTSERTmdAFb1PeUYCyFdfC80i2WL1w==} - '@next/env@14.2.3': - resolution: {integrity: sha512-W7fd7IbkfmeeY2gXrzJYDx8D2lWKbVoTIj1o1ScPHNzvp30s1AuoEFSdr39bC5sjxJaxTtq3OTCZboNp0lNWHA==} + '@next/env@14.2.10': + resolution: {integrity: sha512-dZIu93Bf5LUtluBXIv4woQw2cZVZ2DJTjax5/5DOs3lzEOeKLy7GxRSr4caK9/SCPdaW6bCgpye6+n4Dh9oJPw==} '@next/eslint-plugin-next@14.0.3': resolution: {integrity: sha512-j4K0n+DcmQYCVnSAM+UByTVfIHnYQy2ODozfQP+4RdwtRDfobrIvKq1K4Exb2koJ79HSSa7s6B2SA8T/1YR3RA==} - '@next/swc-darwin-arm64@14.2.3': - resolution: {integrity: sha512-3pEYo/RaGqPP0YzwnlmPN2puaF2WMLM3apt5jLW2fFdXD9+pqcoTzRk+iZsf8ta7+quAe4Q6Ms0nR0SFGFdS1A==} + '@next/swc-darwin-arm64@14.2.10': + resolution: {integrity: sha512-V3z10NV+cvMAfxQUMhKgfQnPbjw+Ew3cnr64b0lr8MDiBJs3eLnM6RpGC46nhfMZsiXgQngCJKWGTC/yDcgrDQ==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@next/swc-darwin-x64@14.2.3': - resolution: {integrity: sha512-6adp7waE6P1TYFSXpY366xwsOnEXM+y1kgRpjSRVI2CBDOcbRjsJ67Z6EgKIqWIue52d2q/Mx8g9MszARj8IEA==} + '@next/swc-darwin-x64@14.2.10': + resolution: {integrity: 
sha512-Y0TC+FXbFUQ2MQgimJ/7Ina2mXIKhE7F+GUe1SgnzRmwFY3hX2z8nyVCxE82I2RicspdkZnSWMn4oTjIKz4uzA==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@next/swc-linux-arm64-gnu@14.2.3': - resolution: {integrity: sha512-cuzCE/1G0ZSnTAHJPUT1rPgQx1w5tzSX7POXSLaS7w2nIUJUD+e25QoXD/hMfxbsT9rslEXugWypJMILBj/QsA==} + '@next/swc-linux-arm64-gnu@14.2.10': + resolution: {integrity: sha512-ZfQ7yOy5zyskSj9rFpa0Yd7gkrBnJTkYVSya95hX3zeBG9E55Z6OTNPn1j2BTFWvOVVj65C3T+qsjOyVI9DQpA==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-musl@14.2.3': - resolution: {integrity: sha512-0D4/oMM2Y9Ta3nGuCcQN8jjJjmDPYpHX9OJzqk42NZGJocU2MqhBq5tWkJrUQOQY9N+In9xOdymzapM09GeiZw==} + '@next/swc-linux-arm64-musl@14.2.10': + resolution: {integrity: sha512-n2i5o3y2jpBfXFRxDREr342BGIQCJbdAUi/K4q6Env3aSx8erM9VuKXHw5KNROK9ejFSPf0LhoSkU/ZiNdacpQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-x64-gnu@14.2.3': - resolution: {integrity: sha512-ENPiNnBNDInBLyUU5ii8PMQh+4XLr4pG51tOp6aJ9xqFQ2iRI6IH0Ds2yJkAzNV1CfyagcyzPfROMViS2wOZ9w==} + '@next/swc-linux-x64-gnu@14.2.10': + resolution: {integrity: sha512-GXvajAWh2woTT0GKEDlkVhFNxhJS/XdDmrVHrPOA83pLzlGPQnixqxD8u3bBB9oATBKB//5e4vpACnx5Vaxdqg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-linux-x64-musl@14.2.3': - resolution: {integrity: sha512-BTAbq0LnCbF5MtoM7I/9UeUu/8ZBY0i8SFjUMCbPDOLv+un67e2JgyN4pmgfXBwy/I+RHu8q+k+MCkDN6P9ViQ==} + '@next/swc-linux-x64-musl@14.2.10': + resolution: {integrity: sha512-opFFN5B0SnO+HTz4Wq4HaylXGFV+iHrVxd3YvREUX9K+xfc4ePbRrxqOuPOFjtSuiVouwe6uLeDtabjEIbkmDA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-win32-arm64-msvc@14.2.3': - resolution: {integrity: sha512-AEHIw/dhAMLNFJFJIJIyOFDzrzI5bAjI9J26gbO5xhAKHYTZ9Or04BesFPXiAYXDNdrwTP2dQceYA4dL1geu8A==} + '@next/swc-win32-arm64-msvc@14.2.10': + resolution: {integrity: sha512-9NUzZuR8WiXTvv+EiU/MXdcQ1XUvFixbLIMNQiVHuzs7ZIFrJDLJDaOF1KaqttoTujpcxljM/RNAOmw1GhPPQQ==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@next/swc-win32-ia32-msvc@14.2.3': - resolution: {integrity: sha512-vga40n1q6aYb0CLrM+eEmisfKCR45ixQYXuBXxOOmmoV8sYST9k7E3US32FsY+CkkF7NtzdcebiFT4CHuMSyZw==} + '@next/swc-win32-ia32-msvc@14.2.10': + resolution: {integrity: sha512-fr3aEbSd1GeW3YUMBkWAu4hcdjZ6g4NBl1uku4gAn661tcxd1bHs1THWYzdsbTRLcCKLjrDZlNp6j2HTfrw+Bg==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] - '@next/swc-win32-x64-msvc@14.2.3': - resolution: {integrity: sha512-Q1/zm43RWynxrO7lW4ehciQVj+5ePBhOK+/K2P7pLFX3JaJ/IZVC69SHidrmZSOkqz7ECIOhhy7XhAFG4JYyHA==} + '@next/swc-win32-x64-msvc@14.2.10': + resolution: {integrity: sha512-UjeVoRGKNL2zfbcQ6fscmgjBAS/inHBh63mjIlfPg/NG8Yn2ztqylXt5qilYb6hoHIwaU2ogHknHWWmahJjgZQ==} engines: {node: '>= 10'} cpu: [x64] os: [win32] @@ -1473,6 +1473,7 @@ packages: eslint@8.57.0: resolution: {integrity: sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. 
hasBin: true espree@9.6.1: @@ -2078,8 +2079,8 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - next@14.2.3: - resolution: {integrity: sha512-dowFkFTR8v79NPJO4QsBUtxv0g9BrS/phluVpMAt2ku7H+cbcBJlopXjkWlwxrk/xGqMemr7JkGPGemPrLLX7A==} + next@14.2.10: + resolution: {integrity: sha512-sDDExXnh33cY3RkS9JuFEKaS4HmlWmDKP1VJioucCG6z5KuA008DPsDZOzi8UfqEk3Ii+2NCQSJrfbEWtZZfww==} engines: {node: '>=18.17.0'} hasBin: true peerDependencies: @@ -3094,27 +3095,27 @@ snapshots: dependencies: '@ctrl/tinycolor': 3.6.1 - '@ant-design/cssinjs@1.21.0(react-dom@18.3.1)(react@18.3.1)': + '@ant-design/cssinjs@1.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@emotion/hash': 0.8.0 '@emotion/unitless': 0.7.5 classnames: 2.5.1 csstype: 3.1.3 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) stylis: 4.3.2 '@ant-design/icons-svg@4.4.2': {} - '@ant-design/icons@5.3.7(react-dom@18.3.1)(react@18.3.1)': + '@ant-design/icons@5.3.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@ant-design/colors': 7.1.0 '@ant-design/icons-svg': 4.4.2 '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -3263,9 +3264,10 @@ snapshots: '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.3.1) '@emotion/utils': 1.2.1 '@emotion/weak-memoize': 0.3.1 - '@types/react': 18.3.3 hoist-non-react-statics: 3.3.2 react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.3 transitivePeerDependencies: - supports-color @@ -3279,7 +3281,7 @@ snapshots: '@emotion/sheet@1.2.2': {} - '@emotion/styled@11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1)': + '@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@emotion/babel-plugin': 11.11.0 @@ -3288,8 +3290,9 @@ snapshots: '@emotion/serialize': 1.1.4 '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.3.1) '@emotion/utils': 1.2.1 - '@types/react': 18.3.3 react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.3 transitivePeerDependencies: - supports-color @@ -3406,7 +3409,7 @@ snapshots: '@floating-ui/core': 1.6.4 '@floating-ui/utils': 0.2.4 - '@floating-ui/react-dom@2.1.1(react-dom@18.3.1)(react@18.3.1)': + '@floating-ui/react-dom@2.1.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@floating-ui/dom': 1.6.7 react: 18.3.1 @@ -3414,7 +3417,7 @@ snapshots: '@floating-ui/utils@0.2.4': {} - '@hookform/resolvers@3.7.0(react-hook-form@7.52.1)': + '@hookform/resolvers@3.7.0(react-hook-form@7.52.1(react@18.3.1))': dependencies: react-hook-form: 7.52.1(react@18.3.1) @@ -3491,55 +3494,55 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.4.15 - '@mui/base@5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)': + '@mui/base@5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@floating-ui/react-dom': 2.1.1(react-dom@18.3.1)(react@18.3.1) + '@floating-ui/react-dom': 2.1.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mui/types': 7.2.14(@types/react@18.3.3) '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) 
'@popperjs/core': 2.11.8 - '@types/react': 18.3.3 clsx: 2.1.1 prop-types: 15.8.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.3 '@mui/core-downloads-tracker@5.15.21': {} - '@mui/icons-material@5.15.21(@mui/material@5.15.21)(@types/react@18.3.3)(react@18.3.1)': + '@mui/icons-material@5.15.21(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@types/react@18.3.3)(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@mui/material': 5.15.21(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) - '@types/react': 18.3.3 + '@mui/material': 5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.3 - '@mui/lab@5.0.0-alpha.170(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@mui/material@5.15.21)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)': + '@mui/lab@5.0.0-alpha.170(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) - '@emotion/styled': 11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1) - '@mui/base': 5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) - '@mui/material': 5.15.21(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) - '@mui/system': 5.15.20(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react@18.3.1) + '@mui/base': 5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@mui/material': 5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@mui/system': 5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) '@mui/types': 7.2.14(@types/react@18.3.3) '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) - '@types/react': 18.3.3 clsx: 2.1.1 prop-types: 15.8.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) + '@emotion/styled': 11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) + '@types/react': 18.3.3 - '@mui/material@5.15.21(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)': + 
'@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) - '@emotion/styled': 11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1) - '@mui/base': 5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) + '@mui/base': 5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mui/core-downloads-tracker': 5.15.21 - '@mui/system': 5.15.20(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react@18.3.1) + '@mui/system': 5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) '@mui/types': 7.2.14(@types/react@18.3.3) '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) - '@types/react': 18.3.3 '@types/react-transition-group': 4.4.10 clsx: 2.1.1 csstype: 3.1.3 @@ -3547,59 +3550,67 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) react-is: 18.3.1 - react-transition-group: 4.4.5(react-dom@18.3.1)(react@18.3.1) + react-transition-group: 4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + optionalDependencies: + '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) + '@emotion/styled': 11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) + '@types/react': 18.3.3 '@mui/private-theming@5.15.20(@types/react@18.3.3)(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) - '@types/react': 18.3.3 prop-types: 15.8.1 react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.3 - '@mui/styled-engine@5.15.14(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(react@18.3.1)': + '@mui/styled-engine@5.15.14(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@emotion/cache': 11.11.0 - '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) - '@emotion/styled': 11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1) csstype: 3.1.3 prop-types: 15.8.1 react: 18.3.1 + optionalDependencies: + '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) + '@emotion/styled': 11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) - '@mui/system@5.15.20(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react@18.3.1)': + '@mui/system@5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) - '@emotion/styled': 11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1) '@mui/private-theming': 5.15.20(@types/react@18.3.3)(react@18.3.1) - '@mui/styled-engine': 5.15.14(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(react@18.3.1) + '@mui/styled-engine': 
5.15.14(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(react@18.3.1) '@mui/types': 7.2.14(@types/react@18.3.3) '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) - '@types/react': 18.3.3 clsx: 2.1.1 csstype: 3.1.3 prop-types: 15.8.1 react: 18.3.1 + optionalDependencies: + '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) + '@emotion/styled': 11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) + '@types/react': 18.3.3 '@mui/types@7.2.14(@types/react@18.3.3)': - dependencies: + optionalDependencies: '@types/react': 18.3.3 '@mui/utils@5.15.20(@types/react@18.3.3)(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@types/prop-types': 15.7.12 - '@types/react': 18.3.3 prop-types: 15.8.1 react: 18.3.1 react-is: 18.3.1 + optionalDependencies: + '@types/react': 18.3.3 - '@mui/x-data-grid@6.20.3(@mui/material@5.15.21)(@mui/system@5.15.20)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)': + '@mui/x-data-grid@6.20.3(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@mui/system@5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@mui/material': 5.15.21(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) - '@mui/system': 5.15.20(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react@18.3.1) + '@mui/material': 5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@mui/system': 5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) clsx: 2.1.1 prop-types: 15.8.1 @@ -3609,21 +3620,21 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@mui/x-tree-view@6.17.0(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@mui/material@5.15.21)(@mui/system@5.15.20)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)': + 
'@mui/x-tree-view@6.17.0(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@mui/material@5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@mui/system@5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@emotion/react': 11.11.4(@types/react@18.3.3)(react@18.3.1) - '@emotion/styled': 11.11.5(@emotion/react@11.11.4)(@types/react@18.3.3)(react@18.3.1) - '@mui/base': 5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) - '@mui/material': 5.15.21(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1) - '@mui/system': 5.15.20(@emotion/react@11.11.4)(@emotion/styled@11.11.5)(@types/react@18.3.3)(react@18.3.1) + '@emotion/styled': 11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) + '@mui/base': 5.0.0-beta.40(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@mui/material': 5.15.21(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@mui/system': 5.15.20(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@emotion/styled@11.11.5(@emotion/react@11.11.4(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1))(@types/react@18.3.3)(react@18.3.1) '@mui/utils': 5.15.20(@types/react@18.3.3)(react@18.3.1) '@types/react-transition-group': 4.4.10 clsx: 2.1.1 prop-types: 15.8.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-transition-group: 4.4.5(react-dom@18.3.1)(react@18.3.1) + react-transition-group: 4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) transitivePeerDependencies: - '@types/react' @@ -3634,37 +3645,37 @@ snapshots: - bufferutil - utf-8-validate - '@next/env@14.2.3': {} + '@next/env@14.2.10': {} '@next/eslint-plugin-next@14.0.3': dependencies: glob: 7.1.7 - '@next/swc-darwin-arm64@14.2.3': + '@next/swc-darwin-arm64@14.2.10': optional: true - '@next/swc-darwin-x64@14.2.3': + '@next/swc-darwin-x64@14.2.10': optional: true - '@next/swc-linux-arm64-gnu@14.2.3': + '@next/swc-linux-arm64-gnu@14.2.10': optional: true - '@next/swc-linux-arm64-musl@14.2.3': + '@next/swc-linux-arm64-musl@14.2.10': optional: true - '@next/swc-linux-x64-gnu@14.2.3': + '@next/swc-linux-x64-gnu@14.2.10': optional: true - '@next/swc-linux-x64-musl@14.2.3': + '@next/swc-linux-x64-musl@14.2.10': optional: true - '@next/swc-win32-arm64-msvc@14.2.3': + '@next/swc-win32-arm64-msvc@14.2.10': optional: true - '@next/swc-win32-ia32-msvc@14.2.3': + '@next/swc-win32-ia32-msvc@14.2.10': optional: true - '@next/swc-win32-x64-msvc@14.2.3': + '@next/swc-win32-x64-msvc@14.2.10': optional: true '@nodelib/fs.scandir@2.1.5': @@ -3690,19 +3701,19 @@ snapshots: dependencies: '@babel/runtime': 7.24.7 - '@rc-component/color-picker@1.5.3(react-dom@18.3.1)(react@18.3.1)': + 
'@rc-component/color-picker@1.5.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 '@ctrl/tinycolor': 3.6.1 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@rc-component/context@1.4.0(react-dom@18.3.1)(react@18.3.1)': + '@rc-component/context@1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -3710,59 +3721,60 @@ snapshots: dependencies: '@babel/runtime': 7.24.7 - '@rc-component/mutate-observer@1.1.0(react-dom@18.3.1)(react@18.3.1)': + '@rc-component/mutate-observer@1.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@rc-component/portal@1.1.2(react-dom@18.3.1)(react@18.3.1)': + '@rc-component/portal@1.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@rc-component/qrcode@1.0.0(react-dom@18.3.1)(react@18.3.1)': + '@rc-component/qrcode@1.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@rc-component/tour@1.15.0(react-dom@18.3.1)(react@18.3.1)': + '@rc-component/tour@1.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@rc-component/portal': 1.1.2(react-dom@18.3.1)(react@18.3.1) - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/portal': 1.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@rc-component/trigger@2.2.0(react-dom@18.3.1)(react@18.3.1)': + '@rc-component/trigger@2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.7 - '@rc-component/portal': 1.1.2(react-dom@18.3.1)(react@18.3.1) + '@rc-component/portal': 1.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@reduxjs/toolkit@1.9.7(react-redux@8.1.3)(react@18.3.1)': + '@reduxjs/toolkit@1.9.7(react-redux@8.1.3(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(redux@4.2.1))(react@18.3.1)': dependencies: immer: 9.0.21 - react: 18.3.1 - react-redux: 
8.1.3(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)(redux@4.2.1) redux: 4.2.1 redux-thunk: 2.4.2(redux@4.2.1) reselect: 4.1.8 + optionalDependencies: + react: 18.3.1 + react-redux: 8.1.3(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(redux@4.2.1) '@rushstack/eslint-patch@1.10.3': {} @@ -3829,6 +3841,7 @@ snapshots: '@typescript-eslint/visitor-keys': 6.21.0 debug: 4.3.5 eslint: 8.57.0 + optionalDependencies: typescript: 5.5.3 transitivePeerDependencies: - supports-color @@ -3850,6 +3863,7 @@ snapshots: minimatch: 9.0.3 semver: 7.6.2 ts-api-utils: 1.3.0(typescript@5.5.3) + optionalDependencies: typescript: 5.5.3 transitivePeerDependencies: - supports-color @@ -3894,54 +3908,54 @@ snapshots: ansi-styles@6.2.1: {} - antd@5.19.0(react-dom@18.3.1)(react@18.3.1): + antd@5.19.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@ant-design/colors': 7.1.0 - '@ant-design/cssinjs': 1.21.0(react-dom@18.3.1)(react@18.3.1) - '@ant-design/icons': 5.3.7(react-dom@18.3.1)(react@18.3.1) + '@ant-design/cssinjs': 1.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@ant-design/icons': 5.3.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@ant-design/react-slick': 1.1.2(react@18.3.1) '@babel/runtime': 7.24.7 '@ctrl/tinycolor': 3.6.1 - '@rc-component/color-picker': 1.5.3(react-dom@18.3.1)(react@18.3.1) - '@rc-component/mutate-observer': 1.1.0(react-dom@18.3.1)(react@18.3.1) - '@rc-component/qrcode': 1.0.0(react-dom@18.3.1)(react@18.3.1) - '@rc-component/tour': 1.15.0(react-dom@18.3.1)(react@18.3.1) - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/color-picker': 1.5.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@rc-component/mutate-observer': 1.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@rc-component/qrcode': 1.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@rc-component/tour': 1.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 copy-to-clipboard: 3.3.3 dayjs: 1.11.11 - rc-cascader: 3.27.0(react-dom@18.3.1)(react@18.3.1) - rc-checkbox: 3.3.0(react-dom@18.3.1)(react@18.3.1) - rc-collapse: 3.7.3(react-dom@18.3.1)(react@18.3.1) - rc-dialog: 9.5.2(react-dom@18.3.1)(react@18.3.1) - rc-drawer: 7.2.0(react-dom@18.3.1)(react@18.3.1) - rc-dropdown: 4.2.0(react-dom@18.3.1)(react@18.3.1) - rc-field-form: 2.2.1(react-dom@18.3.1)(react@18.3.1) - rc-image: 7.9.0(react-dom@18.3.1)(react@18.3.1) - rc-input: 1.5.1(react-dom@18.3.1)(react@18.3.1) - rc-input-number: 9.1.0(react-dom@18.3.1)(react@18.3.1) - rc-mentions: 2.14.0(react-dom@18.3.1)(react@18.3.1) - rc-menu: 9.14.1(react-dom@18.3.1)(react@18.3.1) - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-notification: 5.6.0(react-dom@18.3.1)(react@18.3.1) - rc-pagination: 4.2.0(react-dom@18.3.1)(react@18.3.1) - rc-picker: 4.6.6(dayjs@1.11.11)(react-dom@18.3.1)(react@18.3.1) - rc-progress: 4.0.0(react-dom@18.3.1)(react@18.3.1) - rc-rate: 2.13.0(react-dom@18.3.1)(react@18.3.1) - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-segmented: 2.3.0(react-dom@18.3.1)(react@18.3.1) - rc-select: 14.15.0(react-dom@18.3.1)(react@18.3.1) - rc-slider: 10.6.2(react-dom@18.3.1)(react@18.3.1) - rc-steps: 6.0.1(react-dom@18.3.1)(react@18.3.1) - rc-switch: 4.1.0(react-dom@18.3.1)(react@18.3.1) - rc-table: 7.45.7(react-dom@18.3.1)(react@18.3.1) - rc-tabs: 15.1.1(react-dom@18.3.1)(react@18.3.1) - rc-textarea: 1.7.0(react-dom@18.3.1)(react@18.3.1) - rc-tooltip: 
6.2.0(react-dom@18.3.1)(react@18.3.1) - rc-tree: 5.8.8(react-dom@18.3.1)(react@18.3.1) - rc-tree-select: 5.22.1(react-dom@18.3.1)(react@18.3.1) - rc-upload: 4.5.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-cascader: 3.27.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-checkbox: 3.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-collapse: 3.7.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-dialog: 9.5.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-drawer: 7.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-dropdown: 4.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-field-form: 2.2.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-image: 7.9.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-input: 1.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-input-number: 9.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-mentions: 2.14.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-menu: 9.14.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-notification: 5.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-pagination: 4.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-picker: 4.6.6(dayjs@1.11.11)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-progress: 4.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-rate: 2.13.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-segmented: 2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-select: 14.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-slider: 10.6.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-steps: 6.0.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-switch: 4.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-table: 7.45.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-tabs: 15.1.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-textarea: 1.7.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-tooltip: 6.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-tree: 5.8.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-tree-select: 5.22.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-upload: 4.5.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) scroll-into-view-if-needed: 3.1.0 @@ -4546,11 +4560,12 @@ snapshots: '@typescript-eslint/parser': 6.21.0(eslint@8.57.0)(typescript@5.5.3) eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.0) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) eslint-plugin-jsx-a11y: 6.9.0(eslint@8.57.0) eslint-plugin-react: 7.34.3(eslint@8.57.0) eslint-plugin-react-hooks: 4.6.2(eslint@8.57.0) + optionalDependencies: typescript: 5.5.3 transitivePeerDependencies: - eslint-import-resolver-webpack @@ -4568,13 +4583,13 @@ snapshots: 
transitivePeerDependencies: - supports-color - eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.0): + eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0): dependencies: debug: 4.3.5 enhanced-resolve: 5.17.0 eslint: 8.57.0 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0))(eslint@8.57.0) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) fast-glob: 3.3.2 get-tsconfig: 4.7.5 is-core-module: 2.14.0 @@ -4585,19 +4600,19 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-module-utils@2.8.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0): + eslint-module-utils@2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0))(eslint@8.57.0): dependencies: - '@typescript-eslint/parser': 6.21.0(eslint@8.57.0)(typescript@5.5.3) debug: 3.2.7 + optionalDependencies: + '@typescript-eslint/parser': 6.21.0(eslint@8.57.0)(typescript@5.5.3) eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1)(eslint@8.57.0) + eslint-import-resolver-typescript: 3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0) transitivePeerDependencies: - supports-color - eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0): + eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0): dependencies: - '@typescript-eslint/parser': 6.21.0(eslint@8.57.0)(typescript@5.5.3) array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 array.prototype.flat: 1.3.2 @@ -4606,7 +4621,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0)(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) + eslint-module-utils: 
2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0))(eslint@8.57.0) hasown: 2.0.2 is-core-module: 2.14.0 is-glob: 4.0.3 @@ -4616,6 +4631,8 @@ snapshots: object.values: 1.2.0 semver: 6.3.1 tsconfig-paths: 3.15.0 + optionalDependencies: + '@typescript-eslint/parser': 6.21.0(eslint@8.57.0)(typescript@5.5.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -5287,7 +5304,7 @@ snapshots: object-assign: 4.1.1 thenify-all: 1.6.0 - nano-css@5.6.1(react-dom@18.3.1)(react@18.3.1): + nano-css@5.6.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@jridgewell/sourcemap-codec': 1.4.15 css-tree: 1.1.3 @@ -5304,9 +5321,9 @@ snapshots: natural-compare@1.4.0: {} - next@14.2.3(react-dom@18.3.1)(react@18.3.1): + next@14.2.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - '@next/env': 14.2.3 + '@next/env': 14.2.10 '@swc/helpers': 0.5.5 busboy: 1.6.0 caniuse-lite: 1.0.30001639 @@ -5316,15 +5333,15 @@ snapshots: react-dom: 18.3.1(react@18.3.1) styled-jsx: 5.1.1(react@18.3.1) optionalDependencies: - '@next/swc-darwin-arm64': 14.2.3 - '@next/swc-darwin-x64': 14.2.3 - '@next/swc-linux-arm64-gnu': 14.2.3 - '@next/swc-linux-arm64-musl': 14.2.3 - '@next/swc-linux-x64-gnu': 14.2.3 - '@next/swc-linux-x64-musl': 14.2.3 - '@next/swc-win32-arm64-msvc': 14.2.3 - '@next/swc-win32-ia32-msvc': 14.2.3 - '@next/swc-win32-x64-msvc': 14.2.3 + '@next/swc-darwin-arm64': 14.2.10 + '@next/swc-darwin-x64': 14.2.10 + '@next/swc-linux-arm64-gnu': 14.2.10 + '@next/swc-linux-arm64-musl': 14.2.10 + '@next/swc-linux-x64-gnu': 14.2.10 + '@next/swc-linux-x64-musl': 14.2.10 + '@next/swc-win32-arm64-msvc': 14.2.10 + '@next/swc-win32-ia32-msvc': 14.2.10 + '@next/swc-win32-x64-msvc': 14.2.10 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros @@ -5495,8 +5512,9 @@ snapshots: postcss-load-config@4.0.2(postcss@8.4.39): dependencies: lilconfig: 3.1.2 - postcss: 8.4.39 yaml: 2.4.5 + optionalDependencies: + postcss: 8.4.39 postcss-nested@6.0.1(postcss@8.4.39): dependencies: @@ -5549,321 +5567,322 @@ snapshots: queue-microtask@1.2.3: {} - rc-cascader@3.27.0(react-dom@18.3.1)(react@18.3.1): + rc-cascader@3.27.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 array-tree-filter: 2.1.0 classnames: 2.5.1 - rc-select: 14.15.0(react-dom@18.3.1)(react@18.3.1) - rc-tree: 5.8.8(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-select: 14.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-tree: 5.8.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-checkbox@3.3.0(react-dom@18.3.1)(react@18.3.1): + rc-checkbox@3.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-collapse@3.7.3(react-dom@18.3.1)(react@18.3.1): + rc-collapse@3.7.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 
5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-dialog@9.5.2(react-dom@18.3.1)(react@18.3.1): + rc-dialog@9.5.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/portal': 1.1.2(react-dom@18.3.1)(react@18.3.1) + '@rc-component/portal': 1.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-drawer@7.2.0(react-dom@18.3.1)(react@18.3.1): + rc-drawer@7.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/portal': 1.1.2(react-dom@18.3.1)(react@18.3.1) + '@rc-component/portal': 1.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-dropdown@4.2.0(react-dom@18.3.1)(react@18.3.1): + rc-dropdown@4.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-field-form@2.2.1(react-dom@18.3.1)(react@18.3.1): + rc-field-form@2.2.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 '@rc-component/async-validator': 5.0.4 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-image@7.9.0(react-dom@18.3.1)(react@18.3.1): + rc-image@7.9.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/portal': 1.1.2(react-dom@18.3.1)(react@18.3.1) + '@rc-component/portal': 1.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-dialog: 9.5.2(react-dom@18.3.1)(react@18.3.1) - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-dialog: 9.5.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-input-number@9.1.0(react-dom@18.3.1)(react@18.3.1): + rc-input-number@9.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 '@rc-component/mini-decimal': 1.1.0 classnames: 2.5.1 - rc-input: 1.5.1(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-input: 1.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-input@1.5.1(react-dom@18.3.1)(react@18.3.1): + rc-input@1.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): 
dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-mentions@2.14.0(react-dom@18.3.1)(react@18.3.1): + rc-mentions@2.14.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-input: 1.5.1(react-dom@18.3.1)(react@18.3.1) - rc-menu: 9.14.1(react-dom@18.3.1)(react@18.3.1) - rc-textarea: 1.7.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-input: 1.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-menu: 9.14.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-textarea: 1.7.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-menu@9.14.1(react-dom@18.3.1)(react@18.3.1): + rc-menu@9.14.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-overflow: 1.3.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-overflow: 1.3.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-motion@2.9.2(react-dom@18.3.1)(react@18.3.1): + rc-motion@2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-notification@5.6.0(react-dom@18.3.1)(react@18.3.1): + rc-notification@5.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-overflow@1.3.2(react-dom@18.3.1)(react@18.3.1): + rc-overflow@1.3.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-pagination@4.2.0(react-dom@18.3.1)(react@18.3.1): + rc-pagination@4.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-picker@4.6.6(dayjs@1.11.11)(react-dom@18.3.1)(react@18.3.1): + rc-picker@4.6.6(dayjs@1.11.11)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - 
'@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - dayjs: 1.11.11 - rc-overflow: 1.3.2(react-dom@18.3.1)(react@18.3.1) - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-overflow: 1.3.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + dayjs: 1.11.11 - rc-progress@4.0.0(react-dom@18.3.1)(react@18.3.1): + rc-progress@4.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-rate@2.13.0(react-dom@18.3.1)(react@18.3.1): + rc-rate@2.13.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-resize-observer@1.4.0(react-dom@18.3.1)(react@18.3.1): + rc-resize-observer@1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) resize-observer-polyfill: 1.5.1 - rc-segmented@2.3.0(react-dom@18.3.1)(react@18.3.1): + rc-segmented@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-select@14.15.0(react-dom@18.3.1)(react@18.3.1): + rc-select@14.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-overflow: 1.3.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) - rc-virtual-list: 3.14.5(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-overflow: 1.3.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-virtual-list: 3.14.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-slider@10.6.2(react-dom@18.3.1)(react@18.3.1): + rc-slider@10.6.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-steps@6.0.1(react-dom@18.3.1)(react@18.3.1): + rc-steps@6.0.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 
5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-switch@4.1.0(react-dom@18.3.1)(react@18.3.1): + rc-switch@4.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-table@7.45.7(react-dom@18.3.1)(react@18.3.1): + rc-table@7.45.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/context': 1.4.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/context': 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) - rc-virtual-list: 3.14.5(react-dom@18.3.1)(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-virtual-list: 3.14.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-tabs@15.1.1(react-dom@18.3.1)(react@18.3.1): + rc-tabs@15.1.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-dropdown: 4.2.0(react-dom@18.3.1)(react@18.3.1) - rc-menu: 9.14.1(react-dom@18.3.1)(react@18.3.1) - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-dropdown: 4.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-menu: 9.14.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-textarea@1.7.0(react-dom@18.3.1)(react@18.3.1): + rc-textarea@1.7.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-input: 1.5.1(react-dom@18.3.1)(react@18.3.1) - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-input: 1.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-tooltip@6.2.0(react-dom@18.3.1)(react@18.3.1): + rc-tooltip@6.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 - '@rc-component/trigger': 2.2.0(react-dom@18.3.1)(react@18.3.1) + '@rc-component/trigger': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) classnames: 2.5.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-tree-select@5.22.1(react-dom@18.3.1)(react@18.3.1): + rc-tree-select@5.22.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-select: 14.15.0(react-dom@18.3.1)(react@18.3.1) - rc-tree: 5.8.8(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-select: 14.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-tree: 5.8.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - 
rc-tree@5.8.8(react-dom@18.3.1)(react@18.3.1): + rc-tree@5.8.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-motion: 2.9.2(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) - rc-virtual-list: 3.14.5(react-dom@18.3.1)(react@18.3.1) + rc-motion: 2.9.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-virtual-list: 3.14.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-upload@4.5.2(react-dom@18.3.1)(react@18.3.1): + rc-upload@4.5.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rc-util@5.43.0(react-dom@18.3.1)(react@18.3.1): + rc-util@5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) react-is: 18.3.1 - rc-virtual-list@3.14.5(react-dom@18.3.1)(react@18.3.1): + rc-virtual-list@3.14.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 classnames: 2.5.1 - rc-resize-observer: 1.4.0(react-dom@18.3.1)(react@18.3.1) - rc-util: 5.43.0(react-dom@18.3.1)(react@18.3.1) + rc-resize-observer: 1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + rc-util: 5.43.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -5877,7 +5896,7 @@ snapshots: dependencies: react: 18.3.1 - react-hot-toast@2.4.1(csstype@3.1.3)(react-dom@18.3.1)(react@18.3.1): + react-hot-toast@2.4.1(csstype@3.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: goober: 2.1.14(csstype@3.1.3) react: 18.3.1 @@ -5889,20 +5908,21 @@ snapshots: react-is@18.3.1: {} - react-redux@8.1.3(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)(redux@4.2.1): + react-redux@8.1.3(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(redux@4.2.1): dependencies: '@babel/runtime': 7.24.7 '@types/hoist-non-react-statics': 3.3.5 - '@types/react': 18.3.3 '@types/use-sync-external-store': 0.0.3 hoist-non-react-statics: 3.3.2 react: 18.3.1 - react-dom: 18.3.1(react@18.3.1) react-is: 18.3.1 - redux: 4.2.1 use-sync-external-store: 1.2.2(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.3 + react-dom: 18.3.1(react@18.3.1) + redux: 4.2.1 - react-transition-group@4.4.5(react-dom@18.3.1)(react@18.3.1): + react-transition-group@4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.24.7 dom-helpers: 5.2.1 @@ -5916,7 +5936,7 @@ snapshots: react: 18.3.1 tslib: 2.6.3 - react-use@17.5.0(react-dom@18.3.1)(react@18.3.1): + react-use@17.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@types/js-cookie': 2.2.7 '@xobotyi/scrollbar-width': 1.9.5 @@ -5924,7 +5944,7 @@ snapshots: fast-deep-equal: 3.1.3 fast-shallow-equal: 1.0.0 js-cookie: 2.2.1 - nano-css: 5.6.1(react-dom@18.3.1)(react@18.3.1) + nano-css: 5.6.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) react-universal-interface: 0.6.2(react@18.3.1)(tslib@2.6.3) From 65d12b2f354b6649c7715402ccc54340c41491aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:55:27 +0800 Subject: [PATCH 009/123] build(deps): bump axios from 1.7.2 to 1.7.4 in 
/web/web (#5247) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [axios](https://github.com/axios/axios) from 1.7.2 to 1.7.4.
Release notes

Sourced from axios's releases and changelog (the changelog entries mirror the release notes below).

Release v1.7.4 (2024-08-13): bug-fix release.

Release v1.7.3 (2024-08-01): bug-fix release with the following fixes:

- adapter: fix progress event emitting; (#6518) (e3c76fc)
- fetch: fix withCredentials request config (#6505) (85d4d0e)
- xhr: return original config on errors from XHR adapter (#6515) (8966ee7)
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=axios&package-manager=npm_and_yarn&previous-version=1.7.2&new-version=1.7.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/apache/gravitino/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/web/package.json | 2 +- web/web/pnpm-lock.yaml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/web/web/package.json b/web/web/package.json index 816caf8059e..471844e2170 100644 --- a/web/web/package.json +++ b/web/web/package.json @@ -28,7 +28,7 @@ "@mui/x-tree-view": "^6.17.0", "@reduxjs/toolkit": "^1.9.7", "antd": "^5.19.0", - "axios": "^1.7.2", + "axios": "^1.7.4", "chroma-js": "^2.4.2", "clsx": "^2.1.1", "dayjs": "^1.11.11", diff --git a/web/web/pnpm-lock.yaml b/web/web/pnpm-lock.yaml index 4c3c1fa8689..2eac1ce688f 100644 --- a/web/web/pnpm-lock.yaml +++ b/web/web/pnpm-lock.yaml @@ -42,8 +42,8 @@ importers: specifier: ^5.19.0 version: 5.19.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) axios: - specifier: ^1.7.2 - version: 1.7.2 + specifier: ^1.7.4 + version: 1.7.4 chroma-js: specifier: ^2.4.2 version: 2.4.2 @@ -1034,8 +1034,8 @@ packages: resolution: {integrity: sha512-QbUdXJVTpvUTHU7871ppZkdOLBeGUKBQWHkHrvN2V9IQWGMt61zf3B45BtzjxEJzYuj0JBjBZP/hmYS/R9pmAw==} engines: {node: '>=4'} - axios@1.7.2: - resolution: {integrity: sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==} + axios@1.7.4: + resolution: {integrity: sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==} axobject-query@3.1.1: resolution: {integrity: sha512-goKlv8DZrK9hUh975fnHzhNIO4jUnFCfv/dszV5VwUGDFjI6vQ2VwoyjYjYNEbBE8AH87TduWP5uyDR1D+Iteg==} @@ -3443,7 +3443,7 @@ snapshots: '@iconify/types': 2.0.0 '@iconify/utils': 2.1.25 '@types/tar': 6.1.13 - axios: 1.7.2 + axios: 1.7.4 cheerio: 1.0.0-rc.12 extract-zip: 2.0.1 local-pkg: 0.5.0 @@ -4076,7 +4076,7 @@ snapshots: axe-core@4.9.1: {} - axios@1.7.2: + axios@1.7.4: dependencies: follow-redirects: 1.15.6 form-data: 4.0.0 From 5b2f823e21399bbf1d2a514f2558dabc4fbb6340 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 12:04:47 +0800 Subject: [PATCH 010/123] build(deps): bump fast-loops from 1.1.3 to 1.1.4 in /web/web (#5248) Bumps [fast-loops](https://github.com/robinweser/fast-loops) from 1.1.3 to 1.1.4.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=fast-loops&package-manager=npm_and_yarn&previous-version=1.1.3&new-version=1.1.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/web/pnpm-lock.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/web/web/pnpm-lock.yaml b/web/web/pnpm-lock.yaml index 2eac1ce688f..d085a9181cc 100644 --- a/web/web/pnpm-lock.yaml +++ b/web/web/pnpm-lock.yaml @@ -1518,8 +1518,8 @@ packages: fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} - fast-loops@1.1.3: - resolution: {integrity: sha512-8EZzEP0eKkEEVX+drtd9mtuQ+/QrlfW/5MlwcwK5Nds6EkZ/tRzEexkzUY2mIssnAyVLT+TKHuRXmFNNXYUd6g==} + fast-loops@1.1.4: + resolution: {integrity: sha512-8dbd3XWoKCTms18ize6JmQF1SFnnfj5s0B7rRry22EofgMu7B6LKHVh+XfFqFGsqnbH54xgeO83PzpKI+ODhlg==} fast-shallow-equal@1.0.0: resolution: {integrity: sha512-HPtaa38cPgWvaCFmRNhlc6NG7pv6NUHqjPgVAkWGoB9mQMwYB27/K0CvOM5Czy+qpT3e8XJ6Q4aPAnzpNpzNaw==} @@ -4788,7 +4788,7 @@ snapshots: fast-levenshtein@2.0.6: {} - fast-loops@1.1.3: {} + fast-loops@1.1.4: {} fast-shallow-equal@1.0.0: {} @@ -5019,7 +5019,7 @@ snapshots: inline-style-prefixer@7.0.0: dependencies: css-in-js-utils: 3.1.0 - fast-loops: 1.1.3 + fast-loops: 1.1.4 internal-slot@1.0.7: dependencies: From 9ecc87d0f01ec0f5b7a306d82431b5bed6673a51 Mon Sep 17 00:00:00 2001 From: Jerry Shao Date: Fri, 25 Oct 2024 13:21:31 +0800 Subject: [PATCH 011/123] [#4817] fix(core): Fix unexpected tag delete issue (#5235) ### What changes were proposed in this pull request? This PR fixes the tag relation unexpected deletion issue. Previously we deleted other unrelated tag relations under the metalake, which will make the associated tag list fail. So this PR will fix this issue. ### Why are the changes needed? To fix the tag unexpectedly deleted issue. Fix: #4817 ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? Add new IT to fix this issue. 
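To make the scope of the fix concrete before the diff below, here is a minimal sketch of the corrected soft-delete statement. The table names (`tag_relation`, `tag_meta`, `metalake_meta`) are simplified stand-ins for the mapper constants used in the real SQL providers; only the added `tag_name` predicate is taken directly from this patch.

```java
// Hypothetical, simplified rendering of the corrected soft-delete statement.
// Table names are placeholders; the real SQL lives in the MyBatis SQL
// providers touched by this patch.
public class TagSoftDeleteSqlSketch {

  // Builds the soft-delete statement for the relations of a single tag.
  static String softDeleteRelations() {
    return "UPDATE tag_relation te SET deleted_at = UNIX_TIMESTAMP() "
        + "WHERE te.tag_id IN ("
        + "  SELECT tm.tag_id FROM tag_meta tm "
        + "  WHERE tm.metalake_id IN ("
        + "    SELECT mm.metalake_id FROM metalake_meta mm "
        + "    WHERE mm.metalake_name = #{metalakeName} AND mm.deleted_at = 0) "
        // The fix: without this predicate the subquery matched every tag in
        // the metalake, so unrelated tag relations were soft-deleted as well.
        + "  AND tm.tag_name = #{tagName} "
        + "  AND tm.deleted_at = 0) "
        + "AND te.deleted_at = 0";
  }

  public static void main(String[] args) {
    System.out.println(softDeleteRelations());
  }
}
```

Without that predicate the inner subquery matched every tag under the metalake, which is why dropping one tag wiped the relations of all the others and made listing the associated tags fail.
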
--- .../client/integration/test/TagIT.java | 39 +++++++++++++++++++ .../TagMetadataObjectRelBaseSQLProvider.java | 2 +- ...agMetadataObjectRelPostgreSQLProvider.java | 2 +- 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java index 847b6253ff4..bd95f2ae34e 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java @@ -606,4 +606,43 @@ public void testAssociateTagsToColumn() { Assertions.assertEquals(1, tag4.associatedObjects().count()); Assertions.assertEquals(column.name(), tag4.associatedObjects().objects()[0].name()); } + + @Test + public void testAssociateAndDeleteTags() { + Tag tag1 = + metalake.createTag( + GravitinoITUtils.genRandomName("tag_it_tag1"), "comment1", Collections.emptyMap()); + Tag tag2 = + metalake.createTag( + GravitinoITUtils.genRandomName("tag_it_tag2"), "comment2", Collections.emptyMap()); + Tag tag3 = + metalake.createTag( + GravitinoITUtils.genRandomName("tag_it_tag3"), "comment3", Collections.emptyMap()); + + String[] associatedTags = + relationalCatalog + .supportsTags() + .associateTags(new String[] {tag1.name(), tag2.name()}, new String[] {tag3.name()}); + + Assertions.assertEquals(2, associatedTags.length); + Set tagNames = Sets.newHashSet(associatedTags); + Assertions.assertTrue(tagNames.contains(tag1.name())); + Assertions.assertTrue(tagNames.contains(tag2.name())); + Assertions.assertFalse(tagNames.contains(tag3.name())); + + Tag retrivedTag = relationalCatalog.supportsTags().getTag(tag2.name()); + Assertions.assertEquals(tag2.name(), retrivedTag.name()); + Assertions.assertEquals(tag2.comment(), retrivedTag.comment()); + + boolean deleted = metalake.deleteTag("null"); + Assertions.assertFalse(deleted); + + deleted = metalake.deleteTag(tag1.name()); + Assertions.assertTrue(deleted); + deleted = metalake.deleteTag(tag1.name()); + Assertions.assertFalse(deleted); + + String[] associatedTags1 = relationalCatalog.supportsTags().listTags(); + Assertions.assertArrayEquals(new String[] {tag2.name()}, associatedTags1); + } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TagMetadataObjectRelBaseSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TagMetadataObjectRelBaseSQLProvider.java index 2124dcbf917..5a9b066a006 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TagMetadataObjectRelBaseSQLProvider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/TagMetadataObjectRelBaseSQLProvider.java @@ -133,7 +133,7 @@ public String softDeleteTagMetadataObjectRelsByMetalakeAndTagName( + " tm WHERE tm.metalake_id IN (SELECT mm.metalake_id FROM " + MetalakeMetaMapper.TABLE_NAME + " mm WHERE mm.metalake_name = #{metalakeName} AND mm.deleted_at = 0)" - + " AND tm.deleted_at = 0) AND te.deleted_at = 0"; + + " AND tm.tag_name = #{tagName} AND tm.deleted_at = 0) AND te.deleted_at = 0"; } public String softDeleteTagMetadataObjectRelsByMetalakeId(@Param("metalakeId") Long metalakeId) { diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TagMetadataObjectRelPostgreSQLProvider.java 
b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TagMetadataObjectRelPostgreSQLProvider.java index a4feda63069..ee45f465f7f 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TagMetadataObjectRelPostgreSQLProvider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/TagMetadataObjectRelPostgreSQLProvider.java @@ -38,7 +38,7 @@ public String softDeleteTagMetadataObjectRelsByMetalakeAndTagName( + " tm WHERE tm.metalake_id IN (SELECT mm.metalake_id FROM " + MetalakeMetaMapper.TABLE_NAME + " mm WHERE mm.metalake_name = #{metalakeName} AND mm.deleted_at = 0)" - + " AND tm.deleted_at = 0) AND te.deleted_at = 0"; + + " AND tm.tag_name = #{tagName} AND tm.deleted_at = 0) AND te.deleted_at = 0"; } @Override From 5ec0d5412cb66b2c9484d93f590731d9932cf490 Mon Sep 17 00:00:00 2001 From: mchades Date: Fri, 25 Oct 2024 14:04:40 +0800 Subject: [PATCH 012/123] [#5252] fix(catalog): fix disable catalog miss invalidate cache (#5253) ### What changes were proposed in this pull request? fix disable catalog miss invalidate cache ### Why are the changes needed? Fix: #5252 ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? tests added --- .../gravitino/client/integration/test/CatalogIT.java | 7 +++++++ .../java/org/apache/gravitino/catalog/CatalogManager.java | 2 ++ 2 files changed, 9 insertions(+) diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java index 5360f9f7816..045c0ad694f 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java @@ -163,6 +163,7 @@ public void testCatalogAvailable() { catalogName, Catalog.Type.FILESET, "hadoop", "catalog comment", ImmutableMap.of()); Assertions.assertEquals("true", catalog.properties().get(PROPERTY_IN_USE)); + // test in-use and can't drop Exception exception = Assertions.assertThrows( CatalogInUseException.class, () -> metalake.dropCatalog(catalogName)); @@ -170,10 +171,16 @@ public void testCatalogAvailable() { exception.getMessage().contains("please disable it first or use force option"), exception.getMessage()); + // test disable and enable again Assertions.assertDoesNotThrow(() -> metalake.disableCatalog(catalogName)); Catalog loadedCatalog = metalake.loadCatalog(catalogName); Assertions.assertEquals("false", loadedCatalog.properties().get(PROPERTY_IN_USE)); + Assertions.assertDoesNotThrow(() -> metalake.enableCatalog(catalogName)); + loadedCatalog = metalake.loadCatalog(catalogName); + Assertions.assertEquals("true", loadedCatalog.properties().get(PROPERTY_IN_USE)); + + Assertions.assertDoesNotThrow(() -> metalake.disableCatalog(catalogName)); exception = Assertions.assertThrows( CatalogNotInUseException.class, diff --git a/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java b/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java index ed300e45c7e..959f91f5c9e 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java +++ b/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java @@ -513,6 +513,8 @@ public void enableCatalog(NameIdentifier ident) return newCatalogBuilder.build(); }); + catalogCache.invalidate(ident); + } catch (IOException e) { 
throw new RuntimeException(e); } From cf09f9ab0a93bb75626daa8c098c47cdfb545e1e Mon Sep 17 00:00:00 2001 From: FANNG Date: Fri, 25 Oct 2024 14:16:51 +0800 Subject: [PATCH 013/123] [#5068] feat(core): support GCS token provider (#5224) ### What changes were proposed in this pull request? support GCS token provider ### Why are the changes needed? Fix: #5068 ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? add IT and run with real google acount --- LICENSE.bin | 3 + .../credential/GCSTokenCredential.java | 73 ++++++ bundles/gcp-bundle/build.gradle.kts | 11 +- .../gcs/credential/GCSTokenProvider.java | 218 ++++++++++++++++++ ...he.gravitino.credential.CredentialProvider | 19 ++ .../services/org.apache.hadoop.fs.FileSystem | 20 ++ .../credential/CredentialConstants.java | 2 + .../credential/CredentialPropertyUtils.java | 25 +- .../config/GCSCredentialConfig.java | 51 ++++ gradle/libs.versions.toml | 5 + iceberg/iceberg-rest-server/build.gradle.kts | 2 + .../integration/test/IcebergRESTGCSIT.java | 108 +++++++++ .../test/IcebergRESTJdbcCatalogIT.java | 24 +- .../test/IcebergRESTServiceBaseIT.java | 56 ++++- .../integration/test/util/BaseIT.java | 37 +-- .../integration/test/util/ITUtils.java | 16 ++ settings.gradle.kts | 3 +- 17 files changed, 634 insertions(+), 39 deletions(-) create mode 100644 api/src/main/java/org/apache/gravitino/credential/GCSTokenCredential.java create mode 100644 bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java create mode 100644 bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider create mode 100644 bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem create mode 100644 core/src/main/java/org/apache/gravitino/credential/config/GCSCredentialConfig.java create mode 100644 iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTGCSIT.java diff --git a/LICENSE.bin b/LICENSE.bin index e922f936771..1bdb9864d2f 100644 --- a/LICENSE.bin +++ b/LICENSE.bin @@ -306,6 +306,7 @@ Apache Iceberg core Apache Iceberg Hive metastore Apache Iceberg GCP + Apache Iceberg GCP bundle Apache Ivy Apache Log4j 1.x Compatibility API Apache Log4j API @@ -398,6 +399,8 @@ RE2/J ZSTD JNI fsspec + Google auth HTTP + Google auth Credentials This product bundles various third-party components also under the MIT license diff --git a/api/src/main/java/org/apache/gravitino/credential/GCSTokenCredential.java b/api/src/main/java/org/apache/gravitino/credential/GCSTokenCredential.java new file mode 100644 index 00000000000..98186e2dea7 --- /dev/null +++ b/api/src/main/java/org/apache/gravitino/credential/GCSTokenCredential.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.Map; +import org.apache.commons.lang3.StringUtils; + +/** The GCS token credential to access GCS. */ +public class GCSTokenCredential implements Credential { + + /** GCS credential type. */ + public static final String GCS_TOKEN_CREDENTIAL_TYPE = "gcs-token"; + + /** GCS credential property, token name. */ + public static final String GCS_TOKEN_NAME = "token"; + + private String token; + private long expireMs; + + /** + * @param token The GCS token. + * @param expireMs The GCS token expire time at ms. + */ + public GCSTokenCredential(String token, long expireMs) { + Preconditions.checkArgument( + StringUtils.isNotBlank(token), "GCS session token should not be null"); + this.token = token; + this.expireMs = expireMs; + } + + @Override + public String credentialType() { + return GCS_TOKEN_CREDENTIAL_TYPE; + } + + @Override + public long expireTimeInMs() { + return expireMs; + } + + @Override + public Map credentialInfo() { + return (new ImmutableMap.Builder()).put(GCS_TOKEN_NAME, token).build(); + } + + /** + * Get GCS token. + * + * @return The GCS token. + */ + public String token() { + return token; + } +} diff --git a/bundles/gcp-bundle/build.gradle.kts b/bundles/gcp-bundle/build.gradle.kts index 6b373578c9d..e69ff345ea8 100644 --- a/bundles/gcp-bundle/build.gradle.kts +++ b/bundles/gcp-bundle/build.gradle.kts @@ -25,9 +25,17 @@ plugins { } dependencies { + compileOnly(project(":api")) + compileOnly(project(":core")) + compileOnly(project(":catalogs:catalog-common")) compileOnly(project(":catalogs:catalog-hadoop")) + compileOnly(libs.hadoop3.common) + + implementation(libs.commons.lang3) implementation(libs.hadoop3.gcs) + implementation(libs.google.auth.http) + implementation(libs.google.auth.credentials) } tasks.withType(ShadowJar::class.java) { @@ -38,8 +46,7 @@ tasks.withType(ShadowJar::class.java) { // Relocate dependencies to avoid conflicts relocate("org.apache.httpcomponents", "org.apache.gravitino.shaded.org.apache.httpcomponents") relocate("org.apache.commons", "org.apache.gravitino.shaded.org.apache.commons") - relocate("com.google.guava", "org.apache.gravitino.shaded.com.google.guava") - relocate("com.google.code", "org.apache.gravitino.shaded.com.google.code") + relocate("com.google", "org.apache.gravitino.shaded.com.google") } tasks.jar { diff --git a/bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java b/bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java new file mode 100644 index 00000000000..94234b2d98e --- /dev/null +++ b/bundles/gcp-bundle/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java @@ -0,0 +1,218 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.gcs.credential; + +import com.google.auth.oauth2.AccessToken; +import com.google.auth.oauth2.CredentialAccessBoundary; +import com.google.auth.oauth2.CredentialAccessBoundary.AccessBoundaryRule; +import com.google.auth.oauth2.DownscopedCredentials; +import com.google.auth.oauth2.GoogleCredentials; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.credential.Credential; +import org.apache.gravitino.credential.CredentialConstants; +import org.apache.gravitino.credential.CredentialContext; +import org.apache.gravitino.credential.CredentialProvider; +import org.apache.gravitino.credential.GCSTokenCredential; +import org.apache.gravitino.credential.PathBasedCredentialContext; +import org.apache.gravitino.credential.config.GCSCredentialConfig; + +/** Generate GCS access token according to the read and write paths. */ +public class GCSTokenProvider implements CredentialProvider { + + private static final String INITIAL_SCOPE = "https://www.googleapis.com/auth/cloud-platform"; + + private GoogleCredentials sourceCredentials; + + @Override + public void initialize(Map<String, String> properties) { + GCSCredentialConfig gcsCredentialConfig = new GCSCredentialConfig(properties); + try { + this.sourceCredentials = + getSourceCredentials(gcsCredentialConfig).createScoped(INITIAL_SCOPE); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() {} + + @Override + public String credentialType() { + return CredentialConstants.GCS_TOKEN_CREDENTIAL_PROVIDER_TYPE; + } + + @Override + public Credential getCredential(CredentialContext context) { + if (!(context instanceof PathBasedCredentialContext)) { + return null; + } + PathBasedCredentialContext pathBasedCredentialContext = (PathBasedCredentialContext) context; + try { + AccessToken accessToken = + getToken( + pathBasedCredentialContext.getReadPaths(), + pathBasedCredentialContext.getWritePaths()); + String tokenValue = accessToken.getTokenValue(); + long expireTime = accessToken.getExpirationTime().toInstant().toEpochMilli(); + return new GCSTokenCredential(tokenValue, expireTime); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private AccessToken getToken(Set<String> readLocations, Set<String> writeLocations) + throws IOException { + DownscopedCredentials downscopedCredentials = + DownscopedCredentials.newBuilder() + .setSourceCredential(sourceCredentials) + .setCredentialAccessBoundary(getAccessBoundary(readLocations, writeLocations)) + .build(); + return downscopedCredentials.refreshAccessToken(); + } + + private CredentialAccessBoundary getAccessBoundary( + Set<String> readLocations, Set<String> writeLocations) { + // bucketName -> read resource expressions + Map<String, List<String>> readExpressions = new HashMap<>(); + // bucketName -> write resource expressions + Map<String, List<String>> writeExpressions = new HashMap<>(); + + // Construct read and write resource expressions + HashSet<String> readBuckets = new HashSet<>(); + HashSet<String> writeBuckets = new HashSet<>(); + Stream.concat(readLocations.stream(), writeLocations.stream()) + .distinct() + .forEach( + location -> { + URI uri = URI.create(location); + String bucketName = getBucketName(uri); + readBuckets.add(bucketName); + String resourcePath = uri.getPath().substring(1); + List<String> resourceExpressions = + readExpressions.computeIfAbsent(bucketName, key -> new ArrayList<>()); + // add read privilege + resourceExpressions.add( + String.format( + "resource.name.startsWith('projects/_/buckets/%s/objects/%s')", + bucketName, resourcePath)); + // add list privilege + resourceExpressions.add( + String.format( + "api.getAttribute('storage.googleapis.com/objectListPrefix', '').startsWith('%s')", + resourcePath)); + if (writeLocations.contains(location)) { + writeBuckets.add(bucketName); + resourceExpressions = + writeExpressions.computeIfAbsent(bucketName, key -> new ArrayList<>()); + // add write privilege + resourceExpressions.add( + String.format( + "resource.name.startsWith('projects/_/buckets/%s/objects/%s')", + bucketName, resourcePath)); + } + }); + + // Construct policy according to the resource expression and privilege. + CredentialAccessBoundary.Builder credentialAccessBoundaryBuilder = + CredentialAccessBoundary.newBuilder(); + readBuckets.forEach( + bucket -> { + List<String> readConditions = readExpressions.get(bucket); + AccessBoundaryRule rule = + getAccessBoundaryRule( + bucket, + readConditions, + Arrays.asList( + "inRole:roles/storage.legacyObjectReader", + "inRole:roles/storage.objectViewer")); + if (rule == null) { + return; + } + credentialAccessBoundaryBuilder.addRule(rule); + }); + + writeBuckets.forEach( + bucket -> { + List<String> writeConditions = writeExpressions.get(bucket); + AccessBoundaryRule rule = + getAccessBoundaryRule( + bucket, + writeConditions, + Arrays.asList("inRole:roles/storage.legacyBucketWriter")); + if (rule == null) { + return; + } + credentialAccessBoundaryBuilder.addRule(rule); + }); + + return credentialAccessBoundaryBuilder.build(); + } + + private AccessBoundaryRule getAccessBoundaryRule( + String bucketName, List<String> resourceExpression, List<String> permissions) { + if (resourceExpression == null || resourceExpression.isEmpty()) { + return null; + } + CredentialAccessBoundary.AccessBoundaryRule.Builder builder = + CredentialAccessBoundary.AccessBoundaryRule.newBuilder(); + builder.setAvailableResource(toGCSBucketResource(bucketName)); + builder.setAvailabilityCondition( + CredentialAccessBoundary.AccessBoundaryRule.AvailabilityCondition.newBuilder() + .setExpression(String.join(" || ", resourceExpression)) + .build()); + builder.setAvailablePermissions(permissions); + return builder.build(); + } + + private static String toGCSBucketResource(String bucketName) { + return "//storage.googleapis.com/projects/_/buckets/" + bucketName; + } + + private static String getBucketName(URI uri) { + return uri.getHost(); + } + + private GoogleCredentials getSourceCredentials(GCSCredentialConfig gcsCredentialConfig) + throws IOException { + String gcsCredentialFilePath = gcsCredentialConfig.gcsCredentialFilePath(); + if (StringUtils.isBlank(gcsCredentialFilePath)) { + return GoogleCredentials.getApplicationDefault(); + } else { + File credentialsFile = new File(gcsCredentialFilePath); + if (!credentialsFile.exists()) { + throw new IOException("GCS credential file does not exist: " 
+ gcsCredentialFilePath); + } + return GoogleCredentials.fromStream(new FileInputStream(credentialsFile)); + } + } +} diff --git a/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider b/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider new file mode 100644 index 00000000000..69510490549 --- /dev/null +++ b/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.gravitino.credential.CredentialProvider @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +org.apache.gravitino.gcs.credential.GCSTokenProvider diff --git a/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem new file mode 100644 index 00000000000..e67410de7b3 --- /dev/null +++ b/bundles/gcp-bundle/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +org.apache.gravitino.shaded.com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem diff --git a/catalogs/catalog-common/src/main/java/org/apache/gravitino/credential/CredentialConstants.java b/catalogs/catalog-common/src/main/java/org/apache/gravitino/credential/CredentialConstants.java index 596268395e3..a141b637eba 100644 --- a/catalogs/catalog-common/src/main/java/org/apache/gravitino/credential/CredentialConstants.java +++ b/catalogs/catalog-common/src/main/java/org/apache/gravitino/credential/CredentialConstants.java @@ -22,5 +22,7 @@ public class CredentialConstants { public static final String CREDENTIAL_PROVIDER_TYPE = "credential-provider-type"; + public static final String GCS_TOKEN_CREDENTIAL_PROVIDER_TYPE = "gcs-token"; + private CredentialConstants() {} } diff --git a/common/src/main/java/org/apache/gravitino/credential/CredentialPropertyUtils.java b/common/src/main/java/org/apache/gravitino/credential/CredentialPropertyUtils.java index 255e54fbf3d..e380cc5d44b 100644 --- a/common/src/main/java/org/apache/gravitino/credential/CredentialPropertyUtils.java +++ b/common/src/main/java/org/apache/gravitino/credential/CredentialPropertyUtils.java @@ -19,12 +19,17 @@ package org.apache.gravitino.credential; +import com.google.common.collect.ImmutableMap; +import java.util.HashMap; import java.util.Map; /** * Helper class to generate specific credential properties for different table format and engine. */ public class CredentialPropertyUtils { + private static Map<String, String> icebergCredentialPropertyMap = + ImmutableMap.of(GCSTokenCredential.GCS_TOKEN_NAME, "gcs.oauth2.token"); + /** * Transforms a specific credential into a map of Iceberg properties. * * @param credential * @return a map of Iceberg properties derived from the credential */ public static Map<String, String> toIcebergProperties(Credential credential) { - // todo: transform specific credential to iceberg properties + if (credential instanceof GCSTokenCredential) { + Map<String, String> icebergGCSCredentialProperties = + transformProperties(credential.credentialInfo(), icebergCredentialPropertyMap); + icebergGCSCredentialProperties.put( + "gcs.oauth2.token-expires-at", String.valueOf(credential.expireTimeInMs())); + return icebergGCSCredentialProperties; + } return credential.toProperties(); } + + private static Map<String, String> transformProperties( + Map<String, String> originProperties, Map<String, String> transformMap) { + HashMap<String, String> properties = new HashMap<>(); + originProperties.forEach( + (k, v) -> { + if (transformMap.containsKey(k)) { + properties.put(transformMap.get(k), v); + } + }); + return properties; + } } diff --git a/core/src/main/java/org/apache/gravitino/credential/config/GCSCredentialConfig.java b/core/src/main/java/org/apache/gravitino/credential/config/GCSCredentialConfig.java new file mode 100644 index 00000000000..1a2b38ef641 --- /dev/null +++ b/core/src/main/java/org/apache/gravitino/credential/config/GCSCredentialConfig.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.credential.config; + +import com.google.common.annotations.VisibleForTesting; +import java.util.Map; +import javax.annotation.Nullable; +import org.apache.gravitino.Config; +import org.apache.gravitino.config.ConfigBuilder; +import org.apache.gravitino.config.ConfigConstants; +import org.apache.gravitino.config.ConfigEntry; + +public class GCSCredentialConfig extends Config { + + @VisibleForTesting + public static final String GRAVITINO_GCS_CREDENTIAL_FILE_PATH = "gcs-credential-file-path"; + + public static final ConfigEntry<String> GCS_CREDENTIAL_FILE_PATH = + new ConfigBuilder(GRAVITINO_GCS_CREDENTIAL_FILE_PATH) + .doc("The path of GCS credential file") + .version(ConfigConstants.VERSION_0_7_0) + .stringConf() + .create(); + + public GCSCredentialConfig(Map<String, String> properties) { + super(false); + loadFromMap(properties, k -> true); + } + + @Nullable + public String gcsCredentialFilePath() { + return this.get(GCS_CREDENTIAL_FILE_PATH); + } +} diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 472be136cd2..830fe5e747c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -107,6 +107,7 @@ datanucleus-api-jdo = "4.2.4" datanucleus-rdbms = "4.1.19" datanucleus-jdo = "3.2.0-m3" hudi = "0.15.0" +google-auth = "1.28.0" [libraries] protobuf-java = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protoc" } @@ -180,6 +181,7 @@ iceberg-core = { group = "org.apache.iceberg", name = "iceberg-core", version.re iceberg-api = { group = "org.apache.iceberg", name = "iceberg-api", version.ref = "iceberg" } iceberg-hive-metastore = { group = "org.apache.iceberg", name = "iceberg-hive-metastore", version.ref = "iceberg" } iceberg-gcp = { group = "org.apache.iceberg", name = "iceberg-gcp", version.ref = "iceberg" } +iceberg-gcp-bundle = { group = "org.apache.iceberg", name = "iceberg-gcp-bundle", version.ref = "iceberg" } paimon-core = { group = "org.apache.paimon", name = "paimon-core", version.ref = "paimon" } paimon-format = { group = "org.apache.paimon", name = "paimon-format", version.ref = "paimon" } paimon-hive-catalog = { group = "org.apache.paimon", name = "paimon-hive-catalog", version.ref = "paimon" } @@ -246,6 +248,9 @@ mail = { group = "javax.mail", name = "mail", version.ref = "mail" } rome = { group = "rome", name = "rome", version.ref = "rome" } jettison = { group = "org.codehaus.jettison", name = "jettison", version.ref = "jettison" } +google-auth-http = { group = "com.google.auth", name = "google-auth-library-oauth2-http", version.ref = "google-auth" } +google-auth-credentials = { group = "com.google.auth", name = "google-auth-library-credentials", version.ref = "google-auth" } + [bundles] log4j = ["slf4j-api", "log4j-slf4j2-impl", "log4j-api", "log4j-core", "log4j-12-api"] jetty = ["jetty-server", "jetty-servlet", "jetty-webapp", "jetty-servlets"] diff --git a/iceberg/iceberg-rest-server/build.gradle.kts b/iceberg/iceberg-rest-server/build.gradle.kts index 594e6d04208..f088ce2926d 100644 --- a/iceberg/iceberg-rest-server/build.gradle.kts +++ b/iceberg/iceberg-rest-server/build.gradle.kts 
@@ -63,6 +63,7 @@ dependencies { compileOnly(libs.lombok) + testImplementation(project(":bundles:gcp-bundle", configuration = "shadow")) testImplementation(project(":integration-test-common", "testArtifacts")) testImplementation("org.scala-lang.modules:scala-collection-compat_$scalaVersion:$scalaCollectionCompatVersion") @@ -75,6 +76,7 @@ dependencies { exclude("org.rocksdb") } + testImplementation(libs.iceberg.gcp.bundle) testImplementation(libs.jersey.test.framework.core) { exclude(group = "org.junit.jupiter") } diff --git a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTGCSIT.java b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTGCSIT.java new file mode 100644 index 00000000000..89f56c51774 --- /dev/null +++ b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTGCSIT.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.iceberg.integration.test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import org.apache.gravitino.catalog.lakehouse.iceberg.IcebergConstants; +import org.apache.gravitino.credential.CredentialConstants; +import org.apache.gravitino.credential.config.GCSCredentialConfig; +import org.apache.gravitino.iceberg.common.IcebergConfig; +import org.apache.gravitino.integration.test.util.BaseIT; +import org.apache.gravitino.integration.test.util.DownloaderUtils; +import org.apache.gravitino.integration.test.util.ITUtils; +import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; + +// You should export GRAVITINO_GCS_BUCKET and GOOGLE_APPLICATION_CREDENTIALS to run the test +@EnabledIfEnvironmentVariable(named = "GRAVITINO_TEST_CLOUD_IT", matches = "true") +public class IcebergRESTGCSIT extends IcebergRESTJdbcCatalogIT { + private String gcsWarehouse; + private String gcsCredentialPath; + + @Override + void initEnv() { + this.gcsWarehouse = + String.format("gs://%s/test", getFromEnvOrDefault("GRAVITINO_GCS_BUCKET", "bucketName")); + this.gcsCredentialPath = + getFromEnvOrDefault("GOOGLE_APPLICATION_CREDENTIALS", "credential.json"); + if (ITUtils.isEmbedded()) { + return; + } + + try { + downloadIcebergBundleJar(); + } catch (IOException e) { + throw new RuntimeException(e); + } + copyGCSBundleJar(); + } + + @Override + public Map<String, String> getCatalogConfig() { + HashMap<String, String> m = new HashMap<>(); + m.putAll(getCatalogJdbcConfig()); + m.putAll(getGCSConfig()); + return m; + } + + public boolean supportsCredentialVending() { + return true; + } + + private Map<String, String> getGCSConfig() { + Map<String, String> configMap = new HashMap<>(); + + configMap.put( + IcebergConfig.ICEBERG_CONFIG_PREFIX + 
CredentialConstants.CREDENTIAL_PROVIDER_TYPE, + CredentialConstants.GCS_TOKEN_CREDENTIAL_PROVIDER_TYPE); + configMap.put( + IcebergConfig.ICEBERG_CONFIG_PREFIX + + GCSCredentialConfig.GRAVITINO_GCS_CREDENTIAL_FILE_PATH, + gcsCredentialPath); + configMap.put( + IcebergConfig.ICEBERG_CONFIG_PREFIX + IcebergConstants.IO_IMPL, + "org.apache.iceberg.gcp.gcs.GCSFileIO"); + configMap.put(IcebergConfig.ICEBERG_CONFIG_PREFIX + IcebergConstants.WAREHOUSE, gcsWarehouse); + return configMap; + } + + private void copyGCSBundleJar() { + String gravitinoHome = System.getenv("GRAVITINO_HOME"); + String targetDir = String.format("%s/iceberg-rest-server/libs/", gravitinoHome); + BaseIT.copyBundleJarsToDirectory("gcp-bundle", targetDir); + } + + private void downloadIcebergBundleJar() throws IOException { + String icebergBundleJarName = "iceberg-gcp-bundle-1.5.2.jar"; + String icebergBundleJarUri = + "https://repo1.maven.org/maven2/org/apache/iceberg/iceberg-gcp-bundle/1.5.2/" + + icebergBundleJarName; + String gravitinoHome = System.getenv("GRAVITINO_HOME"); + String targetDir = String.format("%s/iceberg-rest-server/libs/", gravitinoHome); + DownloaderUtils.downloadFile(icebergBundleJarUri, targetDir); + } + + private String getFromEnvOrDefault(String envVar, String defaultValue) { + String envValue = System.getenv(envVar); + return Optional.ofNullable(envValue).orElse(defaultValue); + } +} diff --git a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTJdbcCatalogIT.java b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTJdbcCatalogIT.java index d53f8022091..c235451f2ff 100644 --- a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTJdbcCatalogIT.java +++ b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTJdbcCatalogIT.java @@ -33,7 +33,9 @@ @Tag("gravitino-docker-test") @TestInstance(Lifecycle.PER_CLASS) public class IcebergRESTJdbcCatalogIT extends IcebergRESTServiceIT { + private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); + private boolean hiveStarted = false; public IcebergRESTJdbcCatalogIT() { catalogType = IcebergCatalogBackend.JDBC; @@ -42,9 +44,15 @@ public IcebergRESTJdbcCatalogIT() { @Override void initEnv() { containerSuite.startHiveContainer(); + hiveStarted = true; } + @Override public Map getCatalogConfig() { + return getCatalogJdbcConfig(); + } + + protected Map getCatalogJdbcConfig() { Map configMap = new HashMap<>(); configMap.put( @@ -70,13 +78,15 @@ public Map getCatalogConfig() { configMap.put(IcebergConfig.ICEBERG_CONFIG_PREFIX + "jdbc.schema-version", "V1"); - configMap.put( - IcebergConfig.ICEBERG_CONFIG_PREFIX + IcebergConfig.CATALOG_WAREHOUSE.getKey(), - GravitinoITUtils.genRandomName( - String.format( - "hdfs://%s:%d/user/hive/warehouse-jdbc-sqlite", - containerSuite.getHiveContainer().getContainerIpAddress(), - HiveContainer.HDFS_DEFAULTFS_PORT))); + if (hiveStarted) { + configMap.put( + IcebergConfig.ICEBERG_CONFIG_PREFIX + IcebergConfig.CATALOG_WAREHOUSE.getKey(), + GravitinoITUtils.genRandomName( + String.format( + "hdfs://%s:%d/user/hive/warehouse-jdbc-sqlite", + containerSuite.getHiveContainer().getContainerIpAddress(), + HiveContainer.HDFS_DEFAULTFS_PORT))); + } return configMap; } } diff --git a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTServiceBaseIT.java 
b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTServiceBaseIT.java index 0ba781cabd8..67e7a3b8fd8 100644 --- a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTServiceBaseIT.java +++ b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRESTServiceBaseIT.java @@ -20,6 +20,8 @@ import com.google.common.collect.ImmutableList; import com.google.errorprone.annotations.FormatMethod; +import java.io.File; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -27,11 +29,14 @@ import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.apache.gravitino.iceberg.common.IcebergCatalogBackend; import org.apache.gravitino.iceberg.common.IcebergConfig; import org.apache.gravitino.iceberg.integration.test.util.IcebergRESTServerManager; +import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.gravitino.server.web.JettyServerConfig; +import org.apache.spark.SparkConf; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.AfterAll; @@ -46,6 +51,7 @@ @SuppressWarnings("FormatStringAnnotation") public abstract class IcebergRESTServiceBaseIT { + public static final Logger LOG = LoggerFactory.getLogger(IcebergRESTServiceBaseIT.class); private SparkSession sparkSession; protected IcebergCatalogBackend catalogType = IcebergCatalogBackend.MEMORY; @@ -84,6 +90,31 @@ boolean isSupportsViewCatalog() { abstract Map getCatalogConfig(); + protected boolean supportsCredentialVending() { + return false; + } + + private void copyBundleJar(String bundleName) { + String bundleFileName = ITUtils.getBundleJarName(bundleName); + + String rootDir = System.getenv("GRAVITINO_ROOT_DIR"); + String sourceFile = + String.format("%s/bundles/gcp-bundle/build/libs/%s", rootDir, bundleFileName); + String gravitinoHome = System.getenv("GRAVITINO_HOME"); + String targetDir = String.format("%s/iceberg-rest-server/libs/", gravitinoHome); + String targetFile = String.format("%s/%s", targetDir, bundleFileName); + LOG.info("Source file: {}, target directory: {}", sourceFile, targetDir); + try { + File target = new File(targetFile); + if (!target.exists()) { + LOG.info("Copy source file: {} to target directory: {}", sourceFile, targetDir); + FileUtils.copyFileToDirectory(new File(sourceFile), new File(targetDir)); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + private void registerIcebergCatalogConfig() { Map icebergConfigs = getCatalogConfig(); icebergRESTServerManager.registerCustomConfigs(icebergConfigs); @@ -100,19 +131,24 @@ private int getServerPort() { private void initSparkEnv() { int port = getServerPort(); LOG.info("Iceberg REST server port:{}", port); - String IcebergRESTUri = String.format("http://127.0.0.1:%d/iceberg/", port); - sparkSession = - SparkSession.builder() - .master("local[1]") - .config( + String icebergRESTUri = String.format("http://127.0.0.1:%d/iceberg/", port); + SparkConf sparkConf = + new SparkConf() + .set( "spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions") - .config("spark.sql.catalog.rest", "org.apache.iceberg.spark.SparkCatalog") - .config("spark.sql.catalog.rest.type", "rest") - .config("spark.sql.catalog.rest.uri", IcebergRESTUri) + 
.set("spark.sql.catalog.rest", "org.apache.iceberg.spark.SparkCatalog") + .set("spark.sql.catalog.rest.type", "rest") + .set("spark.sql.catalog.rest.uri", icebergRESTUri) // drop Iceberg table purge may hang in spark local mode - .config("spark.locality.wait.node", "0") - .getOrCreate(); + .set("spark.locality.wait.node", "0"); + + if (supportsCredentialVending()) { + sparkConf.set( + "spark.sql.catalog.rest.header.X-Iceberg-Access-Delegation", "vended-credentials"); + } + + sparkSession = SparkSession.builder().master("local[1]").config(sparkConf).getOrCreate(); } private void stopSparkEnv() { diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java index 8bbb5a3b23f..e7ed483f2f5 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/BaseIT.java @@ -75,6 +75,7 @@ @ExtendWith({PrintFuncNameExtension.class, CloseContainerExtension.class}) @TestInstance(TestInstance.Lifecycle.PER_CLASS) public class BaseIT { + protected static final ContainerSuite containerSuite = ContainerSuite.getInstance(); private static final Logger LOG = LoggerFactory.getLogger(BaseIT.class); @@ -127,7 +128,9 @@ private void rewriteGravitinoServerConfig() throws IOException { originConfig = FileUtils.readFileToString(configPath.toFile(), StandardCharsets.UTF_8); } - if (customConfigs.isEmpty()) return; + if (customConfigs.isEmpty()) { + return; + } String tmpFileName = GravitinoServer.CONF_FILE + ".tmp"; Path tmpPath = Paths.get(gravitinoHome, "conf", tmpFileName); @@ -397,26 +400,26 @@ private static boolean isDeploy() { return Objects.equals(mode, ITUtils.DEPLOY_TEST_MODE); } - protected void copyBundleJarsToHadoop(String bundleName) { + public static void copyBundleJarsToDirectory(String bundleName, String directory) { + String bundleJarSourceFile = ITUtils.getBundleJarSourceFile(bundleName); + try { + DownloaderUtils.downloadFile(bundleJarSourceFile, directory); + } catch (Exception e) { + throw new RuntimeException( + String.format( + "Failed to copy the %s dependency jars: %s to %s", + bundleName, bundleJarSourceFile, directory), + e); + } + } + + protected static void copyBundleJarsToHadoop(String bundleName) { if (!isDeploy()) { return; } String gravitinoHome = System.getenv("GRAVITINO_HOME"); - String jarName = - String.format("gravitino-%s-%s.jar", bundleName, System.getenv("PROJECT_VERSION")); - String gcsJars = - ITUtils.joinPath( - gravitinoHome, "..", "..", "bundles", bundleName, "build", "libs", jarName); - gcsJars = "file://" + gcsJars; - try { - if (!ITUtils.EMBEDDED_TEST_MODE.equals(testMode)) { - String hadoopLibDirs = ITUtils.joinPath(gravitinoHome, "catalogs", "hadoop", "libs"); - DownloaderUtils.downloadFile(gcsJars, hadoopLibDirs); - } - } catch (Exception e) { - throw new RuntimeException( - String.format("Failed to copy the %s dependency jars: %s", bundleName, gcsJars), e); - } + String hadoopLibDirs = ITUtils.joinPath(gravitinoHome, "catalogs", "hadoop", "libs"); + copyBundleJarsToDirectory(bundleName, hadoopLibDirs); } } diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java index 9a6d7b13010..d7c099dc7ac 100644 --- 
a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/ITUtils.java @@ -48,6 +48,7 @@ import org.junit.jupiter.api.Assertions; public class ITUtils { + public static final String TEST_MODE = "testMode"; public static final String EMBEDDED_TEST_MODE = "embedded"; public static final String DEPLOY_TEST_MODE = "deploy"; @@ -186,5 +187,20 @@ public static boolean isEmbedded() { return Objects.equals(mode, ITUtils.EMBEDDED_TEST_MODE); } + public static String getBundleJarSourceFile(String bundleName) { + String jarName = ITUtils.getBundleJarName(bundleName); + String gcsJars = ITUtils.joinPath(ITUtils.getBundleJarDirectory(bundleName), jarName); + return "file://" + gcsJars; + } + + public static String getBundleJarName(String bundleName) { + return String.format("gravitino-%s-%s.jar", bundleName, System.getenv("PROJECT_VERSION")); + } + + public static String getBundleJarDirectory(String bundleName) { + return ITUtils.joinPath( + System.getenv("GRAVITINO_ROOT_DIR"), "bundles", bundleName, "build", "libs"); + } + private ITUtils() {} } diff --git a/settings.gradle.kts b/settings.gradle.kts index 2eb340baad3..1f3efb49544 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -73,5 +73,4 @@ include("docs") include("integration-test-common") include(":bundles:aws-bundle") include(":bundles:gcp-bundle") -include("bundles:aliyun-bundle") -findProject(":bundles:aliyun-bundle")?.name = "aliyun-bundle" +include(":bundles:aliyun-bundle") From aedc52f6908267ec21e3c1acea2925c05f2e84bc Mon Sep 17 00:00:00 2001 From: yuanoOo Date: Fri, 25 Oct 2024 15:22:48 +0800 Subject: [PATCH 014/123] [#5227] feat(oceanbase-catalog): Support table operations for OceanBase JDBC catalog (#5228) ### What changes were proposed in this pull request? Support table operations for OceanBase JDBC catalog. ### Why are the changes needed? Fix: #5227 ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? 
Added unit test: `TestOceanBaseTableOperations` --- .../jdbc/operation/JdbcTableOperations.java | 103 +- .../doris/operation/DorisTableOperations.java | 32 - .../mysql/operation/MysqlTableOperations.java | 92 +- .../integration/test/CatalogMysqlIT.java | 4 +- .../operation/TestMysqlTableOperations.java | 4 +- .../operation/OceanBaseTableOperations.java | 600 ++++++++++- .../TestOceanBaseTableOperations.java | 997 ++++++++++++++++++ .../operation/PostgreSqlTableOperations.java | 31 +- 8 files changed, 1689 insertions(+), 174 deletions(-) create mode 100644 catalogs/catalog-jdbc-oceanbase/src/test/java/org/apache/gravitino/catalog/oceanbase/operation/TestOceanBaseTableOperations.java diff --git a/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java b/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java index e9b6bf6abc2..e9cd14cf3b1 100644 --- a/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java +++ b/catalogs/catalog-jdbc-common/src/main/java/org/apache/gravitino/catalog/jdbc/operation/JdbcTableOperations.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.catalog.jdbc.operation; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -30,6 +31,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import javax.sql.DataSource; @@ -46,6 +48,7 @@ import org.apache.gravitino.exceptions.NoSuchTableException; import org.apache.gravitino.exceptions.TableAlreadyExistsException; import org.apache.gravitino.meta.AuditInfo; +import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; import org.apache.gravitino.rel.expressions.Expression; import org.apache.gravitino.rel.expressions.distributions.Distribution; @@ -125,15 +128,20 @@ public boolean drop(String databaseName, String tableName) { return true; } + /** + * The default implementation of this method is based on MySQL, and if the catalog is not + * compatible with MySQL, this method needs to be rewritten. + */ @Override public List<String> listTables(String databaseName) throws NoSuchSchemaException { - try (Connection connection = getConnection(databaseName)) { - final List<String> names = Lists.newArrayList(); - try (ResultSet tables = getTables(connection)) { - while (tables.next()) { - if (Objects.equals(tables.getString("TABLE_SCHEM"), databaseName)) { - names.add(tables.getString("TABLE_NAME")); - } + + final List<String> names = Lists.newArrayList(); + + try (Connection connection = getConnection(databaseName); + ResultSet tables = getTables(connection)) { + while (tables.next()) { + if (Objects.equals(tables.getString("TABLE_CAT"), databaseName)) { + names.add(tables.getString("TABLE_NAME")); } } LOG.info("Finished listing tables size {} for database name {} ", names.size(), databaseName); return names; } catch (final SQLException se) { throw this.exceptionMapper.toGravitinoException(se); } } @@ -454,17 +462,35 @@ protected abstract String generateCreateTableSql( Distribution distribution, Index[] indexes); - protected abstract String generateRenameTableSql(String oldTableName, String newTableName); + /** + * The default implementation of this method is based on MySQL syntax, and if the catalog does not + * support MySQL syntax, this method needs to be rewritten. 
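+ * For example, a hypothetical dialect lacking MySQL's RENAME TABLE statement would override
+ * this to emit an ALTER TABLE ... RENAME TO ... statement instead.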
+ */ + protected String generateRenameTableSql(String oldTableName, String newTableName) { + return String.format("RENAME TABLE `%s` TO `%s`", oldTableName, newTableName); + } - protected abstract String generateDropTableSql(String tableName); + /** + * The default implementation of this method is based on MySQL syntax, and if the catalog does not + * support MySQL syntax, this method needs to be rewritten. + */ + protected String generateDropTableSql(String tableName) { + return String.format("DROP TABLE `%s`", tableName); + } protected abstract String generatePurgeTableSql(String tableName); protected abstract String generateAlterTableSql( String databaseName, String tableName, TableChange... changes); - protected abstract JdbcTable getOrCreateTable( - String databaseName, String tableName, JdbcTable lazyLoadCreateTable); + /** + * The default implementation of this method is based on MySQL syntax, and if the catalog does not + * support MySQL syntax, this method needs to be rewritten. + */ + protected JdbcTable getOrCreateTable( + String databaseName, String tableName, JdbcTable lazyLoadCreateTable) { + return null != lazyLoadCreateTable ? lazyLoadCreateTable : load(databaseName, tableName); + } protected void validateUpdateColumnNullable( TableChange.UpdateColumnNullability change, JdbcTable table) { @@ -479,6 +505,61 @@ protected void validateUpdateColumnNullable( } } + /** + * The auto-increment column will be verified. There can only be one auto-increment column and it + * must be the primary key or unique index. + * + * @param columns jdbc column + * @param indexes table indexes + */ + protected static void validateIncrementCol(JdbcColumn[] columns, Index[] indexes) { + // Check auto increment column + List autoIncrementCols = + Arrays.stream(columns).filter(Column::autoIncrement).collect(Collectors.toList()); + String autoIncrementColsStr = + autoIncrementCols.stream().map(JdbcColumn::name).collect(Collectors.joining(",", "[", "]")); + Preconditions.checkArgument( + autoIncrementCols.size() <= 1, + "Only one column can be auto-incremented. There are multiple auto-increment columns in your table: " + + autoIncrementColsStr); + if (!autoIncrementCols.isEmpty()) { + Optional existAutoIncrementColIndexOptional = + Arrays.stream(indexes) + .filter( + index -> + Arrays.stream(index.fieldNames()) + .flatMap(Arrays::stream) + .anyMatch( + s -> + StringUtils.equalsIgnoreCase(autoIncrementCols.get(0).name(), s))) + .filter( + index -> + index.type() == Index.IndexType.PRIMARY_KEY + || index.type() == Index.IndexType.UNIQUE_KEY) + .findAny(); + Preconditions.checkArgument( + existAutoIncrementColIndexOptional.isPresent(), + "Incorrect table definition; there can be only one auto column and it must be defined as a key"); + } + } + + /** + * The default implementation of this method is based on MySQL syntax, and if the catalog does not + * support MySQL syntax, this method needs to be rewritten. 
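+ * For example, the field names {{"id"}, {"name"}} render as the string `id`, `name`, while a
+ * composite field such as {"a", "b"} is rejected with an IllegalArgumentException.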
+ */ + protected static String getIndexFieldStr(String[][] fieldNames) { + return Arrays.stream(fieldNames) + .map( + colNames -> { + if (colNames.length > 1) { + throw new IllegalArgumentException( + "Index does not support complex fields in this Catalog"); + } + return String.format("`%s`", colNames[0]); + }) + .collect(Collectors.joining(", ")); + } + protected JdbcColumn getJdbcColumnFromTable(JdbcTable jdbcTable, String colName) { return (JdbcColumn) Arrays.stream(jdbcTable.columns()) diff --git a/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java b/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java index ebd7027b168..aa6348e2f71 100644 --- a/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java +++ b/catalogs/catalog-jdbc-doris/src/main/java/org/apache/gravitino/catalog/doris/operation/DorisTableOperations.java @@ -26,7 +26,6 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -38,7 +37,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -53,7 +51,6 @@ import org.apache.gravitino.catalog.jdbc.operation.JdbcTableOperations; import org.apache.gravitino.catalog.jdbc.operation.JdbcTablePartitionOperations; import org.apache.gravitino.exceptions.NoSuchColumnException; -import org.apache.gravitino.exceptions.NoSuchSchemaException; import org.apache.gravitino.exceptions.NoSuchTableException; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; @@ -73,24 +70,6 @@ public class DorisTableOperations extends JdbcTableOperations { private static final String DORIS_AUTO_INCREMENT = "AUTO_INCREMENT"; private static final String NEW_LINE = "\n"; - @Override - public List listTables(String databaseName) throws NoSuchSchemaException { - final List names = Lists.newArrayList(); - - try (Connection connection = getConnection(databaseName); - ResultSet tables = getTables(connection)) { - while (tables.next()) { - if (Objects.equals(tables.getString("TABLE_CAT"), databaseName)) { - names.add(tables.getString("TABLE_NAME")); - } - } - LOG.info("Finished listing tables size {} for database name {} ", names.size(), databaseName); - return names; - } catch (final SQLException se) { - throw this.exceptionMapper.toGravitinoException(se); - } - } - @Override public JdbcTablePartitionOperations createJdbcTablePartitionOperations(JdbcTable loadedTable) { return new DorisTablePartitionOperations( @@ -497,11 +476,6 @@ protected String generateRenameTableSql(String oldTableName, String newTableName return String.format("ALTER TABLE `%s` RENAME `%s`", oldTableName, newTableName); } - @Override - protected String generateDropTableSql(String tableName) { - return String.format("DROP TABLE `%s`", tableName); - } - @Override protected String generatePurgeTableSql(String tableName) { throw new UnsupportedOperationException( @@ -635,12 +609,6 @@ private String generateTableProperties(List setProperti .collect(Collectors.joining(",\n")); } - @Override - protected JdbcTable getOrCreateTable( - String databaseName, String tableName, JdbcTable lazyLoadCreateTable) { - return null != lazyLoadCreateTable ? 
lazyLoadCreateTable : load(databaseName, tableName); - } - private String updateColumnCommentFieldDefinition( TableChange.UpdateColumnComment updateColumnComment) { String newComment = updateColumnComment.getNewComment(); diff --git a/catalogs/catalog-jdbc-mysql/src/main/java/org/apache/gravitino/catalog/mysql/operation/MysqlTableOperations.java b/catalogs/catalog-jdbc-mysql/src/main/java/org/apache/gravitino/catalog/mysql/operation/MysqlTableOperations.java index 8aa1534c9b1..b8cc2f87233 100644 --- a/catalogs/catalog-jdbc-mysql/src/main/java/org/apache/gravitino/catalog/mysql/operation/MysqlTableOperations.java +++ b/catalogs/catalog-jdbc-mysql/src/main/java/org/apache/gravitino/catalog/mysql/operation/MysqlTableOperations.java @@ -24,7 +24,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -36,7 +35,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.stream.Collectors; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -48,7 +46,6 @@ import org.apache.gravitino.catalog.jdbc.JdbcTable; import org.apache.gravitino.catalog.jdbc.operation.JdbcTableOperations; import org.apache.gravitino.exceptions.NoSuchColumnException; -import org.apache.gravitino.exceptions.NoSuchSchemaException; import org.apache.gravitino.exceptions.NoSuchTableException; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; @@ -62,29 +59,11 @@ /** Table operations for MySQL. */ public class MysqlTableOperations extends JdbcTableOperations { - public static final String BACK_QUOTE = "`"; - public static final String MYSQL_AUTO_INCREMENT = "AUTO_INCREMENT"; + private static final String BACK_QUOTE = "`"; + private static final String MYSQL_AUTO_INCREMENT = "AUTO_INCREMENT"; private static final String MYSQL_NOT_SUPPORT_NESTED_COLUMN_MSG = "Mysql does not support nested column names."; - @Override - public List listTables(String databaseName) throws NoSuchSchemaException { - final List names = Lists.newArrayList(); - - try (Connection connection = getConnection(databaseName); - ResultSet tables = getTables(connection)) { - while (tables.next()) { - if (Objects.equals(tables.getString("TABLE_CAT"), databaseName)) { - names.add(tables.getString("TABLE_NAME")); - } - } - LOG.info("Finished listing tables size {} for database name {} ", names.size(), databaseName); - return names; - } catch (final SQLException se) { - throw this.exceptionMapper.toGravitinoException(se); - } - } - @Override protected String generateCreateTableSql( String tableName, @@ -151,44 +130,6 @@ protected String generateCreateTableSql( return result; } - /** - * The auto-increment column will be verified. There can only be one auto-increment column and it - * must be the primary key or unique index. 
- * - * @param columns jdbc column - * @param indexes table indexes - */ - private static void validateIncrementCol(JdbcColumn[] columns, Index[] indexes) { - // Check auto increment column - List autoIncrementCols = - Arrays.stream(columns).filter(Column::autoIncrement).collect(Collectors.toList()); - String autoIncrementColsStr = - autoIncrementCols.stream().map(JdbcColumn::name).collect(Collectors.joining(",", "[", "]")); - Preconditions.checkArgument( - autoIncrementCols.size() <= 1, - "Only one column can be auto-incremented. There are multiple auto-increment columns in your table: " - + autoIncrementColsStr); - if (!autoIncrementCols.isEmpty()) { - Optional existAutoIncrementColIndexOptional = - Arrays.stream(indexes) - .filter( - index -> - Arrays.stream(index.fieldNames()) - .flatMap(Arrays::stream) - .anyMatch( - s -> - StringUtils.equalsIgnoreCase(autoIncrementCols.get(0).name(), s))) - .filter( - index -> - index.type() == Index.IndexType.PRIMARY_KEY - || index.type() == Index.IndexType.UNIQUE_KEY) - .findAny(); - Preconditions.checkArgument( - existAutoIncrementColIndexOptional.isPresent(), - "Incorrect table definition; there can be only one auto column and it must be defined as a key"); - } - } - public static void appendIndexesSql(Index[] indexes, StringBuilder sqlBuilder) { for (Index index : indexes) { String fieldStr = getIndexFieldStr(index.fieldNames()); @@ -215,19 +156,6 @@ public static void appendIndexesSql(Index[] indexes, StringBuilder sqlBuilder) { } } - private static String getIndexFieldStr(String[][] fieldNames) { - return Arrays.stream(fieldNames) - .map( - colNames -> { - if (colNames.length > 1) { - throw new IllegalArgumentException( - "Index does not support complex fields in MySQL"); - } - return BACK_QUOTE + colNames[0] + BACK_QUOTE; - }) - .collect(Collectors.joining(", ")); - } - @Override protected boolean getAutoIncrementInfo(ResultSet resultSet) throws SQLException { return "YES".equalsIgnoreCase(resultSet.getString("IS_AUTOINCREMENT")); @@ -276,16 +204,6 @@ protected void correctJdbcTableFields( } } - @Override - protected String generateRenameTableSql(String oldTableName, String newTableName) { - return String.format("RENAME TABLE `%s` TO `%s`", oldTableName, newTableName); - } - - @Override - protected String generateDropTableSql(String tableName) { - return "DROP TABLE " + BACK_QUOTE + tableName + BACK_QUOTE; - } - @Override protected String generatePurgeTableSql(String tableName) { throw new UnsupportedOperationException( @@ -492,12 +410,6 @@ private String generateTableProperties(List setProperti .collect(Collectors.joining(",\n")); } - @Override - protected JdbcTable getOrCreateTable( - String databaseName, String tableName, JdbcTable lazyLoadCreateTable) { - return null != lazyLoadCreateTable ? 
lazyLoadCreateTable : load(databaseName, tableName); - } - private String updateColumnCommentFieldDefinition( TableChange.UpdateColumnComment updateColumnComment, JdbcTable jdbcTable) { String newComment = updateColumnComment.getNewComment(); diff --git a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java index f6b91b00ee4..9d1a80b491d 100644 --- a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java +++ b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/integration/test/CatalogMysqlIT.java @@ -934,7 +934,7 @@ void testCreateTableIndex() { Assertions.assertTrue( StringUtils.contains( illegalArgumentException.getMessage(), - "Index does not support complex fields in MySQL")); + "Index does not support complex fields in this Catalog")); Index[] indexes3 = new Index[] {Indexes.unique("u1_key", new String[][] {{"col_2", "col_3"}})}; illegalArgumentException = @@ -954,7 +954,7 @@ void testCreateTableIndex() { Assertions.assertTrue( StringUtils.contains( illegalArgumentException.getMessage(), - "Index does not support complex fields in MySQL")); + "Index does not support complex fields in this Catalog")); NameIdentifier tableIdent = NameIdentifier.of(schemaName, "test_null_key"); tableCatalog.createTable( diff --git a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/operation/TestMysqlTableOperations.java b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/operation/TestMysqlTableOperations.java index 93783da3bba..ce1343dd59a 100644 --- a/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/operation/TestMysqlTableOperations.java +++ b/catalogs/catalog-jdbc-mysql/src/test/java/org/apache/gravitino/catalog/mysql/operation/TestMysqlTableOperations.java @@ -51,8 +51,8 @@ @Tag("gravitino-docker-test") public class TestMysqlTableOperations extends TestMysql { - private static Type VARCHAR = Types.VarCharType.of(255); - private static Type INT = Types.IntegerType.get(); + private static final Type VARCHAR = Types.VarCharType.of(255); + private static final Type INT = Types.IntegerType.get(); @Test public void testOperationTable() { diff --git a/catalogs/catalog-jdbc-oceanbase/src/main/java/org/apache/gravitino/catalog/oceanbase/operation/OceanBaseTableOperations.java b/catalogs/catalog-jdbc-oceanbase/src/main/java/org/apache/gravitino/catalog/oceanbase/operation/OceanBaseTableOperations.java index b1744a0c61c..77c97290927 100644 --- a/catalogs/catalog-jdbc-oceanbase/src/main/java/org/apache/gravitino/catalog/oceanbase/operation/OceanBaseTableOperations.java +++ b/catalogs/catalog-jdbc-oceanbase/src/main/java/org/apache/gravitino/catalog/oceanbase/operation/OceanBaseTableOperations.java @@ -18,35 +18,50 @@ */ package org.apache.gravitino.catalog.oceanbase.operation; +import static org.apache.gravitino.rel.Column.DEFAULT_VALUE_NOT_SET; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import 
java.util.stream.Collectors; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.StringIdentifier; import org.apache.gravitino.catalog.jdbc.JdbcColumn; import org.apache.gravitino.catalog.jdbc.JdbcTable; import org.apache.gravitino.catalog.jdbc.operation.JdbcTableOperations; -import org.apache.gravitino.exceptions.GravitinoRuntimeException; -import org.apache.gravitino.exceptions.NoSuchSchemaException; +import org.apache.gravitino.catalog.jdbc.utils.JdbcConnectorUtils; +import org.apache.gravitino.exceptions.NoSuchColumnException; import org.apache.gravitino.exceptions.NoSuchTableException; +import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; import org.apache.gravitino.rel.expressions.distributions.Distribution; +import org.apache.gravitino.rel.expressions.distributions.Distributions; import org.apache.gravitino.rel.expressions.transforms.Transform; import org.apache.gravitino.rel.indexes.Index; +import org.apache.gravitino.rel.indexes.Indexes; +import org.apache.gravitino.rel.types.Types; /** Table operations for OceanBase. */ public class OceanBaseTableOperations extends JdbcTableOperations { - @Override - public List listTables(String databaseName) throws NoSuchSchemaException { - throw new GravitinoRuntimeException("Not implemented yet."); - } - - @Override - public JdbcTable load(String databaseName, String tableName) throws NoSuchTableException { - return super.load(databaseName, tableName.toLowerCase()); - } + private static final String BACK_QUOTE = "`"; + private static final String OCEANBASE_AUTO_INCREMENT = "AUTO_INCREMENT"; + private static final String OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG = + "OceanBase does not support nested column names."; @Override protected String generateCreateTableSql( @@ -58,10 +73,83 @@ protected String generateCreateTableSql( Distribution distribution, Index[] indexes) { if (ArrayUtils.isNotEmpty(partitioning)) { - throw new UnsupportedOperationException("Currently not support Partition tables."); + throw new UnsupportedOperationException( + "Currently we do not support Partitioning in oceanbase"); + } + + if (!Distributions.NONE.equals(distribution)) { + throw new UnsupportedOperationException("OceanBase does not support distribution"); + } + + validateIncrementCol(columns, indexes); + StringBuilder sqlBuilder = new StringBuilder(); + sqlBuilder.append(String.format("CREATE TABLE `%s` (\n", tableName)); + + // Add columns + for (int i = 0; i < columns.length; i++) { + JdbcColumn column = columns[i]; + sqlBuilder + .append(SPACE) + .append(SPACE) + .append(BACK_QUOTE) + .append(column.name()) + .append(BACK_QUOTE); + + appendColumnDefinition(column, sqlBuilder); + // Add a comma for the next column, unless it's the last one + if (i < columns.length - 1) { + sqlBuilder.append(",\n"); + } } - throw new UnsupportedOperationException("Not implemented yet."); + appendIndexesSql(indexes, sqlBuilder); + + sqlBuilder.append("\n)"); + + // Add table comment if specified + if (StringUtils.isNotEmpty(comment)) { + sqlBuilder.append(" COMMENT='").append(comment).append("'"); + } + + // Add table properties + if (MapUtils.isNotEmpty(properties)) { + sqlBuilder.append( + properties.entrySet().stream() + .map(entry -> String.format("%s = %s", entry.getKey(), entry.getValue())) + 
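+ // each property entry renders as "key = value"; the joining step below separates entries with ",\n" and prefixes "\n" so the property list starts on a new line after the closing parenthesis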
.collect(Collectors.joining(",\n", "\n", ""))); + } + + // Return the generated SQL statement + String result = sqlBuilder.append(";").toString(); + + LOG.info("Generated create table:{} sql: {}", tableName, result); + return result; + } + + public static void appendIndexesSql(Index[] indexes, StringBuilder sqlBuilder) { + for (Index index : indexes) { + String fieldStr = getIndexFieldStr(index.fieldNames()); + sqlBuilder.append(",\n"); + switch (index.type()) { + case PRIMARY_KEY: + if (null != index.name() + && !StringUtils.equalsIgnoreCase( + index.name(), Indexes.DEFAULT_MYSQL_PRIMARY_KEY_NAME)) { + throw new IllegalArgumentException("Primary key name must be PRIMARY in OceanBase"); + } + sqlBuilder.append("CONSTRAINT ").append("PRIMARY KEY (").append(fieldStr).append(")"); + break; + case UNIQUE_KEY: + sqlBuilder.append("CONSTRAINT "); + if (null != index.name()) { + sqlBuilder.append(BACK_QUOTE).append(index.name()).append(BACK_QUOTE); + } + sqlBuilder.append(" UNIQUE (").append(fieldStr).append(")"); + break; + default: + throw new IllegalArgumentException("OceanBase doesn't support index : " + index.type()); + } + } } @Override @@ -72,17 +160,38 @@ protected boolean getAutoIncrementInfo(ResultSet resultSet) throws SQLException @Override protected Map getTableProperties(Connection connection, String tableName) throws SQLException { - throw new UnsupportedOperationException("Not implemented yet."); - } + try (PreparedStatement statement = connection.prepareStatement("SHOW TABLE STATUS LIKE ?")) { + statement.setString(1, tableName); + try (ResultSet resultSet = statement.executeQuery()) { + while (resultSet.next()) { + String name = resultSet.getString("NAME"); + if (Objects.equals(name, tableName)) { + return Collections.unmodifiableMap( + new HashMap() { + { + put(COMMENT, resultSet.getString(COMMENT)); + String autoIncrement = resultSet.getString("AUTO_INCREMENT"); + if (StringUtils.isNotEmpty(autoIncrement)) { + put("AUTO_INCREMENT", autoIncrement); + } + } + }); + } + } - @Override - protected String generateRenameTableSql(String oldTableName, String newTableName) { - return String.format("RENAME TABLE `%s` TO `%s`", oldTableName, newTableName); + throw new NoSuchTableException( + "Table %s does not exist in %s.", tableName, connection.getCatalog()); + } + } } - @Override - protected String generateDropTableSql(String tableName) { - return String.format("DROP TABLE `%s`", tableName); + protected void correctJdbcTableFields( + Connection connection, String databaseName, String tableName, JdbcTable.Builder tableBuilder) + throws SQLException { + if (StringUtils.isEmpty(tableBuilder.comment())) { + tableBuilder.withComment( + tableBuilder.properties().getOrDefault(COMMENT, tableBuilder.comment())); + } } @Override @@ -90,21 +199,458 @@ protected String generatePurgeTableSql(String tableName) { return String.format("TRUNCATE TABLE `%s`", tableName); } + /** + * OceanBase does not support some multiple changes in one statement, So rewrite this method, one + * by one to apply TableChange to the table. + * + * @param databaseName The name of the database. + * @param tableName The name of the table. + * @param changes The changes to apply to the table. + */ @Override public void alterTable(String databaseName, String tableName, TableChange... 
changes) throws NoSuchTableException { - throw new UnsupportedOperationException("Not implemented yet."); + LOG.info("Attempting to alter table {} from database {}", tableName, databaseName); + try (Connection connection = getConnection(databaseName)) { + for (TableChange change : changes) { + String sql = generateAlterTableSql(databaseName, tableName, change); + if (StringUtils.isEmpty(sql)) { + LOG.info("No changes to alter table {} from database {}", tableName, databaseName); + return; + } + JdbcConnectorUtils.executeUpdate(connection, sql); + } + LOG.info("Alter table {} from database {}", tableName, databaseName); + } catch (final SQLException se) { + throw this.exceptionMapper.toGravitinoException(se); + } } @Override protected String generateAlterTableSql( String databaseName, String tableName, TableChange... changes) { - throw new UnsupportedOperationException("Not implemented yet."); + // Not all operations require the original table information, so lazy loading is used here + JdbcTable lazyLoadTable = null; + TableChange.UpdateComment updateComment = null; + List setProperties = new ArrayList<>(); + List alterSql = new ArrayList<>(); + for (TableChange change : changes) { + if (change instanceof TableChange.UpdateComment) { + updateComment = (TableChange.UpdateComment) change; + } else if (change instanceof TableChange.SetProperty) { + // The set attribute needs to be added at the end. + setProperties.add(((TableChange.SetProperty) change)); + } else if (change instanceof TableChange.RemoveProperty) { + // OceanBase does not support deleting table attributes, it can be replaced by Set Property + throw new IllegalArgumentException("Remove property is not supported yet"); + } else if (change instanceof TableChange.AddColumn) { + TableChange.AddColumn addColumn = (TableChange.AddColumn) change; + lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); + alterSql.add(addColumnFieldDefinition(addColumn)); + } else if (change instanceof TableChange.RenameColumn) { + lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); + TableChange.RenameColumn renameColumn = (TableChange.RenameColumn) change; + alterSql.add(renameColumnFieldDefinition(renameColumn, lazyLoadTable)); + } else if (change instanceof TableChange.UpdateColumnDefaultValue) { + lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); + TableChange.UpdateColumnDefaultValue updateColumnDefaultValue = + (TableChange.UpdateColumnDefaultValue) change; + alterSql.add( + updateColumnDefaultValueFieldDefinition(updateColumnDefaultValue, lazyLoadTable)); + } else if (change instanceof TableChange.UpdateColumnType) { + lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); + TableChange.UpdateColumnType updateColumnType = (TableChange.UpdateColumnType) change; + alterSql.add(updateColumnTypeFieldDefinition(updateColumnType, lazyLoadTable)); + } else if (change instanceof TableChange.UpdateColumnComment) { + lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); + TableChange.UpdateColumnComment updateColumnComment = + (TableChange.UpdateColumnComment) change; + alterSql.add(updateColumnCommentFieldDefinition(updateColumnComment, lazyLoadTable)); + } else if (change instanceof TableChange.UpdateColumnPosition) { + lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable); + TableChange.UpdateColumnPosition updateColumnPosition = + (TableChange.UpdateColumnPosition) change; + 
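// each matched branch contributes one ALTER TABLE clause to alterSql; the clauses are joined with ",\n" into a single statement below +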
+        alterSql.add(updateColumnPositionFieldDefinition(updateColumnPosition, lazyLoadTable));
+      } else if (change instanceof TableChange.DeleteColumn) {
+        TableChange.DeleteColumn deleteColumn = (TableChange.DeleteColumn) change;
+        lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable);
+        String deleteColSql = deleteColumnFieldDefinition(deleteColumn, lazyLoadTable);
+        if (StringUtils.isNotEmpty(deleteColSql)) {
+          alterSql.add(deleteColSql);
+        }
+      } else if (change instanceof TableChange.UpdateColumnNullability) {
+        lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable);
+        alterSql.add(
+            updateColumnNullabilityDefinition(
+                (TableChange.UpdateColumnNullability) change, lazyLoadTable));
+      } else if (change instanceof TableChange.AddIndex) {
+        alterSql.add(addIndexDefinition((TableChange.AddIndex) change));
+      } else if (change instanceof TableChange.DeleteIndex) {
+        lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable);
+        alterSql.add(deleteIndexDefinition(lazyLoadTable, (TableChange.DeleteIndex) change));
+      } else if (change instanceof TableChange.UpdateColumnAutoIncrement) {
+        lazyLoadTable = getOrCreateTable(databaseName, tableName, lazyLoadTable);
+        alterSql.add(
+            updateColumnAutoIncrementDefinition(
+                lazyLoadTable, (TableChange.UpdateColumnAutoIncrement) change));
+      } else {
+        throw new IllegalArgumentException(
+            "Unsupported table change type: " + change.getClass().getName());
+      }
+    }
+    if (!setProperties.isEmpty()) {
+      alterSql.add(generateTableProperties(setProperties));
+    }
+
+    // Last modified comment
+    if (null != updateComment) {
+      String newComment = updateComment.getNewComment();
+      if (null == StringIdentifier.fromComment(newComment)) {
+        // Detect and add Gravitino id.
+        JdbcTable jdbcTable = getOrCreateTable(databaseName, tableName, lazyLoadTable);
+        StringIdentifier identifier = StringIdentifier.fromComment(jdbcTable.comment());
+        if (null != identifier) {
+          newComment = StringIdentifier.addToComment(identifier, newComment);
+        }
+      }
+      alterSql.add("COMMENT '" + newComment + "'");
+    }
+
+    if (CollectionUtils.isEmpty(alterSql)) {
+      return "";
+    }
+    // Return the generated SQL statement
+    String result = "ALTER TABLE `" + tableName + "`\n" + String.join(",\n", alterSql) + ";";
+    LOG.info("Generated alter table:{} sql: {}", databaseName + "." + tableName, result);
+    return result;
   }

-  @Override
-  protected JdbcTable getOrCreateTable(
-      String databaseName, String tableName, JdbcTable lazyLoadCreateTable) {
-    return null != lazyLoadCreateTable ?
lazyLoadCreateTable : load(databaseName, tableName); + private String updateColumnAutoIncrementDefinition( + JdbcTable table, TableChange.UpdateColumnAutoIncrement change) { + if (change.fieldName().length > 1) { + throw new UnsupportedOperationException("Nested column names are not supported"); + } + String col = change.fieldName()[0]; + JdbcColumn column = getJdbcColumnFromTable(table, col); + if (change.isAutoIncrement()) { + Preconditions.checkArgument( + Types.allowAutoIncrement(column.dataType()), + "Auto increment is not allowed, type: " + column.dataType()); + } + JdbcColumn updateColumn = + JdbcColumn.builder() + .withName(col) + .withDefaultValue(column.defaultValue()) + .withNullable(column.nullable()) + .withType(column.dataType()) + .withComment(column.comment()) + .withAutoIncrement(change.isAutoIncrement()) + .build(); + return MODIFY_COLUMN + + BACK_QUOTE + + col + + BACK_QUOTE + + appendColumnDefinition(updateColumn, new StringBuilder()); + } + + @VisibleForTesting + static String deleteIndexDefinition( + JdbcTable lazyLoadTable, TableChange.DeleteIndex deleteIndex) { + if (deleteIndex.isIfExists()) { + if (Arrays.stream(lazyLoadTable.index()) + .anyMatch(index -> index.name().equals(deleteIndex.getName()))) { + throw new IllegalArgumentException("Index does not exist"); + } + } + return "DROP INDEX " + BACK_QUOTE + deleteIndex.getName() + BACK_QUOTE; + } + + private String updateColumnNullabilityDefinition( + TableChange.UpdateColumnNullability change, JdbcTable table) { + validateUpdateColumnNullable(change, table); + String col = change.fieldName()[0]; + JdbcColumn column = getJdbcColumnFromTable(table, col); + JdbcColumn updateColumn = + JdbcColumn.builder() + .withName(col) + .withDefaultValue(column.defaultValue()) + .withNullable(change.nullable()) + .withType(column.dataType()) + .withComment(column.comment()) + .withAutoIncrement(column.autoIncrement()) + .build(); + return MODIFY_COLUMN + + BACK_QUOTE + + col + + BACK_QUOTE + + appendColumnDefinition(updateColumn, new StringBuilder()); + } + + @VisibleForTesting + static String addIndexDefinition(TableChange.AddIndex addIndex) { + StringBuilder sqlBuilder = new StringBuilder(); + sqlBuilder.append("ADD "); + switch (addIndex.getType()) { + case PRIMARY_KEY: + if (null != addIndex.getName() + && !StringUtils.equalsIgnoreCase( + addIndex.getName(), Indexes.DEFAULT_MYSQL_PRIMARY_KEY_NAME)) { + throw new IllegalArgumentException("Primary key name must be PRIMARY in OceanBase"); + } + sqlBuilder.append("PRIMARY KEY "); + break; + case UNIQUE_KEY: + sqlBuilder + .append("UNIQUE INDEX ") + .append(BACK_QUOTE) + .append(addIndex.getName()) + .append(BACK_QUOTE); + break; + default: + break; + } + sqlBuilder.append(" (").append(getIndexFieldStr(addIndex.getFieldNames())).append(")"); + return sqlBuilder.toString(); + } + + private String generateTableProperties(List setProperties) { + return setProperties.stream() + .map( + setProperty -> + String.format("%s = %s", setProperty.getProperty(), setProperty.getValue())) + .collect(Collectors.joining(",\n")); + } + + private String updateColumnCommentFieldDefinition( + TableChange.UpdateColumnComment updateColumnComment, JdbcTable jdbcTable) { + String newComment = updateColumnComment.getNewComment(); + if (updateColumnComment.fieldName().length > 1) { + throw new UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + String col = updateColumnComment.fieldName()[0]; + JdbcColumn column = getJdbcColumnFromTable(jdbcTable, col); + JdbcColumn updateColumn = 
+ JdbcColumn.builder() + .withName(col) + .withDefaultValue(column.defaultValue()) + .withNullable(column.nullable()) + .withType(column.dataType()) + .withComment(newComment) + .withAutoIncrement(column.autoIncrement()) + .build(); + return MODIFY_COLUMN + + BACK_QUOTE + + col + + BACK_QUOTE + + appendColumnDefinition(updateColumn, new StringBuilder()); + } + + private String addColumnFieldDefinition(TableChange.AddColumn addColumn) { + String dataType = typeConverter.fromGravitino(addColumn.getDataType()); + if (addColumn.fieldName().length > 1) { + throw new UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + String col = addColumn.fieldName()[0]; + + StringBuilder columnDefinition = new StringBuilder(); + columnDefinition + .append("ADD COLUMN ") + .append(BACK_QUOTE) + .append(col) + .append(BACK_QUOTE) + .append(SPACE) + .append(dataType) + .append(SPACE); + + if (addColumn.isAutoIncrement()) { + Preconditions.checkArgument( + Types.allowAutoIncrement(addColumn.getDataType()), + "Auto increment is not allowed, type: " + addColumn.getDataType()); + columnDefinition.append(OCEANBASE_AUTO_INCREMENT).append(SPACE); + } + + if (!addColumn.isNullable()) { + columnDefinition.append("NOT NULL "); + } + // Append comment if available + if (StringUtils.isNotEmpty(addColumn.getComment())) { + columnDefinition.append("COMMENT '").append(addColumn.getComment()).append("' "); + } + + // Append default value if available + if (!Column.DEFAULT_VALUE_NOT_SET.equals(addColumn.getDefaultValue())) { + columnDefinition + .append("DEFAULT ") + .append(columnDefaultValueConverter.fromGravitino(addColumn.getDefaultValue())) + .append(SPACE); + } + + // Append position if available + if (addColumn.getPosition() instanceof TableChange.First) { + columnDefinition.append("FIRST"); + } else if (addColumn.getPosition() instanceof TableChange.After) { + TableChange.After afterPosition = (TableChange.After) addColumn.getPosition(); + columnDefinition + .append(AFTER) + .append(BACK_QUOTE) + .append(afterPosition.getColumn()) + .append(BACK_QUOTE); + } else if (addColumn.getPosition() instanceof TableChange.Default) { + // do nothing, follow the default behavior of oceanbase + } else { + throw new IllegalArgumentException("Invalid column position."); + } + return columnDefinition.toString(); + } + + private String renameColumnFieldDefinition( + TableChange.RenameColumn renameColumn, JdbcTable jdbcTable) { + if (renameColumn.fieldName().length > 1) { + throw new UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + + String oldColumnName = renameColumn.fieldName()[0]; + String newColumnName = renameColumn.getNewName(); + JdbcColumn column = getJdbcColumnFromTable(jdbcTable, oldColumnName); + StringBuilder sqlBuilder = + new StringBuilder( + "CHANGE COLUMN " + + BACK_QUOTE + + oldColumnName + + BACK_QUOTE + + SPACE + + BACK_QUOTE + + newColumnName + + BACK_QUOTE); + JdbcColumn newColumn = + JdbcColumn.builder() + .withName(newColumnName) + .withType(column.dataType()) + .withComment(column.comment()) + .withDefaultValue(column.defaultValue()) + .withNullable(column.nullable()) + .withAutoIncrement(column.autoIncrement()) + .build(); + return appendColumnDefinition(newColumn, sqlBuilder).toString(); + } + + private String updateColumnPositionFieldDefinition( + TableChange.UpdateColumnPosition updateColumnPosition, JdbcTable jdbcTable) { + if (updateColumnPosition.fieldName().length > 1) { + throw new 
UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + String col = updateColumnPosition.fieldName()[0]; + JdbcColumn column = getJdbcColumnFromTable(jdbcTable, col); + StringBuilder columnDefinition = new StringBuilder(); + columnDefinition.append(MODIFY_COLUMN).append(col); + appendColumnDefinition(column, columnDefinition); + if (updateColumnPosition.getPosition() instanceof TableChange.First) { + columnDefinition.append("FIRST"); + } else if (updateColumnPosition.getPosition() instanceof TableChange.After) { + TableChange.After afterPosition = (TableChange.After) updateColumnPosition.getPosition(); + columnDefinition.append(AFTER).append(afterPosition.getColumn()); + } else { + Arrays.stream(jdbcTable.columns()) + .reduce((column1, column2) -> column2) + .map(Column::name) + .ifPresent(s -> columnDefinition.append(AFTER).append(s)); + } + return columnDefinition.toString(); + } + + private String deleteColumnFieldDefinition( + TableChange.DeleteColumn deleteColumn, JdbcTable jdbcTable) { + if (deleteColumn.fieldName().length > 1) { + throw new UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + String col = deleteColumn.fieldName()[0]; + boolean colExists = true; + try { + getJdbcColumnFromTable(jdbcTable, col); + } catch (NoSuchColumnException noSuchColumnException) { + colExists = false; + } + if (!colExists) { + if (BooleanUtils.isTrue(deleteColumn.getIfExists())) { + return ""; + } else { + throw new IllegalArgumentException("Delete column does not exist: " + col); + } + } + return "DROP COLUMN " + BACK_QUOTE + col + BACK_QUOTE; + } + + private String updateColumnDefaultValueFieldDefinition( + TableChange.UpdateColumnDefaultValue updateColumnDefaultValue, JdbcTable jdbcTable) { + if (updateColumnDefaultValue.fieldName().length > 1) { + throw new UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + String col = updateColumnDefaultValue.fieldName()[0]; + JdbcColumn column = getJdbcColumnFromTable(jdbcTable, col); + StringBuilder sqlBuilder = new StringBuilder(MODIFY_COLUMN + col); + JdbcColumn newColumn = + JdbcColumn.builder() + .withName(col) + .withType(column.dataType()) + .withNullable(column.nullable()) + .withComment(column.comment()) + .withDefaultValue(updateColumnDefaultValue.getNewDefaultValue()) + .build(); + return appendColumnDefinition(newColumn, sqlBuilder).toString(); + } + + private String updateColumnTypeFieldDefinition( + TableChange.UpdateColumnType updateColumnType, JdbcTable jdbcTable) { + if (updateColumnType.fieldName().length > 1) { + throw new UnsupportedOperationException(OCEANBASE_NOT_SUPPORT_NESTED_COLUMN_MSG); + } + String col = updateColumnType.fieldName()[0]; + JdbcColumn column = getJdbcColumnFromTable(jdbcTable, col); + StringBuilder sqlBuilder = new StringBuilder(MODIFY_COLUMN + col); + JdbcColumn newColumn = + JdbcColumn.builder() + .withName(col) + .withType(updateColumnType.getNewDataType()) + .withComment(column.comment()) + .withDefaultValue(DEFAULT_VALUE_NOT_SET) + .withNullable(column.nullable()) + .withAutoIncrement(column.autoIncrement()) + .build(); + return appendColumnDefinition(newColumn, sqlBuilder).toString(); + } + + private StringBuilder appendColumnDefinition(JdbcColumn column, StringBuilder sqlBuilder) { + // Add data type + sqlBuilder.append(SPACE).append(typeConverter.fromGravitino(column.dataType())).append(SPACE); + + // Add NOT NULL if the column is marked as such + if (column.nullable()) { + sqlBuilder.append("NULL "); + } else { + 
sqlBuilder.append("NOT NULL "); + } + + // Add DEFAULT value if specified + if (!DEFAULT_VALUE_NOT_SET.equals(column.defaultValue())) { + sqlBuilder + .append("DEFAULT ") + .append(columnDefaultValueConverter.fromGravitino(column.defaultValue())) + .append(SPACE); + } + + // Add column auto_increment if specified + if (column.autoIncrement()) { + sqlBuilder.append(OCEANBASE_AUTO_INCREMENT).append(" "); + } + + // Add column comment if specified + if (StringUtils.isNotEmpty(column.comment())) { + sqlBuilder.append("COMMENT '").append(column.comment()).append("' "); + } + return sqlBuilder; } } diff --git a/catalogs/catalog-jdbc-oceanbase/src/test/java/org/apache/gravitino/catalog/oceanbase/operation/TestOceanBaseTableOperations.java b/catalogs/catalog-jdbc-oceanbase/src/test/java/org/apache/gravitino/catalog/oceanbase/operation/TestOceanBaseTableOperations.java new file mode 100644 index 00000000000..6f2a422fbdf --- /dev/null +++ b/catalogs/catalog-jdbc-oceanbase/src/test/java/org/apache/gravitino/catalog/oceanbase/operation/TestOceanBaseTableOperations.java @@ -0,0 +1,997 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.catalog.oceanbase.operation; + +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.catalog.jdbc.JdbcColumn; +import org.apache.gravitino.catalog.jdbc.JdbcTable; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.distributions.Distributions; +import org.apache.gravitino.rel.expressions.literals.Literals; +import org.apache.gravitino.rel.expressions.transforms.Transforms; +import org.apache.gravitino.rel.indexes.Index; +import org.apache.gravitino.rel.indexes.Indexes; +import org.apache.gravitino.rel.types.Decimal; +import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.rel.types.Types; +import org.apache.gravitino.utils.RandomNameUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag("gravitino-docker-test") +public class TestOceanBaseTableOperations extends TestOceanBase { + private static final Type VARCHAR = Types.VarCharType.of(255); + private static final Type INT = Types.IntegerType.get(); + + @BeforeAll + public static void setUp() { + DATABASE_OPERATIONS.create(TEST_DB_NAME, null, new HashMap<>()); + } + + @Test + public void testOperationTable() { + String tableName = RandomStringUtils.randomAlphabetic(16).toLowerCase() + "_op_table"; + String tableComment = "test_comment"; + List columns = new ArrayList<>(); + columns.add( + JdbcColumn.builder() + .withName("col_1") + .withType(VARCHAR) + .withComment("test_comment") + .withNullable(true) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_2") + .withType(INT) + .withNullable(false) + .withComment("set primary key") + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_3") + .withType(INT) + .withNullable(true) + .withDefaultValue(Literals.NULL) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_4") + .withType(VARCHAR) + .withDefaultValue(Literals.of("hello world", VARCHAR)) + .withNullable(false) + .build()); + Map properties = new HashMap<>(); + + Index[] indexes = new Index[] {Indexes.unique("test", new String[][] {{"col_1"}, {"col_2"}})}; + // create table + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + columns.toArray(new JdbcColumn[0]), + tableComment, + properties, + null, + Distributions.NONE, + indexes); + + // list table + List tables = TABLE_OPERATIONS.listTables(TEST_DB_NAME); + Assertions.assertTrue(tables.contains(tableName)); + + // load table + JdbcTable load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + // rename table + String newName = "new_table"; + Assertions.assertDoesNotThrow(() -> TABLE_OPERATIONS.rename(TEST_DB_NAME, tableName, newName)); + Assertions.assertDoesNotThrow(() -> TABLE_OPERATIONS.load(TEST_DB_NAME, newName)); + + // alter table + JdbcColumn newColumn = + JdbcColumn.builder() + .withName("col_5") + .withType(VARCHAR) + .withComment("new_add") + .withNullable(true) + .withDefaultValue(Literals.of("hello test", VARCHAR)) + .build(); + TABLE_OPERATIONS.alterTable( + 
TEST_DB_NAME, + newName, + TableChange.addColumn( + new String[] {newColumn.name()}, + newColumn.dataType(), + newColumn.comment(), + TableChange.ColumnPosition.after("col_1"), + newColumn.defaultValue())); + load = TABLE_OPERATIONS.load(TEST_DB_NAME, newName); + List alterColumns = + new ArrayList() { + { + add(columns.get(0)); + add(newColumn); + add(columns.get(1)); + add(columns.get(2)); + add(columns.get(3)); + } + }; + assertionsTableInfo( + newName, tableComment, alterColumns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + // delete column + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, newName, TableChange.deleteColumn(new String[] {newColumn.name()}, true)); + load = TABLE_OPERATIONS.load(TEST_DB_NAME, newName); + assertionsTableInfo( + newName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + TableChange deleteColumn = TableChange.deleteColumn(new String[] {newColumn.name()}, false); + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> TABLE_OPERATIONS.alterTable(TEST_DB_NAME, newName, deleteColumn)); + Assertions.assertEquals( + "Delete column does not exist: " + newColumn.name(), illegalArgumentException.getMessage()); + Assertions.assertDoesNotThrow( + () -> + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + newName, + TableChange.deleteColumn(new String[] {newColumn.name()}, true))); + + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, newName, TableChange.deleteColumn(new String[] {newColumn.name()}, true)); + Assertions.assertTrue(TABLE_OPERATIONS.drop(TEST_DB_NAME, newName), "table should be dropped"); + Assertions.assertFalse( + TABLE_OPERATIONS.drop(TEST_DB_NAME, newName), "table should be non-existent"); + } + + @Test + public void testAlterTable() { + String tableName = RandomStringUtils.randomAlphabetic(16).toLowerCase() + "_al_table"; + String tableComment = "test_comment"; + List columns = new ArrayList<>(); + JdbcColumn col_1 = + JdbcColumn.builder() + .withName("col_1") + .withType(INT) + .withComment("id") + .withNullable(false) + .build(); + columns.add(col_1); + JdbcColumn col_2 = + JdbcColumn.builder() + .withName("col_2") + .withType(VARCHAR) + .withComment("name") + .withDefaultValue(Literals.of("hello world", VARCHAR)) + .withNullable(false) + .build(); + columns.add(col_2); + JdbcColumn col_3 = + JdbcColumn.builder() + .withName("col_3") + .withType(VARCHAR) + .withComment("name") + .withDefaultValue(Literals.NULL) + .build(); + // `col_1` int NOT NULL COMMENT 'id' , + // `col_2` varchar(255) NOT NULL DEFAULT 'hello world' COMMENT 'name' , + // `col_3` varchar(255) NULL DEFAULT NULL COMMENT 'name' , + columns.add(col_3); + Map properties = new HashMap<>(); + + Index[] indexes = + new Index[] { + Indexes.createMysqlPrimaryKey(new String[][] {{"col_1"}, {"col_2"}}), + Indexes.unique("uk_2", new String[][] {{"col_1"}, {"col_2"}}) + }; + // create table + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + columns.toArray(new JdbcColumn[0]), + tableComment, + properties, + null, + Distributions.NONE, + indexes); + JdbcTable load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + tableName, + TableChange.updateColumnType(new String[] {col_1.name()}, VARCHAR)); + + load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + + // After modifying the type, some attributes of the corresponding 
column are not + // supported. + columns.clear(); + col_1 = + JdbcColumn.builder() + .withName(col_1.name()) + .withType(VARCHAR) + .withComment(col_1.comment()) + .withNullable(col_1.nullable()) + .withDefaultValue(col_1.defaultValue()) + .build(); + columns.add(col_1); + columns.add(col_2); + columns.add(col_3); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + String newComment = "new_comment"; + // update table comment and column comment + // `col_1` int NOT NULL COMMENT 'id' , + // `col_2` varchar(255) NOT NULL DEFAULT 'hello world' COMMENT 'new_comment' , + // `col_3` varchar(255) NULL DEFAULT NULL COMMENT 'name' , + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + tableName, + TableChange.updateColumnType(new String[] {col_1.name()}, INT), + TableChange.updateColumnComment(new String[] {col_2.name()}, newComment)); + load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + + columns.clear(); + col_1 = + JdbcColumn.builder() + .withName(col_1.name()) + .withType(INT) + .withComment(col_1.comment()) + .withAutoIncrement(col_1.autoIncrement()) + .withNullable(col_1.nullable()) + .withDefaultValue(col_1.defaultValue()) + .build(); + col_2 = + JdbcColumn.builder() + .withName(col_2.name()) + .withType(col_2.dataType()) + .withComment(newComment) + .withAutoIncrement(col_2.autoIncrement()) + .withNullable(col_2.nullable()) + .withDefaultValue(col_2.defaultValue()) + .build(); + columns.add(col_1); + columns.add(col_2); + columns.add(col_3); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + String newColName_1 = "new_col_1"; + String newColName_2 = "new_col_2"; + // rename column + // update table comment and column comment + // `new_col_1` int NOT NULL COMMENT 'id' , + // `new_col_2` varchar(255) NOT NULL DEFAULT 'hello world' COMMENT 'new_comment' + // , + // `col_3` varchar(255) NULL DEFAULT NULL COMMENT 'name' , + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + tableName, + TableChange.renameColumn(new String[] {col_1.name()}, newColName_1), + TableChange.renameColumn(new String[] {col_2.name()}, newColName_2)); + + load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + + columns.clear(); + col_1 = + JdbcColumn.builder() + .withName(newColName_1) + .withType(col_1.dataType()) + .withComment(col_1.comment()) + .withAutoIncrement(col_1.autoIncrement()) + .withNullable(col_1.nullable()) + .withDefaultValue(col_1.defaultValue()) + .build(); + col_2 = + JdbcColumn.builder() + .withName(newColName_2) + .withType(col_2.dataType()) + .withComment(col_2.comment()) + .withAutoIncrement(col_2.autoIncrement()) + .withNullable(col_2.nullable()) + .withDefaultValue(col_2.defaultValue()) + .build(); + columns.add(col_1); + columns.add(col_2); + columns.add(col_3); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + newComment = "txt3"; + String newCol2Comment = "xxx"; + // update column position add column、set table properties + // `new_col_2` varchar(255) NOT NULL DEFAULT 'hello world' COMMENT 'xxx' , + // `new_col_1` int NOT NULL COMMENT 'id' , + // `col_3` varchar(255) NULL DEFAULT NULL COMMENT 'name' , + // `col_4` varchar(255) NOT NULL COMMENT 'txt4' , + // `col_5` varchar(255) COMMENT 'hello world' DEFAULT 'hello world' , + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + tableName, + TableChange.updateColumnPosition( + new String[] {newColName_1}, TableChange.ColumnPosition.after(newColName_2)), 
+ TableChange.addColumn(new String[] {"col_4"}, VARCHAR, "txt4", false), + TableChange.updateColumnComment(new String[] {newColName_2}, newCol2Comment), + TableChange.addColumn( + new String[] {"col_5"}, VARCHAR, "txt5", Literals.of("hello world", VARCHAR))); + TABLE_OPERATIONS.alterTable(TEST_DB_NAME, tableName, TableChange.updateComment(newComment)); + load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + + columns.clear(); + + columns.add( + JdbcColumn.builder() + .withName(col_2.name()) + .withType(col_2.dataType()) + .withComment(newCol2Comment) + .withAutoIncrement(col_2.autoIncrement()) + .withDefaultValue(col_2.defaultValue()) + .withNullable(col_2.nullable()) + .build()); + columns.add(col_1); + columns.add(col_3); + columns.add( + JdbcColumn.builder() + .withName("col_4") + .withType(VARCHAR) + .withComment("txt4") + .withDefaultValue(Column.DEFAULT_VALUE_NOT_SET) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_5") + .withType(VARCHAR) + .withComment("txt5") + .withDefaultValue(Literals.of("hello world", VARCHAR)) + .withNullable(true) + .build()); + assertionsTableInfo( + tableName, newComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + // `new_col_2` varchar(255) NOT NULL DEFAULT 'hello world' COMMENT 'xxx' , + // `col_3` varchar(255) NULL DEFAULT NULL COMMENT 'name' , + // `col_4` varchar(255) NULL COMMENT 'txt4' , + // `col_5` varchar(255) COMMENT 'hello world' DEFAULT 'hello world' , + // `new_col_1` int NOT NULL COMMENT 'id' , + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + tableName, + TableChange.updateColumnPosition(new String[] {columns.get(1).name()}, null), + TableChange.updateColumnNullability( + new String[] {columns.get(3).name()}, !columns.get(3).nullable())); + + load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + col_1 = columns.remove(1); + JdbcColumn col3 = columns.remove(1); + JdbcColumn col_4 = columns.remove(1); + JdbcColumn col_5 = columns.remove(1); + columns.clear(); + + columns.add( + JdbcColumn.builder() + .withName("new_col_2") + .withType(VARCHAR) + .withNullable(false) + .withComment("xxx") + .withDefaultValue(Literals.of("hello world", VARCHAR)) + .build()); + columns.add(col3); + columns.add( + JdbcColumn.builder() + .withName(col_4.name()) + .withType(col_4.dataType()) + .withNullable(!col_4.nullable()) + .withComment(col_4.comment()) + .withDefaultValue(col_4.defaultValue()) + .build()); + columns.add(col_5); + columns.add(col_1); + + assertionsTableInfo( + tableName, newComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, load); + + TableChange updateColumn = + TableChange.updateColumnNullability(new String[] {col3.name()}, !col3.nullable()); + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> TABLE_OPERATIONS.alterTable(TEST_DB_NAME, tableName, updateColumn)); + Assertions.assertTrue( + exception.getMessage().contains("with null default value cannot be changed to not null")); + } + + @Test + public void testAlterTableUpdateColumnDefaultValue() { + String tableName = RandomNameUtils.genRandomName("properties_table_"); + String tableComment = "test_comment"; + List columns = new ArrayList<>(); + columns.add( + JdbcColumn.builder() + .withName("col_1") + .withType(Types.DecimalType.of(10, 2)) + .withComment("test_decimal") + .withNullable(false) + .withDefaultValue(Literals.decimalLiteral(Decimal.of("0.00", 10, 2))) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_2") + 
.withType(Types.LongType.get()) + .withNullable(false) + .withDefaultValue(Literals.longLiteral(0L)) + .withComment("long type") + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_3") + .withType(Types.TimestampType.withoutTimeZone()) + .withNullable(false) + .withComment("timestamp") + .withDefaultValue(Literals.timestampLiteral(LocalDateTime.parse("2013-01-01T00:00:00"))) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_4") + .withType(Types.VarCharType.of(255)) + .withNullable(false) + .withComment("varchar") + .withDefaultValue(Literals.of("hello", Types.VarCharType.of(255))) + .build()); + Map properties = new HashMap<>(); + + Index[] indexes = + new Index[] { + Indexes.createMysqlPrimaryKey(new String[][] {{"col_2"}}), + Indexes.unique("uk_col_4", new String[][] {{"col_4"}}) + }; + // create table + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + columns.toArray(new JdbcColumn[0]), + tableComment, + properties, + null, + Distributions.NONE, + indexes); + + JdbcTable loaded = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, loaded); + + TABLE_OPERATIONS.alterTable( + TEST_DB_NAME, + tableName, + TableChange.updateColumnDefaultValue( + new String[] {columns.get(0).name()}, + Literals.decimalLiteral(Decimal.of("1.23", 10, 2))), + TableChange.updateColumnDefaultValue( + new String[] {columns.get(1).name()}, Literals.longLiteral(1L)), + TableChange.updateColumnDefaultValue( + new String[] {columns.get(2).name()}, + Literals.timestampLiteral(LocalDateTime.parse("2024-04-01T00:00:00"))), + TableChange.updateColumnDefaultValue( + new String[] {columns.get(3).name()}, Literals.of("world", Types.VarCharType.of(255)))); + + loaded = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + Assertions.assertEquals( + Literals.decimalLiteral(Decimal.of("1.234", 10, 2)), loaded.columns()[0].defaultValue()); + Assertions.assertEquals(Literals.longLiteral(1L), loaded.columns()[1].defaultValue()); + Assertions.assertEquals( + Literals.timestampLiteral(LocalDateTime.parse("2024-04-01T00:00:00")), + loaded.columns()[2].defaultValue()); + Assertions.assertEquals( + Literals.of("world", Types.VarCharType.of(255)), loaded.columns()[3].defaultValue()); + } + + @Test + public void testCreateAndLoadTable() { + String tableName = RandomStringUtils.randomAlphabetic(16).toLowerCase() + "_cl_table"; + String tableComment = "test_comment"; + List columns = new ArrayList<>(); + columns.add( + JdbcColumn.builder() + .withName("col_1") + .withType(Types.DecimalType.of(10, 2)) + .withComment("test_decimal") + .withNullable(false) + .withDefaultValue(Literals.decimalLiteral(Decimal.of("0.00", 10, 2))) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_2") + .withType(Types.LongType.get()) + .withNullable(false) + .withDefaultValue(Literals.longLiteral(0L)) + .withComment("long type") + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_3") + .withType(Types.TimestampType.withoutTimeZone()) + .withNullable(false) + .withComment("timestamp") + .withDefaultValue(Literals.timestampLiteral(LocalDateTime.parse("2013-01-01T00:00:00"))) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_4") + .withType(Types.DateType.get()) + .withNullable(true) + .withComment("date") + .withDefaultValue(Column.DEFAULT_VALUE_NOT_SET) + .build()); + Map properties = new HashMap<>(); + + Index[] indexes = + new Index[] { + Indexes.createMysqlPrimaryKey(new 
String[][] {{"col_2"}}), + Indexes.unique("uk_col_4", new String[][] {{"col_4"}}) + }; + // create table + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + columns.toArray(new JdbcColumn[0]), + tableComment, + properties, + null, + Distributions.NONE, + indexes); + + JdbcTable loaded = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, tableComment, columns, properties, indexes, Transforms.EMPTY_TRANSFORM, loaded); + } + + @Test + public void testCreateAllTypeTable() { + String tableName = RandomNameUtils.genRandomName("type_table_"); + String tableComment = "test_comment"; + List columns = new ArrayList<>(); + columns.add( + JdbcColumn.builder() + .withName("col_1") + .withType(Types.ByteType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_2") + .withType(Types.ShortType.get()) + .withNullable(true) + .build()); + columns.add(JdbcColumn.builder().withName("col_3").withType(INT).withNullable(false).build()); + columns.add( + JdbcColumn.builder() + .withName("col_4") + .withType(Types.LongType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_5") + .withType(Types.FloatType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_6") + .withType(Types.DoubleType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_7") + .withType(Types.DateType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_8") + .withType(Types.TimeType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_9") + .withType(Types.TimestampType.withoutTimeZone()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder().withName("col_10").withType(Types.DecimalType.of(10, 2)).build()); + columns.add( + JdbcColumn.builder().withName("col_11").withType(VARCHAR).withNullable(false).build()); + columns.add( + JdbcColumn.builder() + .withName("col_12") + .withType(Types.FixedCharType.of(10)) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_13") + .withType(Types.StringType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_14") + .withType(Types.BinaryType.get()) + .withNullable(false) + .build()); + columns.add( + JdbcColumn.builder() + .withName("col_15") + .withType(Types.FixedCharType.of(10)) + .withNullable(false) + .build()); + + // create table + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + columns.toArray(new JdbcColumn[0]), + tableComment, + Collections.emptyMap(), + null, + Distributions.NONE, + Indexes.EMPTY_INDEXES); + + JdbcTable load = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, + tableComment, + columns, + Collections.emptyMap(), + null, + Transforms.EMPTY_TRANSFORM, + load); + } + + @Test + public void testCreateNotSupportTypeTable() { + String tableName = RandomNameUtils.genRandomName("type_table_"); + String tableComment = "test_comment"; + List columns = new ArrayList<>(); + List notSupportType = + Arrays.asList( + Types.FixedType.of(10), + Types.IntervalDayType.get(), + Types.IntervalYearType.get(), + Types.UUIDType.get(), + Types.ListType.of(Types.DateType.get(), true), + Types.MapType.of(Types.StringType.get(), Types.IntegerType.get(), true), + Types.UnionType.of(Types.IntegerType.get()), + Types.StructType.of( + 
Types.StructType.Field.notNullField("col_1", Types.IntegerType.get()))); + + for (Type type : notSupportType) { + columns.clear(); + columns.add( + JdbcColumn.builder().withName("col_1").withType(type).withNullable(false).build()); + + JdbcColumn[] jdbcCols = columns.toArray(new JdbcColumn[0]); + Map emptyMap = Collections.emptyMap(); + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> { + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + jdbcCols, + tableComment, + emptyMap, + null, + Distributions.NONE, + Indexes.EMPTY_INDEXES); + }); + System.out.println(illegalArgumentException.getMessage()); + Assertions.assertTrue( + illegalArgumentException + .getMessage() + .contains( + String.format( + "Couldn't convert Gravitino type %s to OceanBase type", + type.simpleString()))); + } + } + + @Test + public void testCreateMultipleTables() { + String test_table_1 = "test_table_1"; + TABLE_OPERATIONS.create( + TEST_DB_NAME, + test_table_1, + new JdbcColumn[] { + JdbcColumn.builder() + .withName("col_1") + .withType(Types.DecimalType.of(10, 2)) + .withComment("test_decimal") + .withNullable(false) + .withDefaultValue(Literals.decimalLiteral(Decimal.of("0.00"))) + .build() + }, + "test_comment", + null, + null, + Distributions.NONE, + Indexes.EMPTY_INDEXES); + + String testDb = "test_db_2"; + + DATABASE_OPERATIONS.create(testDb, null, null); + List tables = TABLE_OPERATIONS.listTables(testDb); + Assertions.assertFalse(tables.contains(test_table_1)); + + String test_table_2 = "test_table_2"; + TABLE_OPERATIONS.create( + testDb, + test_table_2, + new JdbcColumn[] { + JdbcColumn.builder() + .withName("col_1") + .withType(Types.DecimalType.of(10, 2)) + .withComment("test_decimal") + .withNullable(false) + .withDefaultValue(Literals.decimalLiteral(Decimal.of("0.00"))) + .build() + }, + "test_comment", + null, + null, + Distributions.NONE, + Indexes.EMPTY_INDEXES); + + tables = TABLE_OPERATIONS.listTables(TEST_DB_NAME); + Assertions.assertFalse(tables.contains(test_table_2)); + } + + @Test + public void testAutoIncrement() { + String tableName = "test_increment_table_1"; + String comment = "test_comment"; + Map properties = + new HashMap() { + { + put("AUTO_INCREMENT", "10"); + } + }; + JdbcColumn[] columns = { + JdbcColumn.builder() + .withName("col_1") + .withType(Types.LongType.get()) + .withComment("id") + .withAutoIncrement(true) + .withNullable(false) + .build(), + JdbcColumn.builder() + .withName("col_2") + .withType(Types.VarCharType.of(255)) + .withComment("city") + .withNullable(false) + .build(), + JdbcColumn.builder() + .withName("col_3") + .withType(Types.VarCharType.of(255)) + .withComment("name") + .withNullable(false) + .build() + }; + // Test create increment key for unique index. + Index[] indexes = + new Index[] { + Indexes.createMysqlPrimaryKey(new String[][] {{"col_2"}}), + Indexes.unique("uk_1", new String[][] {{"col_1"}}) + }; + TABLE_OPERATIONS.create( + TEST_DB_NAME, tableName, columns, comment, properties, null, Distributions.NONE, indexes); + + JdbcTable table = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, + comment, + Arrays.stream(columns).collect(Collectors.toList()), + properties, + indexes, + Transforms.EMPTY_TRANSFORM, + table); + TABLE_OPERATIONS.drop(TEST_DB_NAME, tableName); + + // Test create increment key for primary index. 
+ indexes = + new Index[] { + Indexes.createMysqlPrimaryKey(new String[][] {{"col_1"}}), + Indexes.unique("uk_2", new String[][] {{"col_2"}}) + }; + TABLE_OPERATIONS.create( + TEST_DB_NAME, tableName, columns, comment, properties, null, Distributions.NONE, indexes); + + table = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, + comment, + Arrays.stream(columns).collect(Collectors.toList()), + properties, + indexes, + Transforms.EMPTY_TRANSFORM, + table); + TABLE_OPERATIONS.drop(TEST_DB_NAME, tableName); + + // Test create increment key for col_1 + col_3 uk. + indexes = new Index[] {Indexes.unique("uk_2_3", new String[][] {{"col_1"}, {"col_3"}})}; + TABLE_OPERATIONS.create( + TEST_DB_NAME, tableName, columns, comment, properties, null, Distributions.NONE, indexes); + + table = TABLE_OPERATIONS.load(TEST_DB_NAME, tableName); + assertionsTableInfo( + tableName, + comment, + Arrays.stream(columns).collect(Collectors.toList()), + properties, + indexes, + Transforms.EMPTY_TRANSFORM, + table); + TABLE_OPERATIONS.drop(TEST_DB_NAME, tableName); + + // Test create auto increment fail + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + columns, + comment, + properties, + null, + Distributions.NONE, + Indexes.EMPTY_INDEXES)); + Assertions.assertTrue( + StringUtils.contains( + exception.getMessage(), + "Incorrect table definition; there can be only one auto column and it must be defined as a key")); + + // Test create many auto increment col + JdbcColumn[] newColumns = { + columns[0], + columns[1], + columns[2], + JdbcColumn.builder() + .withName("col_4") + .withType(Types.IntegerType.get()) + .withComment("test_id") + .withAutoIncrement(true) + .withNullable(false) + .build() + }; + + final Index[] primaryIndex = + new Index[] {Indexes.createMysqlPrimaryKey(new String[][] {{"col_1"}, {"col_4"}})}; + exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + TABLE_OPERATIONS.create( + TEST_DB_NAME, + tableName, + newColumns, + comment, + properties, + null, + Distributions.NONE, + primaryIndex)); + Assertions.assertTrue( + StringUtils.contains( + exception.getMessage(), + "Only one column can be auto-incremented. 
There are multiple auto-increment columns in your table: [col_1,col_4]")); + } + + @Test + public void testAppendIndexesBuilder() { + Index[] indexes = + new Index[] { + Indexes.createMysqlPrimaryKey(new String[][] {{"col_2"}, {"col_1"}}), + Indexes.unique("uk_col_4", new String[][] {{"col_4"}}), + Indexes.unique("uk_col_5", new String[][] {{"col_4"}, {"col_5"}}), + Indexes.unique("uk_col_6", new String[][] {{"col_4"}, {"col_5"}, {"col_6"}}) + }; + StringBuilder sql = new StringBuilder(); + OceanBaseTableOperations.appendIndexesSql(indexes, sql); + String expectedStr = + ",\n" + + "CONSTRAINT PRIMARY KEY (`col_2`, `col_1`),\n" + + "CONSTRAINT `uk_col_4` UNIQUE (`col_4`),\n" + + "CONSTRAINT `uk_col_5` UNIQUE (`col_4`, `col_5`),\n" + + "CONSTRAINT `uk_col_6` UNIQUE (`col_4`, `col_5`, `col_6`)"; + Assertions.assertEquals(expectedStr, sql.toString()); + + indexes = + new Index[] { + Indexes.unique("uk_1", new String[][] {{"col_4"}}), + Indexes.unique("uk_2", new String[][] {{"col_4"}, {"col_3"}}), + Indexes.createMysqlPrimaryKey(new String[][] {{"col_2"}, {"col_1"}, {"col_3"}}), + Indexes.unique("uk_3", new String[][] {{"col_4"}, {"col_5"}, {"col_6"}, {"col_7"}}) + }; + sql = new StringBuilder(); + OceanBaseTableOperations.appendIndexesSql(indexes, sql); + expectedStr = + ",\n" + + "CONSTRAINT `uk_1` UNIQUE (`col_4`),\n" + + "CONSTRAINT `uk_2` UNIQUE (`col_4`, `col_3`),\n" + + "CONSTRAINT PRIMARY KEY (`col_2`, `col_1`, `col_3`),\n" + + "CONSTRAINT `uk_3` UNIQUE (`col_4`, `col_5`, `col_6`, `col_7`)"; + Assertions.assertEquals(expectedStr, sql.toString()); + } + + @Test + public void testOperationIndexDefinition() { + TableChange.AddIndex failIndex = + new TableChange.AddIndex(Index.IndexType.PRIMARY_KEY, "pk_1", new String[][] {{"col_1"}}); + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> OceanBaseTableOperations.addIndexDefinition(failIndex)); + Assertions.assertTrue( + illegalArgumentException + .getMessage() + .contains("Primary key name must be PRIMARY in OceanBase")); + + TableChange.AddIndex successIndex = + new TableChange.AddIndex( + Index.IndexType.UNIQUE_KEY, "uk_1", new String[][] {{"col_1"}, {"col_2"}}); + String sql = OceanBaseTableOperations.addIndexDefinition(successIndex); + Assertions.assertEquals("ADD UNIQUE INDEX `uk_1` (`col_1`, `col_2`)", sql); + + successIndex = + new TableChange.AddIndex( + Index.IndexType.PRIMARY_KEY, + Indexes.DEFAULT_MYSQL_PRIMARY_KEY_NAME, + new String[][] {{"col_1"}, {"col_2"}}); + sql = OceanBaseTableOperations.addIndexDefinition(successIndex); + Assertions.assertEquals("ADD PRIMARY KEY (`col_1`, `col_2`)", sql); + + TableChange.DeleteIndex deleteIndex = new TableChange.DeleteIndex("uk_1", false); + sql = OceanBaseTableOperations.deleteIndexDefinition(null, deleteIndex); + Assertions.assertEquals("DROP INDEX `uk_1`", sql); + } +} diff --git a/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java b/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java index 775687abd47..13a0ff3be0a 100644 --- a/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java +++ b/catalogs/catalog-jdbc-postgresql/src/main/java/org/apache/gravitino/catalog/postgresql/operation/PostgreSqlTableOperations.java @@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting; import 
com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.ResultSet; @@ -46,6 +47,7 @@ import org.apache.gravitino.catalog.jdbc.converter.JdbcTypeConverter; import org.apache.gravitino.catalog.jdbc.operation.JdbcTableOperations; import org.apache.gravitino.exceptions.NoSuchColumnException; +import org.apache.gravitino.exceptions.NoSuchSchemaException; import org.apache.gravitino.exceptions.NoSuchTableException; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; @@ -86,6 +88,24 @@ public void initialize( "The `jdbc-database` configuration item is mandatory in PostgreSQL."); } + @Override + public List listTables(String databaseName) throws NoSuchSchemaException { + try (Connection connection = getConnection(databaseName)) { + final List names = Lists.newArrayList(); + try (ResultSet tables = getTables(connection)) { + while (tables.next()) { + if (Objects.equals(tables.getString("TABLE_SCHEM"), databaseName)) { + names.add(tables.getString("TABLE_NAME")); + } + } + } + LOG.info("Finished listing tables size {} for database name {} ", names.size(), databaseName); + return names; + } catch (final SQLException se) { + throw this.exceptionMapper.toGravitinoException(se); + } + } + @Override protected JdbcTable.Builder getTableBuilder( ResultSet tablesResult, String databaseName, String tableName) throws SQLException { @@ -226,7 +246,7 @@ static void appendIndexesSql(Index[] indexes, StringBuilder sqlBuilder) { } } - private static String getIndexFieldStr(String[][] fieldNames) { + protected static String getIndexFieldStr(String[][] fieldNames) { return Arrays.stream(fieldNames) .map( colNames -> { @@ -593,15 +613,6 @@ private String renameColumnFieldDefinition( + ";"; } - @Override - public JdbcTable getOrCreateTable( - String databaseName, String tableName, JdbcTable lazyLoadTable) { - if (null == lazyLoadTable) { - return load(databaseName, tableName); - } - return lazyLoadTable; - } - private List addColumnFieldDefinition( TableChange.AddColumn addColumn, JdbcTable lazyLoadTable) { if (addColumn.fieldName().length > 1) { From d9c2be255919bae97dc08fd70ef2aa007bac8538 Mon Sep 17 00:00:00 2001 From: mchades Date: Fri, 25 Oct 2024 15:31:22 +0800 Subject: [PATCH 015/123] [#5217] docs(metalake-catalog): refine docs for deleting metalake and catalog (#5218) ### What changes were proposed in this pull request? refine docs for deleting metalake and catalog ### Why are the changes needed? Fix: #5217 ### Does this PR introduce _any_ user-facing change? yes, user docs changed ### How was this patch tested? 
set up the gravitino-site locally --- ...manage-fileset-metadata-using-gravitino.md | 2 +- ...nage-messaging-metadata-using-gravitino.md | 4 +- docs/manage-metalake-using-gravitino.md | 111 ++++++++++- ...age-relational-metadata-using-gravitino.md | 179 +++++++++++++++++- docs/open-api/catalogs.yaml | 48 +++++ docs/open-api/metalakes.yaml | 46 +++++ docs/open-api/openapi.yaml | 28 +++ 7 files changed, 398 insertions(+), 20 deletions(-)

diff --git a/docs/manage-fileset-metadata-using-gravitino.md b/docs/manage-fileset-metadata-using-gravitino.md
index 22b12f5b1a7..de478efb731 100644
--- a/docs/manage-fileset-metadata-using-gravitino.md
+++ b/docs/manage-fileset-metadata-using-gravitino.md
@@ -25,7 +25,7 @@ control mechanism without needing to set access controls across different storag
 To use fileset, please make sure that:

  - Gravitino server has started, and the host and port is [http://localhost:8090](http://localhost:8090).
- - A metalake has been created.
+ - A metalake has been created and [enabled](./manage-metalake-using-gravitino.md#enable-a-metalake).

 ## Catalog operations

diff --git a/docs/manage-messaging-metadata-using-gravitino.md b/docs/manage-messaging-metadata-using-gravitino.md
index 9fc5d6adb70..953b9989cea 100644
--- a/docs/manage-messaging-metadata-using-gravitino.md
+++ b/docs/manage-messaging-metadata-using-gravitino.md
@@ -16,7 +16,7 @@ Through Gravitino, you can create, update, delete, and list topics via unified R
 To use messaging catalog, please make sure that:

  - Gravitino server has started, and the host and port is [http://localhost:8090](http://localhost:8090).
- - A metalake has been created.
+ - A metalake has been created and [enabled](./manage-metalake-using-gravitino.md#enable-a-metalake).

 ## Catalog operations

@@ -138,7 +138,7 @@ in relational catalog for more details. For a messaging catalog, the list operat
 ## Topic operations

 :::tip
-Users should create a metalake, a catalog and a schema before creating a table.
+Users should create a metalake, a catalog and a schema, then ensure that the metalake and catalog are enabled before operating topics.
 :::

 ### Create a topic

diff --git a/docs/manage-metalake-using-gravitino.md b/docs/manage-metalake-using-gravitino.md
index 0aa464fe9c8..1e15cd7318f 100644
--- a/docs/manage-metalake-using-gravitino.md
+++ b/docs/manage-metalake-using-gravitino.md
@@ -151,9 +151,108 @@ The following table outlines the supported modifications that you can make to a
 | Set property    | `{"@type":"setProperty","property":"key1","value":"value1"}` | `MetalakeChange.setProperty("key1", "value1")` | `MetalakeChange.set_property("key1", "value1")` |
 | Remove property | `{"@type":"removeProperty","property":"key1"}`               | `MetalakeChange.removeProperty("key1")`        | `MetalakeChange.remove_property("key1")`        |

+## Enable a metalake
+
+Metalake has a reserved property, `in-use`, which indicates whether the metalake is available for use. By default, the `in-use` property is set to `true`.
+To enable a disabled metalake, you can send a `PATCH` request to the `/api/metalakes/{metalake_name}` endpoint or use the Gravitino Admin client.
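+
+Before toggling the flag, you can check a metalake's current status by loading it first. This is a minimal sketch, assuming the reserved `in-use` property is reported among the metalake's properties in the load response:
+
+```shell
+curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \
+-H "Content-Type: application/json" \
+http://localhost:8090/api/metalakes/metalake
+```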
+ +The following is an example of enabling a metalake: + + + + +```shell +curl -X PATCH -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{"in-use": true}' \ +http://localhost:8090/api/metalakes/metalake +``` + + + + +```java +GravitinoAdminClient gravitinoAdminClient = GravitinoAdminClient + .builder("http://localhost:8090") + .build(); + +gravitinoAdminClient.enableMetalake("metalake"); + // ... +``` + + + + +```python +gravitino_admin_client: GravitinoAdminClient = GravitinoAdminClient(uri="http://localhost:8090") +gravitino_admin_client.enable_metalake("metalake") +``` + + + + +:::info +This operation does nothing if the metalake is already enabled. +::: + +## Disable a metalake + +Once a metalake is disabled: + - Users can only [list](#list-all-metalakes), [load](#load-a-metalake), [drop](#drop-a-metalake), or [enable](#enable-a-metalake) it. + - Any other operation on the metalake or its sub-entities will result in an error. + +To disable a metalake, you can send a `PATCH` request to the `/api/metalakes/{metalake_name}` endpoint or use the Gravitino Admin client. + +The following is an example of disabling a metalake: + + + + +```shell +curl -X PATCH -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{"in-use": false}' \ +http://localhost:8090/api/metalakes/metalake +``` + + + + +```java +GravitinoAdminClient gravitinoAdminClient = GravitinoAdminClient + .builder("http://localhost:8090") + .build(); + +gravitinoAdminClient.disableMetalake("metalake"); + // ... +``` + + + + +```python +gravitino_admin_client: GravitinoAdminClient = GravitinoAdminClient(uri="http://localhost:8090") +gravitino_admin_client.disable_metalake("metalake") +``` + + + + +:::info +This operation does nothing if the metalake is already disabled. +::: ## Drop a metalake +Deleting a metalake by "force" is not a default behavior, so please make sure: + +- There are no catalogs under the metalake. Otherwise, you will get an error. +- The metalake is [disabled](#disable-a-metalake). Otherwise, you will get an error. + +Deleting a metalake by "force" will: + +- Delete all sub-entities (tags, catalogs, schemas, etc.) under the metalake. +- Delete the metalake itself even if it is enabled. +- Not delete the external resources (such as database, table, etc.) associated with sub-entities unless they are managed (such as managed fileset). + To drop a metalake, you can send a `DELETE` request to the `/api/metalakes/{metalake_name}` endpoint or use the Gravitino Admin client. The following is an example of dropping a metalake: @@ -163,7 +262,7 @@ The following is an example of dropping a metalake: ```shell curl -X DELETE -H "Accept: application/vnd.gravitino.v1+json" \ --H "Content-Type: application/json" http://localhost:8090/api/metalakes/metalake +-H "Content-Type: application/json" http://localhost:8090/api/metalakes/metalake?force=false ``` @@ -171,7 +270,8 @@ curl -X DELETE -H "Accept: application/vnd.gravitino.v1+json" \ ```java // ... -boolean success = gravitinoAdminClient.dropMetalake("metalake"); +// force can be true or false +boolean success = gravitinoAdminClient.dropMetalake("metalake", false); // ... ``` @@ -179,17 +279,12 @@ boolean success = gravitinoAdminClient.dropMetalake("metalake"); ```python -gravitino_admin_client.drop_metalake("metalake") +gravitino_admin_client.drop_metalake("metalake", force=True) ``` -:::note -Dropping a metalake in cascade mode is not allowed. 
That is, all the
-catalogs, schemas, and tables under a metalake must be removed before you can drop the metalake.
-:::
-
 ## List all metalakes

 To view all your metalakes, you can send a `GET` request to the `/api/metalakes` endpoint or use the Gravitino Admin client.

diff --git a/docs/manage-relational-metadata-using-gravitino.md b/docs/manage-relational-metadata-using-gravitino.md
index f810b4aa325..c4fffaea8db 100644
--- a/docs/manage-relational-metadata-using-gravitino.md
+++ b/docs/manage-relational-metadata-using-gravitino.md
@@ -29,7 +29,7 @@ For more details, please refer to the related doc.
 Assuming:

  - Gravitino has just started, and the host and port is [http://localhost:8090](http://localhost:8090).
- - Metalake has been created.
+ - A metalake has been created and [enabled](./manage-metalake-using-gravitino.md#enable-a-metalake).

 ## Catalog operations

@@ -84,6 +84,19 @@ Catalog catalog = gravitinoClient.createCatalog("catalog",
 // ...
 ```

+
+
+```python
+# Assuming you have just created a metalake named `metalake`
+gravitino_client = GravitinoClient(uri="http://localhost:8090", metalake_name="metalake")
+gravitino_client.create_catalog(name="catalog",
+                                catalog_type=CatalogType.RELATIONAL,
+                                provider="hive",
+                                comment="This is a hive catalog",
+                                properties={"metastore.uris": "thrift://localhost:9083"})
+```
+


@@ -121,6 +134,15 @@ Catalog catalog = gravitinoClient.loadCatalog("catalog");
 // ...
 ```

+
+
+```python
+# ...
+# Assuming you have created a metalake named `metalake` and a catalog named `catalog`
+catalog = gravitino_client.load_catalog("catalog")
+```
+


@@ -159,6 +181,17 @@ Catalog catalog = gravitinoClient.alterCatalog("catalog",
 // ...
 ```

+
+
+```python
+# ...
+# Assuming you have created a metalake named `metalake` and a catalog named `catalog`
+changes = (CatalogChange.update_comment("new comment"),)
+catalog = gravitino_client.alter_catalog("catalog", *changes)
+# ...
+```
+


@@ -180,8 +213,108 @@ Therefore, do not change the catalog's URI unless you fully understand the conse
 :::

+### Enable a catalog
+
+Catalog has a reserved property, `in-use`, which indicates whether the catalog is available for use. By default, the `in-use` property is set to `true`.
+To enable a disabled catalog, you can send a `PATCH` request to the `/api/metalakes/{metalake_name}/catalogs/{catalog_name}` endpoint or use the Gravitino Java client.
+
+The following is an example of enabling a catalog:
+
+
+
+```shell
+curl -X PATCH -H "Accept: application/vnd.gravitino.v1+json" \
+-H "Content-Type: application/json" -d '{"in-use": true}' \
+http://localhost:8090/api/metalakes/metalake/catalogs/catalog
+```
+
+
+
+```java
+// ...
+// Assuming you have created a metalake named `metalake` and a catalog named `catalog`
+gravitinoClient.enableCatalog("catalog");
+// ...
+```
+
+
+
+```python
+# ...
+# Assuming you have created a metalake named `metalake` and a catalog named `catalog`
+gravitino_client.enable_catalog("catalog")
+# ...
+```
+
+
+
+:::info
+This operation does nothing if the catalog is already enabled.
+:::
+
+### Disable a catalog
+
+Once a catalog is disabled:
+- Users can only [list](#list-all-catalogs-in-a-metalake), [load](#load-a-catalog), [drop](#drop-a-catalog), or [enable](#enable-a-catalog) it.
+- Any other operation on the catalog or its sub-entities will result in an error.
+
+To disable a catalog, you can send a `PATCH` request to the `/api/metalakes/{metalake_name}/catalogs/{catalog_name}` endpoint or use the Gravitino Java client.
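+
+As with metalakes, you can inspect a catalog's current status by loading it first. This is a minimal sketch, assuming the reserved `in-use` property appears among the catalog's properties in the load response:
+
+```shell
+curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \
+-H "Content-Type: application/json" \
+http://localhost:8090/api/metalakes/metalake/catalogs/catalog
+```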
+ +The following is an example of disabling a catalog: + + + + +```shell +curl -X PATCH -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{"in-use": false}' \ +http://localhost:8090/api/metalakes/metalake/catalogs/catalog +``` + + + + +```java +// ... +// Assuming you have created a metalake named `metalake` and a catalog named `catalog` +gravitinoClient.disableCatalog("catalog"); +// ... +``` + + + + +```python +# ... +# Assuming you have created a metalake named `metalake` and a catalog named `catalog` +gravitino_client.disable_catalog("catalog") +# ... +``` + + + + +:::info +This operation does nothing if the catalog is already disabled. +::: + ### Drop a catalog +Deleting a catalog by "force" is not a default behavior, so please make sure: + +- There are no schemas under the catalog. Otherwise, you will get an error. +- The catalog is [disabled](#disable-a-catalog). Otherwise, you will get an error. + +Deleting a catalog by "force" will: + +- Delete all sub-entities (schemas, tables, etc.) under the catalog. +- Delete the catalog itself even if it is enabled. +- Not delete the external resources (such as database, table, etc.) associated with sub-entities unless they are managed (such as managed fileset). + You can remove a catalog by sending a `DELETE` request to the `/api/metalakes/{metalake_name}/catalogs/{catalog_name}` endpoint or just use the Gravitino Java client. The following is an example of dropping a catalog: @@ -190,7 +323,7 @@ You can remove a catalog by sending a `DELETE` request to the `/api/metalakes/{m ```shell curl -X DELETE -H "Accept: application/vnd.gravitino.v1+json" \ -H "Content-Type: application/json" \ -http://localhost:8090/api/metalakes/metalake/catalogs/catalog +http://localhost:8090/api/metalakes/metalake/catalogs/catalog?force=false ``` @@ -199,17 +332,25 @@ http://localhost:8090/api/metalakes/metalake/catalogs/catalog ```java // ... // Assuming you have created a metalake named `metalake` and a catalog named `catalog` -gravitinoClient.dropCatalog("catalog"); +// force can be true or false +gravitinoClient.dropCatalog("catalog", false); // ... ``` - + -:::note -Dropping a catalog only removes metadata about the catalog, schemas, and tables under the catalog in Gravitino, It doesn't remove the real data (table and schema) in Apache Hive. -::: +```python +# ... +# Assuming you have created a metalake named `metalake` and a catalog named `catalog` +# force can be true or false +gravitino_client.drop_catalog(name="catalog", force=False) +# ... +``` + + + ### List all catalogs in a metalake @@ -235,6 +376,16 @@ String[] catalogNames = gravitinoClient.listCatalogs(); // ... ``` + + + +```python +# ... +# Assuming you have created a metalake named `metalake` and a catalog named `catalog` +catalog_names = gravitino_client.list_catalogs() +# ... +``` + @@ -261,6 +412,16 @@ Catalog[] catalogsInfos = gravitinoMetaLake.listCatalogsInfo(); // ... ``` + + + +```python +# ... +# Assuming you have created a metalake named `metalake` and a catalog named `catalog` +catalogs_info = gravitino_client.list_catalogs_info() +# ... +``` + @@ -268,7 +429,7 @@ Catalog[] catalogsInfos = gravitinoMetaLake.listCatalogsInfo(); ## Schema operations :::tip -Users should create a metalake and a catalog before creating a schema. +Users should create a metalake and a catalog, then ensure that the metalake and catalog are enabled before operating schemas. 
::: ### Create a schema @@ -518,7 +679,7 @@ schema_list: List[NameIdentifier] = catalog.as_schemas().list_schemas() ## Table operations :::tip -Users should create a metalake, a catalog and a schema before creating a table. +Users should create a metalake, a catalog and a schema, then ensure that the metalake and catalog are enabled before before operating tables. ::: ### Create a table diff --git a/docs/open-api/catalogs.yaml b/docs/open-api/catalogs.yaml index 828b74d2d69..8e48a7a3938 100644 --- a/docs/open-api/catalogs.yaml +++ b/docs/open-api/catalogs.yaml @@ -201,11 +201,47 @@ paths: "5xx": $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" + patch: + tags: + - catalog + summary: set catalog in-use + operationId: setCatalog + description: Set a specific catalog in-use or not in-use + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CatalogSetRequest" + responses: + "200": + $ref: "./openapi.yaml#/components/responses/BaseResponse" + "404": + description: Not Found - The metalake does not exist + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + examples: + NoSuchMetalakeException: + $ref: "#/components/examples/NoSuchCatalogException" + "500": + description: + Internal server error. It is possible that the server + encountered a storage issue. + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + + + delete: tags: - catalog summary: Drop catalog operationId: dropCatalog + parameters: + - $ref: "./openapi.yaml#/components/parameters/force" responses: "200": $ref: "./openapi.yaml#/components/responses/DropResponse" @@ -339,6 +375,18 @@ components: additionalProperties: type: string + CatalogSetRequest: + type: object + required: + - inUse + properties: + inUse: + type: boolean + description: The in-use status of the catalog to set + example: { + "inUse": true + } + CatalogUpdatesRequest: type: object required: diff --git a/docs/open-api/metalakes.yaml b/docs/open-api/metalakes.yaml index 78739a453f2..80e40e184a2 100644 --- a/docs/open-api/metalakes.yaml +++ b/docs/open-api/metalakes.yaml @@ -135,12 +135,46 @@ paths: schema: $ref: "./openapi.yaml#/components/schemas/ErrorModel" + patch: + tags: + - metalake + summary: set metalake in-use + operationId: setMetalake + description: Set a specified metalake in-use or not in-use + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/MetalakeSetRequest" + responses: + "200": + $ref: "./openapi.yaml#/components/responses/BaseResponse" + "404": + description: Not Found - The metalake does not exist + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + examples: + NoSuchMetalakeException: + $ref: "#/components/examples/NoSuchMetalakeException" + "500": + description: + Internal server error. It is possible that the server + encountered a storage issue. 
+ content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + delete: tags: - metalake summary: Drop metalake operationId: dropMetalake description: Drops a specified metalake + parameters: + - $ref: "./openapi.yaml#/components/parameters/force" responses: "200": $ref: "./openapi.yaml#/components/responses/DropResponse" @@ -173,6 +207,18 @@ components: additionalProperties: type: string + MetalakeSetRequest: + type: object + required: + - inUse + properties: + inUse: + type: boolean + description: The in-use status of the metalake to set + example: { + "inUse": true + } + MetalakeUpdatesRequest: type: object required: diff --git a/docs/open-api/openapi.yaml b/docs/open-api/openapi.yaml index 24bc0f2ce00..69d3f4cbb9f 100644 --- a/docs/open-api/openapi.yaml +++ b/docs/open-api/openapi.yaml @@ -362,6 +362,25 @@ components: type: boolean description: Whether the set operation was successful + BaseResponse: + description: Represents a response for a base operation + content: + application/vnd.gravitino.v1+json: + schema: + type: object + required: + - code + properties: + code: + type: integer + format: int32 + description: Status code of the response + enum: + - 0 + example: { + "code": 0 + } + parameters: metalake: name: metalake @@ -467,6 +486,15 @@ components: schema: type: string + force: + name: force + in: query + description: Force the operation to be executed + required: false + schema: + type: boolean + default: false + securitySchemes: OAuth2WithJWT: From 65950f3d244d68b865e723bd780bdc4912108ec0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 15:47:26 +0800 Subject: [PATCH 016/123] build(deps): bump micromatch from 4.0.7 to 4.0.8 in /web/web (#5259) Bumps [micromatch](https://github.com/micromatch/micromatch) from 4.0.7 to 4.0.8.
Release notes

Sourced from micromatch's releases.

4.0.8

Ultimate release that fixes both CVE-2024-4067 and CVE-2024-4068. We consider the issues low-priority, so even if you see automated scanners saying otherwise, don't be scared.

Changelog

Sourced from micromatch's changelog.

[4.0.8] - 2024-08-22

  • backported CVE-2024-4067 fix (from v4.0.6) over to 4.x branch
Commits
  • 8bd704e 4.0.8
  • a0e6841 run verb to generate README documentation
  • 4ec2884 Merge branch 'v4' into hauserkristof-feature/v4.0.8
  • 03aa805 Merge pull request #266 from hauserkristof/feature/v4.0.8
  • 814f5f7 lint
  • 67fcce6 fix: CHANGELOG about braces & CVE-2024-4068, v4.0.5
  • 113f2e3 fix: CVE numbers in CHANGELOG
  • d9dbd9a feat: updated CHANGELOG
  • 2ab1315 fix: use actions/setup-node@v4
  • 1406ea3 feat: rework test to work on macos with node 10,12 and 14
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=micromatch&package-manager=npm_and_yarn&previous-version=4.0.7&new-version=4.0.8)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/web/pnpm-lock.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/web/web/pnpm-lock.yaml b/web/web/pnpm-lock.yaml index d085a9181cc..e5c77f19178 100644 --- a/web/web/pnpm-lock.yaml +++ b/web/web/pnpm-lock.yaml @@ -1994,8 +1994,8 @@ packages: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} - micromatch@4.0.7: - resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} engines: {node: '>=8.6'} mime-db@1.52.0: @@ -4782,7 +4782,7 @@ snapshots: '@nodelib/fs.walk': 1.2.8 glob-parent: 5.1.2 merge2: 1.4.1 - micromatch: 4.0.7 + micromatch: 4.0.8 fast-json-stable-stringify@2.1.0: {} @@ -5241,7 +5241,7 @@ snapshots: merge2@1.4.1: {} - micromatch@4.0.7: + micromatch@4.0.8: dependencies: braces: 3.0.3 picomatch: 2.3.1 @@ -6247,7 +6247,7 @@ snapshots: is-glob: 4.0.3 jiti: 1.21.6 lilconfig: 2.1.0 - micromatch: 4.0.7 + micromatch: 4.0.8 normalize-path: 3.0.0 object-hash: 3.0.0 picocolors: 1.0.1 From be305faf58d0d17a45b9667a295fbacb7e4e3ff2 Mon Sep 17 00:00:00 2001 From: mchades Date: Fri, 25 Oct 2024 17:15:05 +0800 Subject: [PATCH 017/123] [#4913] fix(API): fix call bucketTransform arguments failed (#5268) ### What changes were proposed in this pull request? fix call bucketTransform arguments failed ### Why are the changes needed? Fix: #4913 ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? tests added --- .../rel/expressions/transforms/Transforms.java | 2 +- .../org/apache/gravitino/rel/TestTransforms.java | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/api/src/main/java/org/apache/gravitino/rel/expressions/transforms/Transforms.java b/api/src/main/java/org/apache/gravitino/rel/expressions/transforms/Transforms.java index 73cb33b553b..035b3305955 100644 --- a/api/src/main/java/org/apache/gravitino/rel/expressions/transforms/Transforms.java +++ b/api/src/main/java/org/apache/gravitino/rel/expressions/transforms/Transforms.java @@ -447,7 +447,7 @@ public String name() { /** @return The arguments to the transform. 
*/ @Override public Expression[] arguments() { - return ObjectArrays.concat(numBuckets, fields); + return ObjectArrays.concat(new Expression[] {numBuckets}, fields, Expression.class); } @Override diff --git a/api/src/test/java/org/apache/gravitino/rel/TestTransforms.java b/api/src/test/java/org/apache/gravitino/rel/TestTransforms.java index 615e2e39004..6404ac555a6 100644 --- a/api/src/test/java/org/apache/gravitino/rel/TestTransforms.java +++ b/api/src/test/java/org/apache/gravitino/rel/TestTransforms.java @@ -35,11 +35,25 @@ import org.apache.gravitino.rel.expressions.NamedReference; import org.apache.gravitino.rel.expressions.literals.Literals; import org.apache.gravitino.rel.expressions.transforms.Transform; +import org.apache.gravitino.rel.expressions.transforms.Transforms; import org.apache.gravitino.rel.types.Types; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class TestTransforms { + @Test + public void testBucketTransform() { + Column column = Column.of("col_1", Types.ByteType.get()); + String[] fieldName = new String[] {column.name()}; + + Transform bucket = Transforms.bucket(10, fieldName); + Expression[] arguments = bucket.arguments(); + Assertions.assertEquals(2, arguments.length); + Assertions.assertInstanceOf(Literals.LiteralImpl.class, arguments[0]); + Assertions.assertEquals(10, ((Literals.LiteralImpl) arguments[0]).value()); + Assertions.assertEquals(fieldName, ((NamedReference.FieldReference) arguments[1]).fieldName()); + } + @Test public void testSingleFieldTransform() { Column column = Column.of("col_1", Types.ByteType.get()); From e751df97d1e50a9400019db30c0c62bcae880b92 Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 10:46:23 +1100 Subject: [PATCH 018/123] fix README --- clients/cli/docs/README.md | 128 +------------------------------------ 1 file changed, 2 insertions(+), 126 deletions(-) diff --git a/clients/cli/docs/README.md b/clients/cli/docs/README.md index 1f3224d9b77..2864757f563 100644 --- a/clients/cli/docs/README.md +++ b/clients/cli/docs/README.md @@ -23,28 +23,18 @@ Apache Gravitino CLI is a command-line tool that interacts with the Gravitino se ## Table of Contents -- [Features](#features) - [Installation](#installation) -- [Usage](#usage) -- [Commands](#commands) - [Running Tests](#running-tests) - [Contributing](#contributing) - [License](#license) -## Features - -- Retrieve server version -- Provide help on usage -- Manage Gravitino entities such as Metalakes, Catalogs, Schemas, and Tables -- List details about Graviotino entities - ## Installation ### Prerequisites Before you can build and run this project, it is suggested you have the following installed: -- Java 11 or higher +- Java 8 or higher ### Build the Project @@ -62,127 +52,13 @@ Before you can build and run this project, it is suggested you have the followin 3. Create an alias: ```bash - alias gcli='java -jar clients/cli/build/libs/gravitino-cli-0.7.0-incubating-SNAPSHOT.jar' + alias gcli='java -jar clients/cli/build/libs/gravitino-cli-*-incubating-SNAPSHOT.jar' ``` 3. 
Test the command: ```bash gcli --help ``` -## Usage - -To run the Gravitino CLI, use the following command structure: - -```bash -usage: gcli [metalake|catalog|schema|table] [list|details|create|delete|update|set|remove|properties] [options] - -b,--bootstrap Kafka bootstrap servers - -C,--create create an entity - -c,--comment entity comment - -D,--details list details about an entity - -d,--database database name - -e,--entity entity type - -h,--help command help information - -j,--jdbcurl JDBC URL - -L,--list list entity children - -l,--user database username - -m,--metastore Hive metastore URI - -n,--name full entity name (dot separated) - -P,--properties show an entities properties - -p,--password database password - -R,--delete delete an entity - -r,--rename new entity name - -s,--schema schema name - -t,--table table name - -u,--user database username - -U,--update update an entity - -v,--value property value - -w,--warehouse warehouse name - -x,--command one of: list, details, create, delete, or update -``` - -The command line can be used in several ways to achieve the same results, depending on your preference. -```bash -gcli catalog details --name metalake_demo.catalog_postgres -gcli catalog --command details -name metalake_demo.catalog_postgres -gcli --entity catalog --command details -name metalake_demo.catalog_postgres -gcli catalog details --metalake metalake_demo --catalog catalog_postgres -gcli details --metalake metalake_demo --catalog catalog_postgres -gcli --metalake metalake_demo --catalog catalog_postgres -gcli --command details --metalake metalake_demo --catalog catalog_postgres -``` -The form `gcli [options]` is used in this document. - -## Commands -The following commands are available for entity management: - -list: List available entities -details: Show detailed information about an entity -create: Create a new entity -delete: Delete an existing entity -update: Update an existing entity -set: Used to set properties and tags -remove: Used to remove properties and tags -properties: Used to list properties - -### Examples -List All Metalakes - -```bash -gcli list -``` - -Get Details of a Specific Metalake - -```bash -gcli metalake details -name my-metalake -``` - -List Tables in a Catalog - -```bash -gcli metalake list -name my_metalake.my_catalog -``` - -Create a Metalake - -```bash -gcli metalake create -name my_metalake -comment "This is my metalake" -``` - -Create a Catalog - -```bash -gcli catalog create -name metalake_demo.iceberg --provider iceberg --metastore thrift://hive-host:9083 --warehouse hdfs://hdfs-host:9000/user/iceberg/warehouse -``` - -Delete a Catalog - -```bash -gcli catalog delete -name my_metalake.my_catalog -``` - -Rename a Metalake - -```bash -gcli metalake update -name metalake_demo -rename demo -``` - -Update a Metalake's comment - -```bash -gcli metalake update -name metalake_demo -comment "new comment" -``` - -### Setting Metalake name - -As dealing with one Metalake is a typical scenario, you can set the Metalake name in several ways. - -1. Passed in on the command line either as the first part of the entities name or via the `--metalake` parameter. -2. Set via the 'GRAVITINO_METALAKE' environment variable. -3. Placed in the Gravitino configuration file `~/.gravitino` by adding a line like `metalake=metalake_demo`. - -The command line option overrides the other options and the environment variable overrides the value in the configuration file. - ## Running Tests This project includes a suite of unit tests to verify its functionality. 
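Stepping back to the `bucketTransform` fix in #5268 above: the failure comes from a Guava `ObjectArrays.concat` overload pitfall. The two-argument `concat(T element, T[] array)` allocates its result array with the runtime component type of `array`, so prepending a `Literal` to a `NamedReference[]` fails with an `ArrayStoreException` at runtime. The sketch below reproduces the behaviour in isolation; `Expression`, `Literal`, and `FieldReference` here are simplified stand-ins for the Gravitino API types, and `ConcatPitfall` is a hypothetical class name rather than code from the repository:

```java
import com.google.common.collect.ObjectArrays;

public class ConcatPitfall {
  // Simplified stand-ins for the Gravitino Expression hierarchy.
  interface Expression {}

  static final class Literal implements Expression {}

  static final class FieldReference implements Expression {}

  public static void main(String[] args) {
    Expression numBuckets = new Literal();
    FieldReference[] fields = {new FieldReference()};

    try {
      // Old call: resolves to concat(T element, T[] array). Guava allocates the
      // result with fields' runtime type (FieldReference[]), so storing the
      // Literal into slot 0 fails at runtime.
      Expression[] broken = ObjectArrays.concat(numBuckets, fields);
      System.out.println("unexpected success: " + broken.length);
    } catch (ArrayStoreException e) {
      System.out.println("two-argument overload fails: " + e);
    }

    // Fixed call from the patch: the three-argument overload allocates an
    // Expression[] explicitly, so mixing Expression subtypes is fine.
    Expression[] fixed =
        ObjectArrays.concat(new Expression[] {numBuckets}, fields, Expression.class);
    System.out.println("three-argument overload yields " + fixed.length + " arguments");
  }
}
```

Run as-is with Guava on the classpath, the first call should raise the exception and the second should report a length of 2, mirroring the assertion added in `TestTransforms.testBucketTransform`.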
From aa0e958500ef75adb2a76c5524c6440ba66799b9 Mon Sep 17 00:00:00 2001 From: mchades Date: Mon, 28 Oct 2024 10:15:05 +0800 Subject: [PATCH 019/123] [MINOR] improvement(release): update docs version when release (#5090) ### What changes were proposed in this pull request? - remove upload Python packages to RELEASE_STAGING - update the docs version when preparing release ### Why are the changes needed? improve the release script ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? by hand --- dev/release/release-build.sh | 26 ++++++++++++++++++-------- dev/release/release-tag.sh | 3 +++ dev/release/update-java-doc-version.sh | 15 ++++++++++----- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/dev/release/release-build.sh b/dev/release/release-build.sh index a66b338dd0e..73271119772 100755 --- a/dev/release/release-build.sh +++ b/dev/release/release-build.sh @@ -96,6 +96,9 @@ BASE_DIR=$(pwd) init_java init_gradle +function uriencode { jq -nSRr --arg v "$1" '$v|@uri'; } +declare -r ENCODED_ASF_PASSWORD=$(uriencode "$ASF_PASSWORD") + if [[ "$1" == "finalize" ]]; then if [[ -z "$PYPI_API_TOKEN" ]]; then error 'The environment variable PYPI_API_TOKEN is not set. Exiting.' @@ -110,7 +113,7 @@ if [[ "$1" == "finalize" ]]; then echo "v$RELEASE_VERSION already exists. Skip creating it." else rm -rf gravitino - git clone "https://$ASF_USERNAME:$ASF_PASSWORD@$ASF_GRAVITINO_REPO" -b main + git clone "https://$ASF_USERNAME:$ENCODED_ASF_PASSWORD@$ASF_GRAVITINO_REPO" -b main cd gravitino git tag "v$RELEASE_VERSION" "$RELEASE_TAG" git push origin "v$RELEASE_VERSION" @@ -119,19 +122,20 @@ if [[ "$1" == "finalize" ]]; then echo "git tag v$RELEASE_VERSION created" fi - # download Gravitino Python binary from the dev directory and upload to PyPi. + # upload to PyPi. + # todo: uncomment below codes if possible, it will download Gravitino Python binary from the dev directory echo "Uploading Gravitino to PyPi" - svn co --depth=empty "$RELEASE_STAGING_LOCATION/$RELEASE_TAG" svn-gravitino - cd svn-gravitino + # svn co --depth=empty "$RELEASE_STAGING_LOCATION/$RELEASE_TAG" svn-gravitino + # cd svn-gravitino PYGRAVITINO_VERSION=`echo "$RELEASE_VERSION" | sed -e "s/-/./" -e "s/preview/dev/"` - svn update "apache_gravitino-$PYGRAVITINO_VERSION.tar.gz" - svn update "apache_gravitino-$PYGRAVITINO_VERSION.tar.gz.asc" + # svn update "apache_gravitino-$PYGRAVITINO_VERSION.tar.gz" + # svn update "apache_gravitino-$PYGRAVITINO_VERSION.tar.gz.asc" twine upload -u __token__ -p $PYPI_API_TOKEN \ --repository-url https://upload.pypi.org/legacy/ \ "apache_gravitino-$PYGRAVITINO_VERSION.tar.gz" \ "apache_gravitino-$PYGRAVITINO_VERSION.tar.gz.asc" - cd .. - rm -rf svn-gravitino + # cd .. + # rm -rf svn-gravitino echo "Python Gravitino package uploaded" # Moves the binaries from dev directory to release directory. 
@@ -252,6 +256,12 @@ if [[ "$1" == "package" ]]; then echo "Copying release tarballs" cp gravitino-* "svn-gravitino/${DEST_DIR_NAME}/" cp apache_gravitino-* "svn-gravitino/${DEST_DIR_NAME}/" + # remove python client tarball + # todo: remove this when python version supports include '-incubating' or the project is graduated + rm "svn-gravitino/${DEST_DIR_NAME}/apache_gravitino-$PYGRAVITINO_VERSION.tar.gz" + rm "svn-gravitino/${DEST_DIR_NAME}/apache_gravitino-$PYGRAVITINO_VERSION.tar.gz.asc" + rm "svn-gravitino/${DEST_DIR_NAME}/apache_gravitino-$PYGRAVITINO_VERSION.tar.gz.sha512" + svn add "svn-gravitino/${DEST_DIR_NAME}" cd svn-gravitino diff --git a/dev/release/release-tag.sh b/dev/release/release-tag.sh index 98fc8db85b7..112ab8353dd 100755 --- a/dev/release/release-tag.sh +++ b/dev/release/release-tag.sh @@ -83,6 +83,9 @@ PYGRAVITINO_NEXT_VERSION=$(echo $NEXT_VERSION | sed 's/-incubating-SNAPSHOT/.dev sed -i".tmp1" 's/version = .*$/version = '"$RELEASE_VERSION"'/g' gradle.properties sed -i".tmp2" 's/ version=.*$/ version="'"$PYGRAVITINO_RELEASE_VERSION"'",/g' clients/client-python/setup.py +# update docs version +"$SELF/update-java-doc-version.sh" "$RELEASE_VERSION" "$SELF/gravitino" + git commit -a -m "Preparing Gravitino release $RELEASE_TAG" echo "Creating tag $RELEASE_TAG at the head of $GIT_BRANCH" git tag $RELEASE_TAG diff --git a/dev/release/update-java-doc-version.sh b/dev/release/update-java-doc-version.sh index 13a29c37d20..4085ad8ab55 100755 --- a/dev/release/update-java-doc-version.sh +++ b/dev/release/update-java-doc-version.sh @@ -20,27 +20,32 @@ set -e -if [[ $# -ne 1 ]]; then - echo "Usage: $0 " +if [[ $# -lt 1 || $# -gt 2 ]]; then + echo "Usage: $0 [project_dir]" exit 1 fi NEW_VERSION=$1 -cd "$(cd "$(dirname "$0")" && pwd)/../../docs" +PROJECT_DIR=${2:-$(cd "$(dirname "$0")" && pwd)/../../} +cd "${PROJECT_DIR}/docs" CURRENT_VERSION=`cat index.md| grep pathname:///docs | head -n 1 | awk -F '///docs' '{print $2}' | awk -F '/' '{print $2}'` if [[ "${NEW_VERSION}" == "${CURRENT_VERSION}" ]]; then - echo "The new version is the same as the current version." 
- exit 1 + echo "The new version is the same as the current version: ${NEW_VERSION}" + exit 0 fi # Detect the operating system if [[ "$OSTYPE" == "darwin"* ]]; then # macOS find "$(pwd)" -name "*.md" | xargs sed -i '' "s|/docs/${CURRENT_VERSION}/api|/docs/${NEW_VERSION}/api|g" + # modify open-api/openapi.yaml + sed -i '' "s|version: ${CURRENT_VERSION}|version: ${NEW_VERSION}|g" open-api/openapi.yaml elif [[ "$OSTYPE" == "linux-gnu"* ]]; then # Linux find "$(pwd)" -name "*.md" | xargs sed -i "s|/docs/${CURRENT_VERSION}/api|/docs/${NEW_VERSION}/api|g" + # modify open-api/openapi.yaml + sed -i "s|version: ${CURRENT_VERSION}|version: ${NEW_VERSION}|g" open-api/openapi.yaml else echo "Unsupported OS" exit 1 From 3d09940005cf959d7069994295eccf5cf8ce0556 Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 13:42:54 +1100 Subject: [PATCH 020/123] add entity and options --- .../main/java/org/apache/gravitino/cli/CommandEntities.java | 2 ++ .../main/java/org/apache/gravitino/cli/GravitinoOptions.java | 2 ++ .../java/org/apache/gravitino/cli/TestCommandEntities.java | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/CommandEntities.java b/clients/cli/src/main/java/org/apache/gravitino/cli/CommandEntities.java index 12d869c429c..0f826b4f8bd 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/CommandEntities.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/CommandEntities.java @@ -34,6 +34,7 @@ public class CommandEntities { public static final String USER = "user"; public static final String GROUP = "group"; public static final String TAG = "tag"; + public static final String OWNER = "owner"; private static final HashSet VALID_ENTITIES = new HashSet<>(); @@ -46,6 +47,7 @@ public class CommandEntities { VALID_ENTITIES.add(USER); VALID_ENTITIES.add(GROUP); VALID_ENTITIES.add(TAG); + VALID_ENTITIES.add(OWNER); } /** diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java index eba0e06750a..b9e1e22a9db 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java @@ -46,6 +46,7 @@ public class GravitinoOptions { public static final String BOOTSTRAP = "bootstrap"; public static final String GROUP = "group"; public static final String TAG = "tag"; + public static final String OWNER = "owner"; /** * Builds and returns the CLI options for Gravitino. 
@@ -81,6 +82,7 @@ public Options options() { options.addOption(createArgOption("d", DATABASE, "database name")); options.addOption(createArgOption("g", GROUP, "group name")); options.addOption(createArgOption("a", TAG, "tag name")); + options.addOption(createArgOption("o", OWNER, "owner name or group name")); return options; } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java index cd2a55e2a3b..4ea4ee5fe84 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java @@ -37,7 +37,8 @@ public void validEntities() { CommandEntities.isValidEntity(CommandEntities.SCHEMA), "SCHEMA should be a valid entity"); assertTrue( CommandEntities.isValidEntity(CommandEntities.TABLE), "TABLE should be a valid entity"); - assertTrue(CommandEntities.isValidEntity(CommandEntities.TAG), "TAG should be a valid entity"); + assertTrue(CommandEntities.isValidEntity(CommandEntities.TAG), "TAG should be a valid entity"); + assertTrue(CommandEntities.isValidEntity(CommandEntities.OWNER), "OWNER should be a valid entity"); } @Test From d2dbd3dc71545ea21fdf96e02831680578aed83c Mon Sep 17 00:00:00 2001 From: Qian Xia Date: Mon, 28 Oct 2024 10:44:43 +0800 Subject: [PATCH 021/123] [#4641] fix(web): improvement scroll bar on the catalog detail page ellipsis display (#5271) ### What changes were proposed in this pull request? improvement scroll bar on the catalog detail page ellipsis display ![image](https://github.com/user-attachments/assets/de7e44ea-0c37-4b34-ab52-c171259e3bf2) Fix: #4641 ### Does this PR introduce _any_ user-facing change? N/A ### How was this patch tested? manually --- web/web/src/components/DetailsDrawer.js | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/web/web/src/components/DetailsDrawer.js b/web/web/src/components/DetailsDrawer.js index c470af43cc5..858e2d5ab85 100644 --- a/web/web/src/components/DetailsDrawer.js +++ b/web/web/src/components/DetailsDrawer.js @@ -205,16 +205,19 @@ const DetailsDrawer = props => { {properties.map((item, index) => { return ( - + {item.key}} placement='bottom' > - {item.key.length > 22 ? `${item.key.substring(0, 22)}...` : item.key} + {item.key} @@ -224,7 +227,7 @@ const DetailsDrawer = props => { title={{item.value}} placement='bottom' > - {item.value.length > 22 ? `${item.value.substring(0, 22)}...` : item.value} + {item.value} )} From c5ecc852959daaf575b45d27194795a6477086ef Mon Sep 17 00:00:00 2001 From: Ricco Chen Date: Mon, 28 Oct 2024 11:42:16 +0800 Subject: [PATCH 022/123] [#4968] improvement(api): Unify the modification behavior of the comment field (#5121) ### What changes were proposed in this pull request? - deprecated the removeComment change - the new comment of updateComment supports null and empty string ### Why are the changes needed? Fix: #4968 ### Does this PR introduce _any_ user-facing change? N/A ### How was this patch tested? 
Pass existing tests Co-authored-by: chenzeping.ricco --- .../org/apache/gravitino/file/FilesetChange.java | 7 ++++++- .../hadoop/TestHadoopCatalogOperations.java | 2 +- .../hadoop/integration/test/HadoopCatalogIT.java | 2 +- .../kafka/integration/test/CatalogKafkaIT.java | 4 ++++ .../gravitino/client/TestFilesetCatalog.java | 2 +- .../client/integration/test/CatalogIT.java | 6 +++++- .../client/integration/test/MetalakeIT.java | 8 +++++++- .../gravitino/client/integration/test/TagIT.java | 8 ++++++++ .../client-python/gravitino/api/fileset_change.py | 11 +++++++++-- .../gravitino/catalog/fileset_catalog.py | 2 +- .../dto/requests/catalog_update_request.py | 4 ++-- .../dto/requests/fileset_update_request.py | 15 +++++++-------- .../dto/requests/metalake_update_request.py | 9 ++------- .../tests/integration/test_catalog.py | 8 ++++++++ .../tests/integration/test_fileset_catalog.py | 2 +- .../tests/integration/test_metalake.py | 8 ++++++++ .../dto/requests/CatalogUpdateRequest.java | 12 ++---------- .../dto/requests/FilesetUpdateRequest.java | 14 +++----------- .../dto/requests/MetalakeUpdateRequest.java | 12 ++---------- .../dto/requests/TableUpdateRequest.java | 12 ++---------- .../gravitino/dto/requests/TagUpdateRequest.java | 6 ++---- .../dto/requests/TopicUpdateRequest.java | 12 ++---------- .../catalog/TestFilesetOperationDispatcher.java | 2 +- docs/manage-fileset-metadata-using-gravitino.md | 14 +++++++------- .../server/web/rest/TestFilesetOperations.java | 4 ++-- 25 files changed, 94 insertions(+), 92 deletions(-) diff --git a/api/src/main/java/org/apache/gravitino/file/FilesetChange.java b/api/src/main/java/org/apache/gravitino/file/FilesetChange.java index 2df992ece5b..6b79aed41ac 100644 --- a/api/src/main/java/org/apache/gravitino/file/FilesetChange.java +++ b/api/src/main/java/org/apache/gravitino/file/FilesetChange.java @@ -73,7 +73,9 @@ static FilesetChange removeProperty(String property) { * Creates a new fileset change to remove comment from the fileset. * * @return The fileset change. + * @deprecated Use {@link #updateComment(String)} with null value as the argument instead. */ + @Deprecated static FilesetChange removeComment() { return RemoveComment.getInstance(); } @@ -310,7 +312,10 @@ public String toString() { } } - /** A fileset change to remove comment from the fileset. */ + /** + * A fileset change to remove comment from the fileset. Use {@link UpdateFilesetComment} with null + * value as the argument instead. 
+ */ final class RemoveComment implements FilesetChange { private static final RemoveComment INSTANCE = new RemoveComment(); diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java index 2b89180a8d1..9b5b61f27b0 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/TestHadoopCatalogOperations.java @@ -770,7 +770,7 @@ public void testRemoveFilesetComment() throws IOException { String name = "fileset27"; Fileset fileset = createFileset(name, schemaName, comment, Fileset.Type.MANAGED, null, null); - FilesetChange change1 = FilesetChange.removeComment(); + FilesetChange change1 = FilesetChange.updateComment(null); try (SecureHadoopCatalogOperations ops = new SecureHadoopCatalogOperations(store)) { ops.initialize(Maps.newHashMap(), randomCatalogInfo(), HADOOP_PROPERTIES_METADATA); NameIdentifier filesetIdent = NameIdentifier.of("m1", "c1", schemaName, name); diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java index b272bd7a889..6cd10cbf24e 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopCatalogIT.java @@ -575,7 +575,7 @@ public void testFilesetRemoveComment() throws IOException { catalog .asFilesetCatalog() .alterFileset( - NameIdentifier.of(schemaName, filesetName), FilesetChange.removeComment()); + NameIdentifier.of(schemaName, filesetName), FilesetChange.updateComment(null)); assertFilesetExists(filesetName); // verify fileset is updated diff --git a/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java b/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java index dc91a3dda57..52ebb8dab0d 100644 --- a/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java +++ b/catalogs/catalog-kafka/src/test/java/org/apache/gravitino/catalog/kafka/integration/test/CatalogKafkaIT.java @@ -172,6 +172,10 @@ public void testCatalog() throws ExecutionException, InterruptedException { Assertions.assertEquals("new comment", alteredCatalog.comment()); Assertions.assertFalse(alteredCatalog.properties().containsKey("key1")); + Catalog updateNullComment = + metalake.alterCatalog(catalogName, CatalogChange.updateComment(null)); + Assertions.assertNull(updateNullComment.comment()); + // test drop catalog boolean dropped = metalake.dropCatalog(catalogName, true); Assertions.assertTrue(dropped); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/TestFilesetCatalog.java b/clients/client-java/src/test/java/org/apache/gravitino/client/TestFilesetCatalog.java index 446af40ed9b..f45adfab5f6 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/TestFilesetCatalog.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/TestFilesetCatalog.java @@ -378,7 +378,7 @@ public void testAlterFileset() throws JsonProcessingException { 
assertFileset(mockFileset3, res3); // Test remove fileset comment - FilesetUpdateRequest req4 = new FilesetUpdateRequest.RemoveFilesetCommentRequest(); + FilesetUpdateRequest req4 = new FilesetUpdateRequest.UpdateFilesetCommentRequest(null); FilesetDTO mockFileset4 = mockFilesetDTO("new name", Fileset.Type.MANAGED, null, "mock location", ImmutableMap.of()); FilesetResponse resp4 = new FilesetResponse(mockFileset4); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java index 045c0ad694f..bb6394b6c72 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/CatalogIT.java @@ -221,7 +221,7 @@ public void testCatalogAvailable() { CatalogNotInUseException.class, () -> filesetOps.alterFileset( - NameIdentifier.of("dummy", "dummy"), FilesetChange.removeComment())); + NameIdentifier.of("dummy", "dummy"), FilesetChange.updateComment(null))); Assertions.assertTrue(metalake.dropCatalog(catalogName), "catalog should be dropped"); Assertions.assertFalse(metalake.dropCatalog(catalogName), "catalog should be non-existent"); @@ -399,6 +399,10 @@ void testUpdateCatalogWithNullableComment() { metalake.alterCatalog(catalogName, CatalogChange.updateComment("new catalog comment")); Assertions.assertEquals("new catalog comment", updatedCatalog.comment()); + Catalog updateNullComment = + metalake.alterCatalog(catalogName, CatalogChange.updateComment(null)); + Assertions.assertNull(updateNullComment.comment()); + metalake.disableCatalog(catalogName); metalake.dropCatalog(catalogName); } diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java index 3a650646416..7911f02cdf9 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/MetalakeIT.java @@ -22,6 +22,7 @@ import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -219,6 +220,11 @@ public void testUpdateMetalakeWithNullableComment() { new MetalakeChange[] {MetalakeChange.updateComment("new metalake comment")}; GravitinoMetalake updatedMetalake = client.alterMetalake(metalakeNameA, changes); assertEquals("new metalake comment", updatedMetalake.comment()); + + GravitinoMetalake updateNullComment = + client.alterMetalake(metalakeNameA, MetalakeChange.updateComment(null)); + assertNull(updateNullComment.comment()); + assertTrue(client.dropMetalake(metalakeNameA, true)); } @@ -317,7 +323,7 @@ public void testMetalakeAvailable() { MetalakeNotInUseException.class, () -> filesetOps.alterFileset( - NameIdentifier.of("dummy", "dummy"), FilesetChange.removeComment())); + NameIdentifier.of("dummy", "dummy"), FilesetChange.updateComment(null))); Assertions.assertThrows( NonEmptyMetalakeException.class, () -> client.dropMetalake(metalakeName)); diff --git 
a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java index bd95f2ae34e..4278d3f8861 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/TagIT.java @@ -197,6 +197,14 @@ public void testCreateGetAndListTag() { Assertions.assertEquals(tag2, tag3); } + @Test + public void testNullableComment() { + String tagName = GravitinoITUtils.genRandomName("tag_it_tag"); + metalake.createTag(tagName, "comment", Collections.emptyMap()); + Tag alteredTag6 = metalake.alterTag(tagName, TagChange.updateComment(null)); + Assertions.assertNull(alteredTag6.comment()); + } + @Test public void testCreateAndAlterTag() { String tagName = GravitinoITUtils.genRandomName("tag_it_tag"); diff --git a/clients/client-python/gravitino/api/fileset_change.py b/clients/client-python/gravitino/api/fileset_change.py index 2be9bc56804..b723a8a29cc 100644 --- a/clients/client-python/gravitino/api/fileset_change.py +++ b/clients/client-python/gravitino/api/fileset_change.py @@ -81,8 +81,11 @@ def remove_comment(): Returns: The fileset change. + + Deprecated: + Please use `update_comment(str)` with null value as the argument instead. """ - return FilesetChange.RemoveComment() + return FilesetChange.UpdateFilesetComment(None) @dataclass class RenameFileset: @@ -279,7 +282,11 @@ def __str__(self): @dataclass class RemoveComment: - """A fileset change to remove comment from the fileset.""" + """A fileset change to remove comment from the fileset. + + Deprecated: + Please use `UpdateFilesetComment(str)` with null value as the argument instead. + """ def __eq__(self, other) -> bool: """Compares this RemoveComment instance with another object for equality. diff --git a/clients/client-python/gravitino/catalog/fileset_catalog.py b/clients/client-python/gravitino/catalog/fileset_catalog.py index cf91dbdfb5c..ffa252e621e 100644 --- a/clients/client-python/gravitino/catalog/fileset_catalog.py +++ b/clients/client-python/gravitino/catalog/fileset_catalog.py @@ -319,5 +319,5 @@ def to_fileset_update_request(change: FilesetChange): if isinstance(change, FilesetChange.RemoveProperty): return FilesetUpdateRequest.RemoveFilesetPropertyRequest(change.property()) if isinstance(change, FilesetChange.RemoveComment): - return FilesetUpdateRequest.RemoveFilesetCommentRequest() + return FilesetUpdateRequest.UpdateFilesetCommentRequest(None) raise ValueError(f"Unknown change type: {type(change).__name__}") diff --git a/clients/client-python/gravitino/dto/requests/catalog_update_request.py b/clients/client-python/gravitino/dto/requests/catalog_update_request.py index 1164db2990b..1ea4d9953c0 100644 --- a/clients/client-python/gravitino/dto/requests/catalog_update_request.py +++ b/clients/client-python/gravitino/dto/requests/catalog_update_request.py @@ -78,8 +78,8 @@ def catalog_change(self): return CatalogChange.update_comment(self._new_comment) def validate(self): - if not self._new_comment: - raise ValueError('"newComment" field is required and cannot be empty') + """Validates the fields of the request. 
Always pass.""" + pass @dataclass class SetCatalogPropertyRequest(CatalogUpdateRequestBase): diff --git a/clients/client-python/gravitino/dto/requests/fileset_update_request.py b/clients/client-python/gravitino/dto/requests/fileset_update_request.py index 66e001bee9c..9a640d2071d 100644 --- a/clients/client-python/gravitino/dto/requests/fileset_update_request.py +++ b/clients/client-python/gravitino/dto/requests/fileset_update_request.py @@ -79,13 +79,8 @@ def __init__(self, new_comment: str): self._new_comment = new_comment def validate(self): - """Validates the fields of the request. - - Raises: - IllegalArgumentException if the new comment is not set. - """ - if not self._new_comment: - raise ValueError('"new_comment" field is required and cannot be empty') + """Validates the fields of the request. Always pass.""" + pass def fileset_change(self): """Returns the fileset change""" @@ -149,7 +144,11 @@ def fileset_change(self): @dataclass class RemoveFilesetCommentRequest(FilesetUpdateRequestBase): - """Represents a request to remove comment from a Fileset.""" + """Represents a request to remove comment from a Fileset. + + Deprecated: + Please use `UpdateFilesetCommentRequest` with null value as the argument instead. + """ def __init__(self): super().__init__("removeComment") diff --git a/clients/client-python/gravitino/dto/requests/metalake_update_request.py b/clients/client-python/gravitino/dto/requests/metalake_update_request.py index 8e54fe360e1..79551e5dc0d 100644 --- a/clients/client-python/gravitino/dto/requests/metalake_update_request.py +++ b/clients/client-python/gravitino/dto/requests/metalake_update_request.py @@ -74,13 +74,8 @@ def __init__(self, new_comment: str): self._new_comment = new_comment def validate(self): - """Validates the fields of the request. - - Raises: - IllegalArgumentException if the new comment is not set. - """ - if not self._new_comment: - raise ValueError('"newComment" field is required and cannot be empty') + """Validates the fields of the request. 
Always pass.""" + pass def metalake_change(self): return MetalakeChange.update_comment(self._new_comment) diff --git a/clients/client-python/tests/integration/test_catalog.py b/clients/client-python/tests/integration/test_catalog.py index 64208315e6e..bd7933e00ae 100644 --- a/clients/client-python/tests/integration/test_catalog.py +++ b/clients/client-python/tests/integration/test_catalog.py @@ -125,6 +125,14 @@ def test_failed_create_catalog(self): with self.assertRaises(CatalogAlreadyExistsException): _ = self.create_catalog(self.catalog_name) + def test_nullable_comment_catalog(self): + self.create_catalog(self.catalog_name) + changes = (CatalogChange.update_comment(None),) + null_comment_catalog = self.gravitino_client.alter_catalog( + self.catalog_name, *changes + ) + self.assertIsNone(null_comment_catalog.comment()) + def test_alter_catalog(self): catalog = self.create_catalog(self.catalog_name) diff --git a/clients/client-python/tests/integration/test_fileset_catalog.py b/clients/client-python/tests/integration/test_fileset_catalog.py index 754735b16e8..ac3d0a82164 100644 --- a/clients/client-python/tests/integration/test_fileset_catalog.py +++ b/clients/client-python/tests/integration/test_fileset_catalog.py @@ -239,7 +239,7 @@ def test_alter_fileset(self): self.assertEqual(fileset_new.comment(), fileset_new_comment) fileset_comment_removed = catalog.as_fileset_catalog().alter_fileset( - self.fileset_ident, FilesetChange.remove_comment() + self.fileset_ident, FilesetChange.update_comment(None) ) self.assertEqual(fileset_comment_removed.name(), self.fileset_name) self.assertIsNone(fileset_comment_removed.comment()) diff --git a/clients/client-python/tests/integration/test_metalake.py b/clients/client-python/tests/integration/test_metalake.py index 75d3a06f26c..f2b14b67877 100644 --- a/clients/client-python/tests/integration/test_metalake.py +++ b/clients/client-python/tests/integration/test_metalake.py @@ -90,6 +90,14 @@ def test_failed_create_metalake(self): with self.assertRaises(MetalakeAlreadyExistsException): _ = self.create_metalake(self.metalake_name) + def test_nullable_comment_metalake(self): + self.create_metalake(self.metalake_name) + changes = (MetalakeChange.update_comment(None),) + null_comment_metalake = self.gravitino_admin_client.alter_metalake( + self.metalake_name, *changes + ) + self.assertIsNone(null_comment_metalake.comment()) + def test_alter_metalake(self): self.create_metalake(self.metalake_name) diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/CatalogUpdateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/CatalogUpdateRequest.java index 9dfbb4efd2f..aabdf2901fa 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/CatalogUpdateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/CatalogUpdateRequest.java @@ -117,17 +117,9 @@ public UpdateCatalogCommentRequest() { this(null); } - /** - * Validates the fields of the request. - * - * @throws IllegalArgumentException if the new comment is not set. - */ + /** Validates the fields of the request. Always pass. 
*/ @Override - public void validate() throws IllegalArgumentException { - Preconditions.checkArgument( - StringUtils.isNotBlank(newComment), - "\"newComment\" field is required and cannot be empty"); - } + public void validate() throws IllegalArgumentException {} @Override public CatalogChange catalogChange() { diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/FilesetUpdateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/FilesetUpdateRequest.java index c7aa79cb800..82915dcdc19 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/FilesetUpdateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/FilesetUpdateRequest.java @@ -109,17 +109,9 @@ public FilesetChange filesetChange() { return FilesetChange.updateComment(newComment); } - /** - * Validates the request. - * - * @throws IllegalArgumentException if the request is invalid. - */ + /** Validates the fields of the request. Always pass. */ @Override - public void validate() throws IllegalArgumentException { - Preconditions.checkArgument( - StringUtils.isNotBlank(newComment), - "\"newComment\" field is required and cannot be empty"); - } + public void validate() throws IllegalArgumentException {} } /** The fileset update request for setting the properties of a fileset. */ @@ -194,7 +186,7 @@ class RemoveFilesetCommentRequest implements FilesetUpdateRequest { /** @return The fileset change. */ @Override public FilesetChange filesetChange() { - return FilesetChange.removeComment(); + return FilesetChange.updateComment(null); } /** diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeUpdateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeUpdateRequest.java index a5b747aff77..6e01ace8052 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeUpdateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/MetalakeUpdateRequest.java @@ -117,17 +117,9 @@ public UpdateMetalakeCommentRequest() { this(null); } - /** - * Validates the fields of the request. - * - * @throws IllegalArgumentException if the new comment is not set. - */ + /** Validates the fields of the request. Always pass. */ @Override - public void validate() throws IllegalArgumentException { - Preconditions.checkArgument( - StringUtils.isNotBlank(newComment), - "\"newComment\" field is required and cannot be empty"); - } + public void validate() throws IllegalArgumentException {} @Override public MetalakeChange metalakeChange() { diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/TableUpdateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/TableUpdateRequest.java index 936597f4dad..db1702ce6de 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/TableUpdateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/TableUpdateRequest.java @@ -164,17 +164,9 @@ public UpdateTableCommentRequest() { this(null); } - /** - * Validates the request. - * - * @throws IllegalArgumentException If the request is invalid, this exception is thrown. - */ + /** Validates the fields of the request. Always pass. */ @Override - public void validate() throws IllegalArgumentException { - Preconditions.checkArgument( - StringUtils.isNotBlank(newComment), - "\"newComment\" field is required and cannot be empty"); - } + public void validate() throws IllegalArgumentException {} /** * Returns the table change. 
diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/TagUpdateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/TagUpdateRequest.java index 5323ac56548..f27d8359e71 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/TagUpdateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/TagUpdateRequest.java @@ -114,11 +114,9 @@ public TagChange tagChange() { return TagChange.updateComment(newComment); } + /** Validates the fields of the request. Always pass. */ @Override - public void validate() throws IllegalArgumentException { - Preconditions.checkArgument( - StringUtils.isNotBlank(newComment), "\"newComment\" must not be blank"); - } + public void validate() throws IllegalArgumentException {} } /** The tag update request for setting a tag property. */ diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/TopicUpdateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/TopicUpdateRequest.java index 67103b2e5df..add5ae7e99b 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/TopicUpdateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/TopicUpdateRequest.java @@ -72,17 +72,9 @@ public UpdateTopicCommentRequest() { this(null); } - /** - * Validates the request. - * - * @throws IllegalArgumentException If the request is invalid, this exception is thrown. - */ + /** Validates the fields of the request. Always pass. */ @Override - public void validate() throws IllegalArgumentException { - Preconditions.checkArgument( - StringUtils.isNotBlank(newComment), - "\"newComment\" field is required and cannot be empty"); - } + public void validate() throws IllegalArgumentException {} /** * Returns the topic change. diff --git a/core/src/test/java/org/apache/gravitino/catalog/TestFilesetOperationDispatcher.java b/core/src/test/java/org/apache/gravitino/catalog/TestFilesetOperationDispatcher.java index 7ceed9e2e17..4fa3cecbb3d 100644 --- a/core/src/test/java/org/apache/gravitino/catalog/TestFilesetOperationDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/catalog/TestFilesetOperationDispatcher.java @@ -143,7 +143,7 @@ public void testCreateAndAlterFileset() { Assertions.assertEquals(fileset1.name(), alteredFileset2.name()); Assertions.assertEquals("new comment", alteredFileset2.comment()); - FilesetChange[] changes3 = new FilesetChange[] {FilesetChange.removeComment()}; + FilesetChange[] changes3 = new FilesetChange[] {FilesetChange.updateComment(null)}; Fileset alteredFileset3 = filesetOperationDispatcher.alterFileset(filesetIdent1, changes3); Assertions.assertEquals(fileset1.name(), alteredFileset3.name()); diff --git a/docs/manage-fileset-metadata-using-gravitino.md b/docs/manage-fileset-metadata-using-gravitino.md index de478efb731..fe1d3304070 100644 --- a/docs/manage-fileset-metadata-using-gravitino.md +++ b/docs/manage-fileset-metadata-using-gravitino.md @@ -389,13 +389,13 @@ fileset_new = catalog.as_fileset_catalog().alter_fileset(NameIdentifier.of("sche Currently, Gravitino supports the following changes to a fileset: -| Supported modification | JSON | Java | -|----------------------------|--------------------------------------------------------------|-----------------------------------------------| -| Rename a fileset | `{"@type":"rename","newName":"fileset_renamed"}` | `FilesetChange.rename("fileset_renamed")` | -| Update a comment | `{"@type":"updateComment","newComment":"new_comment"}` | `FilesetChange.updateComment("new_comment")` | -| Set a 
fileset property | `{"@type":"setProperty","property":"key1","value":"value1"}` | `FilesetChange.setProperty("key1", "value1")` | -| Remove a fileset property | `{"@type":"removeProperty","property":"key1"}` | `FilesetChange.removeProperty("key1")` | -| Remove comment | `{"@type":"removeComment"}` | `FilesetChange.removeComment()` | +| Supported modification | JSON | Java | +|-----------------------------|--------------------------------------------------------------|-----------------------------------------------| +| Rename a fileset | `{"@type":"rename","newName":"fileset_renamed"}` | `FilesetChange.rename("fileset_renamed")` | +| Update a comment | `{"@type":"updateComment","newComment":"new_comment"}` | `FilesetChange.updateComment("new_comment")` | +| Set a fileset property | `{"@type":"setProperty","property":"key1","value":"value1"}` | `FilesetChange.setProperty("key1", "value1")` | +| Remove a fileset property | `{"@type":"removeProperty","property":"key1"}` | `FilesetChange.removeProperty("key1")` | +| Remove comment (deprecated) | `{"@type":"removeComment"}` | `FilesetChange.removeComment()` | ### Drop a fileset diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestFilesetOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestFilesetOperations.java index 62375dc4b3e..4258346e49e 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestFilesetOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestFilesetOperations.java @@ -370,7 +370,7 @@ public void testUpdateFilesetComment() { @Test public void testRemoveFilesetComment() { - FilesetUpdateRequest req = new FilesetUpdateRequest.RemoveFilesetCommentRequest(); + FilesetUpdateRequest req = new FilesetUpdateRequest.UpdateFilesetCommentRequest(null); Fileset fileset = mockFileset("fileset1", Fileset.Type.MANAGED, null, "mock location", ImmutableMap.of()); assertUpdateFileset(new FilesetUpdatesRequest(ImmutableList.of(req)), fileset); @@ -387,7 +387,7 @@ public void testMultiUpdateRequest() { // remove k2 FilesetUpdateRequest req5 = new FilesetUpdateRequest.RemoveFilesetPropertiesRequest("k2"); // remove comment - FilesetUpdateRequest req6 = new FilesetUpdateRequest.RemoveFilesetCommentRequest(); + FilesetUpdateRequest req6 = new FilesetUpdateRequest.UpdateFilesetCommentRequest(null); Fileset fileset = mockFileset( From 5c2e6c180a3817eaff0b9d9fd52d59d808ef3ca9 Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 17:01:12 +1100 Subject: [PATCH 023/123] set and display owners --- .../apache/gravitino/cli/ErrorMessages.java | 1 + .../gravitino/cli/GravitinoCommandLine.java | 32 ++++++++++++++++++- .../gravitino/cli/GravitinoOptions.java | 2 +- .../java/org/apache/gravitino/cli/Main.java | 2 +- .../gravitino/cli/TestCommandEntities.java | 5 +-- 5 files changed, 37 insertions(+), 5 deletions(-) diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java b/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java index 6a6d4e5bbe8..7cedd9436ca 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java @@ -39,4 +39,5 @@ public class ErrorMessages { public static final String GROUP_EXISTS = "Group already exists."; public static final String UNKNOWN_TAG = "Unknown tag."; public static final String TAG_EXISTS = "Tag already exists."; + public static final String UNKNOWN_ROLE = "Unknown role."; } diff --git 
a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java index 2beec34b52e..fe5816e477a 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java @@ -57,6 +57,7 @@ import org.apache.gravitino.cli.commands.ListTagProperties; import org.apache.gravitino.cli.commands.ListUsers; import org.apache.gravitino.cli.commands.MetalakeDetails; +import org.apache.gravitino.cli.commands.OwnerDetails; import org.apache.gravitino.cli.commands.RemoveCatalogProperty; import org.apache.gravitino.cli.commands.RemoveMetalakeProperty; import org.apache.gravitino.cli.commands.RemoveSchemaProperty; @@ -65,6 +66,7 @@ import org.apache.gravitino.cli.commands.ServerVersion; import org.apache.gravitino.cli.commands.SetCatalogProperty; import org.apache.gravitino.cli.commands.SetMetalakeProperty; +import org.apache.gravitino.cli.commands.SetOwner; import org.apache.gravitino.cli.commands.SetSchemaProperty; import org.apache.gravitino.cli.commands.SetTagProperty; import org.apache.gravitino.cli.commands.TableDetails; @@ -165,7 +167,9 @@ public static void displayHelp(Options options) { /** Executes the appropriate command based on the command type. */ private void executeCommand() { - if (entity.equals(CommandEntities.COLUMN)) { + if (line.hasOption(GravitinoOptions.OWNER)) { + handleOwnerCommand(); + } else if (entity.equals(CommandEntities.COLUMN)) { handleColumnCommand(); } else if (entity.equals(CommandEntities.TABLE)) { handleTableCommand(); @@ -175,6 +179,10 @@ private void executeCommand() { handleCatalogCommand(); } else if (entity.equals(CommandEntities.METALAKE)) { handleMetalakeCommand(); + } else if (entity.equals(CommandEntities.USER)) { + handleUserCommand(); + } else if (entity.equals(CommandEntities.GROUP)) { + handleGroupCommand(); } } @@ -454,6 +462,28 @@ private void handleColumnCommand() { } } + /** + * Handles the command execution for Objects based on command type and the command line options. + */ + private void handleOwnerCommand() { + String url = getUrl(); + FullName name = new FullName(line); + String metalake = name.getMetalakeName(); + String entityName = line.getOptionValue(GravitinoOptions.NAME); + + if (CommandActions.DETAILS.equals(command)) { + new OwnerDetails(url, ignore, metalake, entityName, entity).handle(); + } else if (CommandActions.UPDATE.equals(command)) { + String owner = line.getOptionValue(GravitinoOptions.USER); + String group = line.getOptionValue(GravitinoOptions.GROUP); + if (owner != null) { + new SetOwner(url, ignore, metalake, entityName, entity, owner, false).handle(); + } else if (group != null) { + new SetOwner(url, ignore, metalake, entityName, entity, owner, true).handle(); + } + } + } + /** * Retrieves the Gravitino URL from the command line options or the GRAVITINO_URL environment * variable or the Gravitino config file. 
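The dispatch above routes any invocation that carries the owner flag to the new `OwnerDetails` and `SetOwner` commands (added as separate files later in this series) before falling back to the per-entity handlers. A minimal sketch of how the resulting syntax might be used, assuming flag spellings that match the `OWNER`, `NAME`, `USER`, and `GROUP` options referenced in the code (the exact invocations are illustrative, not taken from this patch):

```bash
# Display the owner of a catalog: the owner flag plus a details action runs OwnerDetails
gcli catalog --details --owner --name my_metalake.my_catalog

# Transfer ownership with an update action: --user sets a user as the owner,
# while --group would set a group as the owner instead
gcli catalog --update --owner --name my_metalake.my_catalog --user new_owner
gcli catalog --update --owner --name my_metalake.my_catalog --group admins
```

Note that in this commit the group branch of `handleOwnerCommand` still passes `owner` rather than `group` into `SetOwner`; the "fix group owner command" commit below corrects that.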
diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java index b9e1e22a9db..9650e29649f 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java @@ -82,7 +82,7 @@ public Options options() { options.addOption(createArgOption("d", DATABASE, "database name")); options.addOption(createArgOption("g", GROUP, "group name")); options.addOption(createArgOption("a", TAG, "tag name")); - options.addOption(createArgOption("o", OWNER, "owner name or group name")); + options.addOption(createSimpleOption("o", OWNER, "display entity owner")); return options; } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java b/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java index 0d0eaddf752..e81362b20d6 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java @@ -25,7 +25,7 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; -/* Entry point for teh Gravitino command line. */ +/* Entry point for the Gravitino command line. */ public class Main { public static void main(String[] args) { diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java index 4ea4ee5fe84..d980063ff5d 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestCommandEntities.java @@ -37,8 +37,9 @@ public void validEntities() { CommandEntities.isValidEntity(CommandEntities.SCHEMA), "SCHEMA should be a valid entity"); assertTrue( CommandEntities.isValidEntity(CommandEntities.TABLE), "TABLE should be a valid entity"); - assertTrue(CommandEntities.isValidEntity(CommandEntities.TAG), "TAG should be a valid entity"); - assertTrue(CommandEntities.isValidEntity(CommandEntities.OWNER), "OWNER should be a valid entity"); + assertTrue(CommandEntities.isValidEntity(CommandEntities.TAG), "TAG should be a valid entity"); + assertTrue( + CommandEntities.isValidEntity(CommandEntities.OWNER), "OWNER should be a valid entity"); } @Test From 2130aa51fdb22d2df7897e085768cb3ed05bf08a Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 17:15:38 +1100 Subject: [PATCH 024/123] fix group owner command --- .../java/org/apache/gravitino/cli/GravitinoCommandLine.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java index fe5816e477a..41e94f65e40 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java @@ -479,7 +479,7 @@ private void handleOwnerCommand() { if (owner != null) { new SetOwner(url, ignore, metalake, entityName, entity, owner, false).handle(); } else if (group != null) { - new SetOwner(url, ignore, metalake, entityName, entity, owner, true).handle(); + new SetOwner(url, ignore, metalake, entityName, entity, group, true).handle(); } } } From c43a47498d4176642b67d1f98dfff7b8adcb6808 Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 17:43:15 +1100 Subject: [PATCH 025/123] [Minor] fix spelling 
in error messages (#5283) ### What changes were proposed in this pull request? fix spelling in error messages ### Why are the changes needed? fix spelling in error messages Fix: # N/A ### Does this PR introduce _any_ user-facing change? N/A ### How was this patch tested? compiled locally --- .../apache/gravitino/authorization/AuthorizationUtils.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java index 1d5a2acf034..147d66eef4e 100644 --- a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java +++ b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java @@ -45,9 +45,9 @@ /* The utilization class of authorization module*/ public class AuthorizationUtils { - static final String USER_DOES_NOT_EXIST_MSG = "User %s does not exist in th metalake %s"; - static final String GROUP_DOES_NOT_EXIST_MSG = "Group %s does not exist in th metalake %s"; - static final String ROLE_DOES_NOT_EXIST_MSG = "Role %s does not exist in th metalake %s"; + static final String USER_DOES_NOT_EXIST_MSG = "User %s does not exist in the metalake %s"; + static final String GROUP_DOES_NOT_EXIST_MSG = "Group %s does not exist in the metalake %s"; + static final String ROLE_DOES_NOT_EXIST_MSG = "Role %s does not exist in the metalake %s"; private static final Set<Privilege.Name> FILESET_PRIVILEGES = Sets.immutableEnumSet( From e9e002612c5de1d0072bf35e404363449170f0ae Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 17:44:06 +1100 Subject: [PATCH 026/123] [Minor] fix spelling th -> the (#5284) ### What changes were proposed in this pull request? fix spelling th -> the ### Why are the changes needed? to fix spelling Fix: # N/A ### Does this PR introduce _any_ user-facing change? N/A ### How was this patch tested? N/A - just changes to comments --- .../main/java/org/apache/gravitino/MetadataObject.java | 6 +++--- .../authorization/ranger/RangerMetadataObject.java | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/src/main/java/org/apache/gravitino/MetadataObject.java b/api/src/main/java/org/apache/gravitino/MetadataObject.java index 534226a56c8..7e94db194fb 100644 --- a/api/src/main/java/org/apache/gravitino/MetadataObject.java +++ b/api/src/main/java/org/apache/gravitino/MetadataObject.java @@ -72,15 +72,15 @@ enum Type { String parent(); /** - * The name of th object. + * The name of the object. * * @return The name of the object. */ String name(); /** - * The full name of th object. Full name will be separated by "." to represent a string identifier - * of the object, like catalog, catalog.table, etc. + * The full name of the object. Full name will be separated by "." to represent a string + * identifier of the object, like catalog, catalog.table, etc. * * @return The name of the object. 
*/ diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerMetadataObject.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerMetadataObject.java index 08df90d0fbd..e6611a17944 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerMetadataObject.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerMetadataObject.java @@ -74,22 +74,22 @@ public static Type fromMetadataType(MetadataObject.Type metadataType) { String parent(); /** - * The name of th object. + * The name of the object. * * @return The name of the object. */ String name(); /** - * The all name list of th object. + * The all name list of the object. * * @return The name list of the object. */ List names(); /** - * The full name of th object. Full name will be separated by "." to represent a string identifier - * of the object, like catalog, catalog.table, etc. + * The full name of the object. Full name will be separated by "." to represent a string + * identifier of the object, like catalog, catalog.table, etc. * * @return The name of the object. */ From a7445d699a5181e197002b4d52049c3e99c2b232 Mon Sep 17 00:00:00 2001 From: danhuawang <154112360+danhuawang@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:47:05 +0800 Subject: [PATCH 027/123] [Minor] Update playground doc (#5290) ### What changes were proposed in this pull request? Update playground doc according to latest changes ### Why are the changes needed? Align doc ### Does this PR introduce _any_ user-facing change? N/A ### How was this patch tested? N/A --- docs/how-to-use-the-playground.md | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/docs/how-to-use-the-playground.md b/docs/how-to-use-the-playground.md index cd6b0ed7099..56792b5984d 100644 --- a/docs/how-to-use-the-playground.md +++ b/docs/how-to-use-the-playground.md @@ -13,7 +13,11 @@ Depending on your network and computer, startup time may take 3-5 minutes. Once ## Prerequisites -Install Git and Docker Compose. +Install Git, Docker, Docker Compose. + +## System Resource Requirements + +2 CPU cores, 8 GB RAM, 25 GB disk storage, MacOS or Linux OS (Verified Ubuntu22.04 Ubuntu24.04 AmazonLinux). ## TCP ports used @@ -28,26 +32,25 @@ The playground runs several services. The TCP ports used may clash with existing | playground-trino | 18080 | | playground-jupyter | 18888 | -## Start playground +## Playground usage -### Launch all components of the playground +### Launch playground ```shell git clone git@github.com:apache/gravitino-playground.git cd gravitino-playground -./launch-playground.sh +./playground.sh start ``` - -### Launch special component or components of playground +### Check status +```shell +./playground.sh status +``` +### Stop playground ```shell -git clone git@github.com:apache/gravitino-playground.git -cd gravitino-playground -./launch-playground.sh hive|gravitino|trino|postgresql|mysql|spark|jupyter +./playground.sh stop ``` -Note. Components have dependencies, so not launching all components may prevent you from experiencing the full functionality of the playground. 
- ## Using Apache Gravitino with Trino SQL ### Using Trino CLI in Docker Container From 46b71474b20a49390e63f4f0dfbc5c5e09fe0f69 Mon Sep 17 00:00:00 2001 From: Justin Mclean Date: Mon, 28 Oct 2024 17:48:15 +1100 Subject: [PATCH 028/123] add right files --- .../gravitino/cli/commands/OwnerDetails.java | 93 ++++++++++++++++ .../gravitino/cli/commands/SetOwner.java | 100 ++++++++++++++++++ 2 files changed, 193 insertions(+) create mode 100644 clients/cli/src/main/java/org/apache/gravitino/cli/commands/OwnerDetails.java create mode 100644 clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetOwner.java diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/OwnerDetails.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/OwnerDetails.java new file mode 100644 index 00000000000..ece8cfd628f --- /dev/null +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/OwnerDetails.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.cli.commands; + +import java.util.Optional; +import org.apache.gravitino.MetadataObject; +import org.apache.gravitino.MetadataObjects; +import org.apache.gravitino.authorization.Owner; +import org.apache.gravitino.cli.CommandEntities; +import org.apache.gravitino.cli.ErrorMessages; +import org.apache.gravitino.client.GravitinoClient; +import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; +import org.apache.gravitino.exceptions.NoSuchMetalakeException; + +public class OwnerDetails extends Command { + + protected final String metalake; + protected final String entity; + protected final MetadataObject.Type entityType; + + /** + * Displays the owner of an entity. + * + * @param url The URL of the Gravitino server. + * @param ignoreVersions If true don't check the client/server versions match. + * @param metalake The name of the metalake. + * @param entity The name of the entity. + * @param entityType The type of entity. 
+ */ + public OwnerDetails( + String url, boolean ignoreVersions, String metalake, String entity, String entityType) { + super(url, ignoreVersions); + this.metalake = metalake; + this.entity = entity; + + if (entityType.equals(CommandEntities.METALAKE)) { + this.entityType = MetadataObject.Type.METALAKE; + } else if (entityType.equals(CommandEntities.CATALOG)) { + this.entityType = MetadataObject.Type.CATALOG; + } else if (entityType.equals(CommandEntities.SCHEMA)) { + this.entityType = MetadataObject.Type.SCHEMA; + } else if (entityType.equals(CommandEntities.TABLE)) { + this.entityType = MetadataObject.Type.TABLE; + } else if (entityType.equals(CommandEntities.COLUMN)) { + this.entityType = MetadataObject.Type.COLUMN; + } else { + this.entityType = null; + } + } + + /** Displays the owner of an entity. */ + public void handle() { + Optional<Owner> owner = null; + MetadataObject metadata = MetadataObjects.parse(entity, entityType); + + try { + GravitinoClient client = buildClient(metalake); + owner = client.getOwner(metadata); + } catch (NoSuchMetalakeException err) { + System.err.println(ErrorMessages.UNKNOWN_METALAKE); + return; + } catch (NoSuchMetadataObjectException err) { + System.err.println(ErrorMessages.UNKNOWN_ENTITY); + return; + } catch (Exception exp) { + System.err.println(exp.getMessage()); + return; + } + + if (owner.isPresent()) { + System.out.println(owner.get().name()); + } else { + System.out.println("No owner"); + } + } +} diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetOwner.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetOwner.java new file mode 100644 index 00000000000..6d113467812 --- /dev/null +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetOwner.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.cli.commands; + +import org.apache.gravitino.MetadataObject; +import org.apache.gravitino.MetadataObjects; +import org.apache.gravitino.authorization.Owner; +import org.apache.gravitino.cli.CommandEntities; +import org.apache.gravitino.cli.ErrorMessages; +import org.apache.gravitino.client.GravitinoClient; +import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; +import org.apache.gravitino.exceptions.NoSuchMetalakeException; + +public class SetOwner extends Command { + + protected final String metalake; + protected final String entity; + protected final MetadataObject.Type entityType; + protected final String owner; + protected final boolean isGroup; + + /** + * Sets the owner of an entity. + * + * @param url The URL of the Gravitino server. + * @param ignoreVersions If true don't check the client/server versions match. + * @param metalake The name of the metalake. 
+ * @param entity The name of the entity. + * @param entityType The type of entity. + * @param owner The name of the new owner. + * @param isGroup True if the owner is a group, false if it is not. + */ + public SetOwner( + String url, + boolean ignoreVersions, + String metalake, + String entity, + String entityType, + String owner, + boolean isGroup) { + super(url, ignoreVersions); + this.metalake = metalake; + this.entity = entity; + this.owner = owner; + this.isGroup = isGroup; + + if (entityType.equals(CommandEntities.METALAKE)) { + this.entityType = MetadataObject.Type.METALAKE; + } else if (entityType.equals(CommandEntities.CATALOG)) { + this.entityType = MetadataObject.Type.CATALOG; + } else if (entityType.equals(CommandEntities.SCHEMA)) { + this.entityType = MetadataObject.Type.SCHEMA; + } else if (entityType.equals(CommandEntities.TABLE)) { + this.entityType = MetadataObject.Type.TABLE; + } else if (entityType.equals(CommandEntities.COLUMN)) { + this.entityType = MetadataObject.Type.COLUMN; + } else { + this.entityType = null; + } + } + + /** Sets the owner of an entity. */ + public void handle() { + MetadataObject metadata = MetadataObjects.parse(entity, entityType); + Owner.Type ownerType = isGroup ? Owner.Type.GROUP : Owner.Type.USER; + + try { + GravitinoClient client = buildClient(metalake); + client.setOwner(metadata, owner, ownerType); + } catch (NoSuchMetalakeException err) { + System.err.println(ErrorMessages.UNKNOWN_METALAKE); + return; + } catch (NoSuchMetadataObjectException err) { + System.err.println(ErrorMessages.UNKNOWN_ENTITY); + return; + } catch (Exception exp) { + System.err.println(exp.getMessage()); + return; + } + + System.out.println("Set owner to " + owner); + } +} From 569e90235afeb9d0c19e7cf3167dddfb2d1bf3e0 Mon Sep 17 00:00:00 2001 From: Qian Xia Date: Mon, 28 Oct 2024 14:48:26 +0800 Subject: [PATCH 029/123] [#5206] UI (metalake, catalog): Web UI add disable/enable metalake/catalog button (#5250) ### What changes were proposed in this pull request? Web UI add disable/enable metalake/catalog button ### Why are the changes needed? Fix: #5206 ### Does this PR introduce _any_ user-facing change? N/A ### How was this patch tested? 
manually --- .../test/web/ui/CatalogsPageTest.java | 1 + .../test/web/ui/MetalakePageTest.java | 1 + .../test/web/ui/pages/CatalogsPage.java | 9 +++ .../test/web/ui/pages/MetalakePage.java | 9 +++ .../src/app/metalakes/CreateMetalakeDialog.js | 16 ++-- web/web/src/app/metalakes/TableBody.js | 72 ++++++++++++------ .../app/metalakes/metalake/MetalakeTree.js | 6 ++ .../rightContent/CreateCatalogDialog.js | 8 +- .../tabsContent/tableView/TableView.js | 75 +++++++++++++------ web/web/src/components/ConfirmDeleteDialog.js | 15 +++- web/web/src/lib/api/catalogs/index.js | 12 ++- web/web/src/lib/api/metalakes/index.js | 12 ++- web/web/src/lib/store/metalakes/index.js | 31 ++++++++ web/web/src/lib/utils/axios/Axios.js | 10 +++ 14 files changed, 218 insertions(+), 59 deletions(-) diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java index 31e50e65725..0b140a122c8 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java @@ -269,6 +269,7 @@ public void testDeleteCatalog() throws InterruptedException { catalogsPage.setCatalogFixedProp("metastore.uris", hiveMetastoreUri); clickAndWait(catalogsPage.handleSubmitCatalogBtn); // delete catalog + catalogsPage.clickInUseSwitch(DEFAULT_CATALOG_NAME); catalogsPage.clickDeleteCatalogBtn(DEFAULT_CATALOG_NAME); clickAndWait(catalogsPage.confirmDeleteBtn); Assertions.assertTrue(catalogsPage.verifyEmptyTableData()); diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java index fc27dc08b48..93617927382 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/MetalakePageTest.java @@ -91,6 +91,7 @@ public void testEditMetalake() { @Test @Order(4) public void testDeleteMetalake() { + metalakePage.clickInUseSwitch(EDITED_METALAKE_NAME); metalakePage.clickDeleteMetalakeBtn(EDITED_METALAKE_NAME); metalakePage.confirmDeleteBtn.click(); Assertions.assertTrue(metalakePage.verifyEmptyMetalake()); diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java index b397c26a7e4..43a2dd5ba35 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/CatalogsPage.java @@ -208,6 +208,15 @@ public void clickEditCatalogBtn(String name) { } } + public void clickInUseSwitch(String name) { + try { + String xpath = "//*[@data-refer='catalog-in-use-" + name + "']"; + clickAndWait(By.xpath(xpath)); + } catch (Exception e) { + LOG.error(e.getMessage(), e); + } + } + public void clickDeleteCatalogBtn(String name) { try { String xpath = "//button[@data-refer='delete-entity-" + name + "']"; diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java 
b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java index 419589f5ad9..f2810ab0736 100644 --- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java +++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/pages/MetalakePage.java @@ -148,6 +148,15 @@ public void clearQueryInput() { queryMetalakeInput.clear(); } + public void clickInUseSwitch(String name) { + try { + String xpath = "//*[@data-refer='metalake-in-use-" + name + "']"; + clickAndWait(By.xpath(xpath)); + } catch (Exception e) { + LOG.error(e.getMessage(), e); + } + } + public void clickDeleteMetalakeBtn(String name) { try { String xpath = "//button[@data-refer='delete-metalake-" + name + "']"; diff --git a/web/web/src/app/metalakes/CreateMetalakeDialog.js b/web/web/src/app/metalakes/CreateMetalakeDialog.js index 9dc50df5f53..8b8daaadb05 100644 --- a/web/web/src/app/metalakes/CreateMetalakeDialog.js +++ b/web/web/src/app/metalakes/CreateMetalakeDialog.js @@ -287,6 +287,7 @@ const CreateMetalakeDialog = props => { name='key' label='Key' value={item.key} + disabled={item.key === 'in-use' && type === 'update'} onChange={event => handleFormChange(index, event)} error={item.hasDuplicateKey} data-refer={`add-props-key-${index}`} @@ -296,14 +297,19 @@ const CreateMetalakeDialog = props => { name='value' label='Value' value={item.value} + disabled={item.key === 'in-use' && type === 'update'} onChange={event => handleFormChange(index, event)} data-refer={`add-props-value-${index}`} /> - - removeFields(index)}> - - - + {!(item.key === 'in-use' && type === 'update') ? ( + + removeFields(index)}> + + + + ) : ( + + )} {item.hasDuplicateKey && ( Key already exists diff --git a/web/web/src/app/metalakes/TableBody.js b/web/web/src/app/metalakes/TableBody.js index 943e2c0b495..da5ad36c5a0 100644 --- a/web/web/src/app/metalakes/TableBody.js +++ b/web/web/src/app/metalakes/TableBody.js @@ -21,7 +21,7 @@ import { useState, useEffect, Fragment } from 'react' import Link from 'next/link' -import { Box, Typography, Portal, Tooltip, IconButton } from '@mui/material' +import { Box, Typography, Portal, Tooltip, IconButton, Switch } from '@mui/material' import { DataGrid, GridToolbar } from '@mui/x-data-grid' import { VisibilityOutlined as ViewIcon, @@ -31,7 +31,15 @@ import { import { formatToDateTime } from '@/lib/utils/date' import { useAppDispatch, useAppSelector } from '@/lib/hooks/useStore' -import { fetchMetalakes, setFilteredMetalakes, deleteMetalake, resetTree } from '@/lib/store/metalakes' +import { + fetchMetalakes, + setFilteredMetalakes, + deleteMetalake, + resetTree, + setMetalakeInUse +} from '@/lib/store/metalakes' +import { switchInUseApi } from '@/lib/api/metalakes' +import { to } from '@/lib/utils' import ConfirmDeleteDialog from '@/components/ConfirmDeleteDialog' const TableBody = props => { @@ -46,12 +54,12 @@ const TableBody = props => { const handleDeleteMetalake = name => () => { setOpenConfirmDelete(true) - setConfirmCacheData(name) + setConfirmCacheData({ name, type: 'metalake' }) } const handleConfirmDeleteSubmit = () => { if (confirmCacheData) { - dispatch(deleteMetalake(confirmCacheData)) + dispatch(deleteMetalake(confirmCacheData.name)) setOpenConfirmDelete(false) } } @@ -76,6 +84,14 @@ const TableBody = props => { dispatch(resetTree()) } + const handleChangeInUse = async (name, isInUse) => { + const [err, res] = await to(switchInUseApi({ name, isInUse })) + if (err || !res) { + throw new 
Error(err) + } + dispatch(setMetalakeInUse({ name, isInUse })) + } + useEffect(() => { dispatch(fetchMetalakes()) }, [dispatch]) @@ -109,24 +125,36 @@ const TableBody = props => { return ( - handleClickLink()} - sx={{ - fontWeight: 500, - color: 'primary.main', - textDecoration: 'none', - maxWidth: 240, - overflow: 'hidden', - textOverflow: 'ellipsis', - '&:hover': { color: 'primary.main', textDecoration: 'underline' } - }} - data-refer={`metalake-link-${name}`} - > - {name} - + {row.properties['in-use'] === 'true' ? ( + handleClickLink()} + sx={{ + fontWeight: 500, + color: 'primary.main', + textDecoration: 'none', + maxWidth: 240, + overflow: 'hidden', + textOverflow: 'ellipsis', + '&:hover': { color: 'primary.main', textDecoration: 'underline' } + }} + data-refer={`metalake-link-${name}`} + > + {name} + + ) : ( + {name} + )} + + + handleChangeInUse(name, value)} + size='small' + /> ) diff --git a/web/web/src/app/metalakes/metalake/MetalakeTree.js b/web/web/src/app/metalakes/metalake/MetalakeTree.js index 74065f93336..9f17480df08 100644 --- a/web/web/src/app/metalakes/metalake/MetalakeTree.js +++ b/web/web/src/app/metalakes/metalake/MetalakeTree.js @@ -86,6 +86,7 @@ const MetalakeTree = props => { const handleClickIcon = (e, nodeProps) => { e.stopPropagation() + if (nodeProps.data.inUse === 'false') return switch (nodeProps.data.node) { case 'table': { @@ -118,6 +119,7 @@ const MetalakeTree = props => { } const onMouseEnter = (e, nodeProps) => { + if (nodeProps.data.inUse === 'false') return if (nodeProps.data.node === 'table') { if (store.selectedNodes.includes(nodeProps.data.key)) { setIsHover(nodeProps.data.key) @@ -128,10 +130,12 @@ const MetalakeTree = props => { } const onMouseLeave = (e, nodeProps) => { + if (nodeProps.data.inUse === 'false') return setIsHover(null) } const onLoadData = node => { + if (node.inUse === 'false') return new Promise(resolve => resolve()) const { key, children } = node dispatch(setLoadedNodes([...store.loadedNodes, key])) @@ -150,6 +154,7 @@ const MetalakeTree = props => { } const onExpand = (keys, { expanded, node }) => { + if (node.inUse === 'false') return if (expanded) { dispatch(setExpandedNodes(keys)) } else { @@ -158,6 +163,7 @@ const MetalakeTree = props => { } const onSelect = (keys, { selected, node }) => { + if (node.inUse === 'false') return if (!selected) { dispatch(setSelectedNodes([node.key])) diff --git a/web/web/src/app/metalakes/metalake/rightContent/CreateCatalogDialog.js b/web/web/src/app/metalakes/metalake/rightContent/CreateCatalogDialog.js index aa836886aeb..4cb41e6fb8b 100644 --- a/web/web/src/app/metalakes/metalake/rightContent/CreateCatalogDialog.js +++ b/web/web/src/app/metalakes/metalake/rightContent/CreateCatalogDialog.js @@ -600,7 +600,9 @@ const CreateCatalogDialog = props => { name='key' label='Key' value={item.key} - disabled={item.required || item.disabled} + disabled={ + item.required || item.disabled || (item.key === 'in-use' && type === 'update') + } onChange={event => handleFormChange({ index, event })} error={item.hasDuplicateKey || item.invalid || !item.key.trim()} data-refer={`props-key-${index}`} @@ -631,7 +633,7 @@ const CreateCatalogDialog = props => { label='Value' error={item.required && item.value === ''} value={item.value} - disabled={item.disabled} + disabled={item.disabled || (item.key === 'in-use' && type === 'update')} onChange={event => handleFormChange({ index, event })} data-refer={`props-value-${index}`} data-prev-refer={`props-${item.key}`} @@ -640,7 +642,7 @@ const CreateCatalogDialog = props => 
{ )} - {!(item.required || item.disabled) ? ( + {!(item.required || item.disabled || (item.key === 'in-use' && type === 'update')) ? ( removeFields(index)}> diff --git a/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js b/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js index cf8cc3bafef..964b7d6b891 100644 --- a/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js +++ b/web/web/src/app/metalakes/metalake/rightContent/tabsContent/tableView/TableView.js @@ -25,7 +25,7 @@ import { useState, useEffect, Fragment } from 'react' import Link from 'next/link' -import { styled, Box, Typography, IconButton, Stack } from '@mui/material' +import { styled, Box, Typography, IconButton, Stack, Switch } from '@mui/material' import Tooltip, { tooltipClasses } from '@mui/material/Tooltip' import { DataGrid } from '@mui/x-data-grid' import { @@ -44,10 +44,17 @@ import CreateSchemaDialog from '../../CreateSchemaDialog' import CreateFilesetDialog from '../../CreateFilesetDialog' import { useAppSelector, useAppDispatch } from '@/lib/hooks/useStore' -import { deleteCatalog, deleteFileset, deleteSchema } from '@/lib/store/metalakes' +import { + deleteCatalog, + deleteFileset, + deleteSchema, + resetExpandNode, + setCatalogInUse, + setIntoTreeNodes +} from '@/lib/store/metalakes' import { to } from '@/lib/utils' -import { getCatalogDetailsApi } from '@/lib/api/catalogs' +import { getCatalogDetailsApi, switchInUseApi } from '@/lib/api/catalogs' import { getSchemaDetailsApi } from '@/lib/api/schemas' import { useSearchParams } from 'next/navigation' import { getFilesetDetailsApi } from '@/lib/api/filesets' @@ -80,6 +87,8 @@ const TableView = () => { const type = searchParams.get('type') || '' const schema = searchParams.get('schema') || '' + const isCatalogList = paramsSize == 1 && searchParams.has('metalake') + const isKafkaSchema = paramsSize == 3 && searchParams.has('metalake') && @@ -237,26 +246,40 @@ const TableView = () => { return ( - - handleClickUrl(path)} - sx={{ - fontWeight: 400, - color: 'primary.main', - textDecoration: 'none', - '&:hover': { color: 'primary.main', textDecoration: 'underline' } - }} - > - {name} - + + {(isCatalogList && row.inUse === 'true') || !isCatalogList ? 
( + handleClickUrl(path)} + sx={{ + fontWeight: 400, + color: 'primary.main', + textDecoration: 'none', + '&:hover': { color: 'primary.main', textDecoration: 'underline' } + }} + > + {name} + + ) : ( + {name} + )} + {isCatalogList && ( + + handleChangeInUse(name, row.type, value)} + size='small' + /> + + )} ) } @@ -564,6 +587,14 @@ const TableView = () => { } } + const handleChangeInUse = async (name, catalogType, isInUse) => { + const [err, res] = await to(switchInUseApi({ metalake, catalog: name, isInUse })) + if (err || !res) { + throw new Error(err) + } + dispatch(setCatalogInUse({ name, catalogType, metalake, isInUse })) + } + const checkColumns = () => { if ( (paramsSize == 1 && searchParams.has('metalake')) || diff --git a/web/web/src/components/ConfirmDeleteDialog.js b/web/web/src/components/ConfirmDeleteDialog.js index 9cb38781b0b..b551a1fbfd3 100644 --- a/web/web/src/components/ConfirmDeleteDialog.js +++ b/web/web/src/components/ConfirmDeleteDialog.js @@ -22,7 +22,7 @@ import { Box, Button, Typography, Dialog, DialogContent, DialogActions } from '@ import Icon from '@/components/Icon' const ConfirmDeleteDialog = props => { - const { open, setOpen, handleConfirmDeleteSubmit } = props + const { open, setOpen, confirmCacheData, handleConfirmDeleteSubmit } = props const handleClose = () => setOpen(false) @@ -35,9 +35,16 @@ const ConfirmDeleteDialog = props => { > - Confirm Delete? + Confirm Drop? - This action can not be reversed! + {['metalake', 'catalog'].includes(confirmCacheData?.type) ? ( + + Make sure the {confirmCacheData.type} is not in-use, and all sub-entities in it are dropped. This action + can not be reversed! + + ) : ( + This action can not be reversed! + )} @@ -47,7 +54,7 @@ const ConfirmDeleteDialog = props => { className={'twc-mr-2'} onClick={() => handleConfirmDeleteSubmit()} > - Delete + Drop