From 7761440756fed560d765d95af75ae503b1dff586 Mon Sep 17 00:00:00 2001 From: roryqi Date: Tue, 12 Nov 2024 20:42:44 +0800 Subject: [PATCH] [#5517] feat(auth): Paimon catalog supports Ranger plugin (#5523) ### What changes were proposed in this pull request? Paimon catalog supports Ranger plugin. Kyuubi authz plugin doesn't support to update or delete the table. ### Why are the changes needed? Fix: #5517 ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? Add IT. --- .../authorization-ranger/build.gradle.kts | 6 +- .../ranger/RangerAuthorization.java | 1 + .../RangerAuthorizationHadoopSQLPlugin.java | 20 +- .../ranger/RangerPrivileges.java | 6 +- .../integration/test/RangerBaseE2EIT.java | 34 +-- .../integration/test/RangerHiveE2EIT.java | 2 +- .../ranger/integration/test/RangerHiveIT.java | 4 +- .../ranger/integration/test/RangerITEnv.java | 2 +- .../integration/test/RangerIcebergE2EIT.java | 4 +- .../integration/test/RangerPaimonE2EIT.java | 235 ++++++++++++++++++ docs/security/authorization-pushdown.md | 40 +-- gradle/libs.versions.toml | 1 + 12 files changed, 284 insertions(+), 71 deletions(-) create mode 100644 authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java diff --git a/authorizations/authorization-ranger/build.gradle.kts b/authorizations/authorization-ranger/build.gradle.kts index 93d90cd4fdb..0eeb6c4fb5b 100644 --- a/authorizations/authorization-ranger/build.gradle.kts +++ b/authorizations/authorization-ranger/build.gradle.kts @@ -26,9 +26,10 @@ plugins { val scalaVersion: String = project.properties["scalaVersion"] as? String ?: extra["defaultScalaVersion"].toString() val sparkVersion: String = libs.versions.spark35.get() -val kyuubiVersion: String = libs.versions.kyuubi4spark35.get() +val kyuubiVersion: String = libs.versions.kyuubi4paimon.get() val sparkMajorVersion: String = sparkVersion.substringBeforeLast(".") val icebergVersion: String = libs.versions.iceberg4spark.get() +val paimonVersion: String = libs.versions.paimon.get() dependencies { implementation(project(":api")) { @@ -86,7 +87,7 @@ dependencies { exclude("io.dropwizard.metrics") exclude("org.rocksdb") } - testImplementation("org.apache.kyuubi:kyuubi-spark-authz_$scalaVersion:$kyuubiVersion") { + testImplementation("org.apache.kyuubi:kyuubi-spark-authz-shaded_$scalaVersion:$kyuubiVersion") { exclude("com.sun.jersey") } testImplementation(libs.hadoop3.client) @@ -100,6 +101,7 @@ dependencies { exclude("io.netty") } testImplementation("org.apache.iceberg:iceberg-spark-runtime-${sparkMajorVersion}_$scalaVersion:$icebergVersion") + testImplementation("org.apache.paimon:paimon-spark-$sparkMajorVersion:$paimonVersion") } tasks { diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java index 9f8b42b0633..459b6b04720 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorization.java @@ -34,6 +34,7 @@ protected AuthorizationPlugin newPlugin(String catalogProvider, Map names, RangerMetadataObjec public Map> privilegesMappingRule() { return ImmutableMap.of( Privilege.Name.CREATE_CATALOG, - ImmutableSet.of(RangerHivePrivilege.CREATE), + 
ImmutableSet.of(RangerHadoopSQLPrivilege.CREATE), Privilege.Name.USE_CATALOG, - ImmutableSet.of(RangerHivePrivilege.SELECT), + ImmutableSet.of(RangerHadoopSQLPrivilege.SELECT), Privilege.Name.CREATE_SCHEMA, - ImmutableSet.of(RangerHivePrivilege.CREATE), + ImmutableSet.of(RangerHadoopSQLPrivilege.CREATE), Privilege.Name.USE_SCHEMA, - ImmutableSet.of(RangerHivePrivilege.SELECT), + ImmutableSet.of(RangerHadoopSQLPrivilege.SELECT), Privilege.Name.CREATE_TABLE, - ImmutableSet.of(RangerHivePrivilege.CREATE), + ImmutableSet.of(RangerHadoopSQLPrivilege.CREATE), Privilege.Name.MODIFY_TABLE, ImmutableSet.of( - RangerHivePrivilege.UPDATE, RangerHivePrivilege.ALTER, RangerHivePrivilege.WRITE), + RangerHadoopSQLPrivilege.UPDATE, + RangerHadoopSQLPrivilege.ALTER, + RangerHadoopSQLPrivilege.WRITE), Privilege.Name.SELECT_TABLE, - ImmutableSet.of(RangerHivePrivilege.READ, RangerHivePrivilege.SELECT)); + ImmutableSet.of(RangerHadoopSQLPrivilege.READ, RangerHadoopSQLPrivilege.SELECT)); } @Override /** Set the default owner rule. */ public Set ownerMappingRule() { - return ImmutableSet.of(RangerHivePrivilege.ALL); + return ImmutableSet.of(RangerHadoopSQLPrivilege.ALL); } @Override diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java index c547781678c..e47b46efca5 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerPrivileges.java @@ -25,7 +25,7 @@ public class RangerPrivileges { /** Ranger Hive privileges enumeration. 
*/ - public enum RangerHivePrivilege implements RangerPrivilege { + public enum RangerHadoopSQLPrivilege implements RangerPrivilege { ALL("all"), SELECT("select"), UPDATE("update"), @@ -41,7 +41,7 @@ public enum RangerHivePrivilege implements RangerPrivilege { private final String name; // Access a type in the Ranger policy item - RangerHivePrivilege(String name) { + RangerHadoopSQLPrivilege(String name) { this.name = name; } @@ -117,7 +117,7 @@ public boolean equalsTo(String value) { static List>> allRangerPrivileges = Lists.newArrayList( - RangerPrivileges.RangerHivePrivilege.class, RangerPrivileges.RangerHdfsPrivilege.class); + RangerHadoopSQLPrivilege.class, RangerPrivileges.RangerHdfsPrivilege.class); public static RangerPrivilege valueOf(String name) { Preconditions.checkArgument(name != null, "Privilege name string cannot be null!"); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java index 8034e8d7a51..95dc4f93636 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java @@ -193,7 +193,7 @@ protected static void waitForUpdatingPolicies() throws InterruptedException { protected abstract void useCatalog() throws InterruptedException; - protected abstract void checkHaveNoPrivileges(); + protected abstract void checkWithoutPrivileges(); protected abstract void testAlterTable(); @@ -269,7 +269,7 @@ void testCreateTable() throws InterruptedException { AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList()); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); metalake.deleteRole(createTableRole); metalake.deleteRole(createSchemaRole); @@ -323,10 +323,10 @@ void testReadWriteTableWithMetalakeLevelRole() throws InterruptedException { // case 7: If we don't have the role, we can't insert and select from data. metalake.deleteRole(readWriteRole); waitForUpdatingPolicies(); - checkHaveNoPrivileges(); + checkWithoutPrivileges(); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); } @@ -387,10 +387,10 @@ void testReadWriteTableWithTableLevelRole() throws InterruptedException { // case 7: If we don't have the role, we can't insert and select from data. metalake.deleteRole(roleName); waitForUpdatingPolicies(); - checkHaveNoPrivileges(); + checkWithoutPrivileges(); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); } @@ -441,10 +441,10 @@ void testReadOnlyTable() throws InterruptedException { // case 7: If we don't have the role, we can't insert and select from data. 
metalake.deleteRole(readOnlyRole); waitForUpdatingPolicies(); - checkHaveNoPrivileges(); + checkWithoutPrivileges(); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); } @@ -496,10 +496,10 @@ void testWriteOnlyTable() throws InterruptedException { // case 7: If we don't have the role, we can't insert and select from data. metalake.deleteRole(writeOnlyRole); waitForUpdatingPolicies(); - checkHaveNoPrivileges(); + checkWithoutPrivileges(); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); } @@ -547,7 +547,7 @@ void testCreateAllPrivilegesRole() throws InterruptedException { sparkSession.sql(SQL_CREATE_TABLE); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); metalake.deleteRole(roleName); } @@ -690,7 +690,7 @@ void testRenameMetadataObject() throws InterruptedException { sparkSession.sql(SQL_RENAME_BACK_TABLE); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); metalake.deleteRole(roleName); } @@ -739,7 +739,7 @@ void testRenameMetadataObjectPrivilege() throws InterruptedException { sparkSession.sql(SQL_INSERT_TABLE); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); metalake.deleteRole(roleName); } @@ -774,7 +774,7 @@ void testChangeOwner() throws InterruptedException { metalake.deleteRole(helperRole); waitForUpdatingPolicies(); - checkHaveNoPrivileges(); + checkWithoutPrivileges(); // case 2. 
user is the table owner MetadataObject tableObject = @@ -787,7 +787,7 @@ void testChangeOwner() throws InterruptedException { checkTableAllPrivilegesExceptForCreating(); // Delete Gravitino's meta data - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); waitForUpdatingPolicies(); // Fail to create the table @@ -854,7 +854,7 @@ void testChangeOwner() throws InterruptedException { sparkSession.sql(SQL_DROP_SCHEMA); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); } @@ -915,7 +915,7 @@ void testAllowUseSchemaPrivilege() throws InterruptedException { 1, rows2.stream().filter(row -> row.getString(0).equals(schemaName)).count()); // Clean up - catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName)); + catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName, tableName)); catalog.asSchemas().dropSchema(schemaName, false); metalake.revokeRolesFromUser(Lists.newArrayList(roleName), userName1); metalake.deleteRole(roleName); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java index 7e3096a61ae..cb41e79216c 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java @@ -120,7 +120,7 @@ protected void useCatalog() throws InterruptedException { } @Override - protected void checkHaveNoPrivileges() { + protected void checkWithoutPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE)); Assertions.assertThrows( AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList()); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java index a72503c2ff5..da43daca799 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java @@ -348,7 +348,7 @@ public void testFindManagedPolicy() { RangerMetadataObject.Type.TABLE, ImmutableSet.of( new RangerPrivileges.RangerHivePrivilegeImpl( - RangerPrivileges.RangerHivePrivilege.ALL, Privilege.Condition.ALLOW))); + RangerPrivileges.RangerHadoopSQLPrivilege.ALL, Privilege.Condition.ALLOW))); Assertions.assertNull(rangerHelper.findManagedPolicy(rangerSecurableObject)); // Add a policy for `db3.tab1` @@ -398,7 +398,7 @@ static void createHivePolicy( policyItem.setAccesses( Arrays.asList( new RangerPolicy.RangerPolicyItemAccess( - RangerPrivileges.RangerHivePrivilege.SELECT.toString()))); + RangerPrivileges.RangerHadoopSQLPrivilege.SELECT.toString()))); RangerITEnv.updateOrCreateRangerPolicy( RangerDefines.SERVICE_TYPE_HIVE, 
RangerITEnv.RANGER_HIVE_REPO_NAME, diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java index 31ae3974d29..13202add720 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java @@ -212,7 +212,7 @@ static void allowAnyoneAccessInformationSchema() { policyItem.setAccesses( Arrays.asList( new RangerPolicy.RangerPolicyItemAccess( - RangerPrivileges.RangerHivePrivilege.SELECT.toString()))); + RangerPrivileges.RangerHadoopSQLPrivilege.SELECT.toString()))); updateOrCreateRangerPolicy( RANGER_HIVE_TYPE, RANGER_HIVE_REPO_NAME, diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java index 648a9c4d709..7b45eda7a6e 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java @@ -90,7 +90,7 @@ public void startIntegrationTest() throws Exception { sparkSession = SparkSession.builder() .master("local[1]") - .appName("Ranger Hive E2E integration test") + .appName("Ranger Iceberg E2E integration test") .config("spark.sql.catalog.iceberg", "org.apache.iceberg.spark.SparkCatalog") .config("spark.sql.catalog.iceberg.type", "hive") .config("spark.sql.catalog.iceberg.uri", HIVE_METASTORE_URIS) @@ -147,7 +147,7 @@ protected void checkDeleteSQLWithWritePrivileges() { } @Override - protected void checkHaveNoPrivileges() { + protected void checkWithoutPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE)); Assertions.assertThrows( AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList()); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java new file mode 100644 index 00000000000..7cb600b9d8c --- /dev/null +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.authorization.ranger.integration.test; + +import static org.apache.gravitino.Catalog.AUTHORIZATION_PROVIDER; +import static org.apache.gravitino.authorization.ranger.integration.test.RangerITEnv.currentFunName; +import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_AUTH_TYPE; +import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_PASSWORD; +import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_SERVICE_NAME; +import static org.apache.gravitino.connector.AuthorizationPropertiesMeta.RANGER_USERNAME; +import static org.apache.gravitino.integration.test.container.RangerContainer.RANGER_SERVER_PORT; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import java.util.Collections; +import java.util.Map; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.Configs; +import org.apache.gravitino.auth.AuthConstants; +import org.apache.gravitino.auth.AuthenticatorType; +import org.apache.gravitino.authorization.Privileges; +import org.apache.gravitino.authorization.SecurableObject; +import org.apache.gravitino.authorization.SecurableObjects; +import org.apache.gravitino.connector.AuthorizationPropertiesMeta; +import org.apache.gravitino.integration.test.container.HiveContainer; +import org.apache.gravitino.integration.test.container.RangerContainer; +import org.apache.gravitino.integration.test.util.GravitinoITUtils; +import org.apache.kyuubi.plugin.spark.authz.AccessControlException; +import org.apache.spark.sql.SparkSession; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Tag("gravitino-docker-test") +public class RangerPaimonE2EIT extends RangerBaseE2EIT { + private static final Logger LOG = LoggerFactory.getLogger(RangerPaimonE2EIT.class); + + private static final String provider = "lakehouse-paimon"; + private static final String SQL_USE_CATALOG = "USE paimon"; + + @BeforeAll + public void startIntegrationTest() throws Exception { + metalakeName = GravitinoITUtils.genRandomName("metalake").toLowerCase(); + // Enable Gravitino Authorization mode + Map configs = Maps.newHashMap(); + configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true)); + configs.put(Configs.SERVICE_ADMINS.getKey(), RangerITEnv.HADOOP_USER_NAME); + configs.put(Configs.AUTHENTICATORS.getKey(), AuthenticatorType.SIMPLE.name().toLowerCase()); + configs.put("SimpleAuthUserName", AuthConstants.ANONYMOUS_USER); + registerCustomConfigs(configs); + super.startIntegrationTest(); + + RangerITEnv.init(); + RangerITEnv.startHiveRangerContainer(); + + RANGER_ADMIN_URL = + String.format( + "http://%s:%d", + containerSuite.getRangerContainer().getContainerIpAddress(), RANGER_SERVER_PORT); + + HIVE_METASTORE_URIS = + String.format( + "thrift://%s:%d", + containerSuite.getHiveRangerContainer().getContainerIpAddress(), + 
HiveContainer.HIVE_METASTORE_PORT); + + generateRangerSparkSecurityXML(); + + sparkSession = + SparkSession.builder() + .master("local[1]") + .appName("Ranger Paimon E2E integration test") + .config("spark.sql.catalog.paimon", "org.apache.paimon.spark.SparkCatalog") + .config("spark.sql.catalog.paimon.metastore", "hive") + .config("spark.sql.catalog.paimon.uri", HIVE_METASTORE_URIS) + .config( + "spark.sql.catalog.paimon.warehouse", + String.format( + "hdfs://%s:%d/user/hive/warehouse", + containerSuite.getHiveRangerContainer().getContainerIpAddress(), + HiveContainer.HDFS_DEFAULTFS_PORT)) + .config("spark.sql.catalog.paimon.cache-enabled", "false") + .config( + "spark.sql.extensions", + "org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension," + + "org.apache.paimon.spark.extensions.PaimonSparkSessionExtensions") + .enableHiveSupport() + .getOrCreate(); + + createMetalake(); + createCatalog(); + + RangerITEnv.cleanup(); + metalake.addUser(System.getenv(HADOOP_USER_NAME)); + } + + @AfterAll + public void stop() { + cleanIT(); + } + + @Override + protected void useCatalog() throws InterruptedException { + String userName1 = System.getenv(HADOOP_USER_NAME); + String roleName = currentFunName(); + SecurableObject securableObject = + SecurableObjects.ofMetalake( + metalakeName, Lists.newArrayList(Privileges.UseCatalog.allow())); + metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); + metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); + waitForUpdatingPolicies(); + sparkSession.sql(SQL_USE_CATALOG); + metalake.deleteRole(roleName); + waitForUpdatingPolicies(); + } + + @Override + protected void checkUpdateSQLWithReadWritePrivileges() { + // Kyuubi Paimon Ranger plugin doesn't support to update yet. + } + + @Override + protected void checkUpdateSQLWithReadPrivileges() { + // Kyuubi Paimon Ranger plugin doesn't support to update yet. + } + + @Override + protected void checkUpdateSQLWithWritePrivileges() { + // Kyuubi Paimon Ranger plugin doesn't support to update yet. + } + + @Override + protected void checkDeleteSQLWithReadWritePrivileges() { + // Kyuubi Paimon Ranger plugin doesn't support to delete yet. + } + + @Override + protected void checkDeleteSQLWithReadPrivileges() { + // Kyuubi Paimon Ranger plugin doesn't support to delete yet. + } + + @Override + protected void checkDeleteSQLWithWritePrivileges() { + // Kyuubi Paimon Ranger plugin doesn't support to delete yet. 
+ } + + @Override + protected void checkWithoutPrivileges() { + Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE)); + Assertions.assertThrows( + AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList()); + Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE)); + Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_SCHEMA)); + Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_USE_SCHEMA)); + Assertions.assertThrows( + AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA)); + Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE)); + } + + @Override + protected void testAlterTable() { + sparkSession.sql(SQL_ALTER_TABLE); + sparkSession.sql(SQL_ALTER_TABLE_BACK); + } + + private static void createCatalog() { + Map properties = + ImmutableMap.of( + "uri", + HIVE_METASTORE_URIS, + "catalog-backend", + "hive", + "warehouse", + String.format( + "hdfs://%s:%d/user/hive/warehouse", + containerSuite.getHiveRangerContainer().getContainerIpAddress(), + HiveContainer.HDFS_DEFAULTFS_PORT), + AUTHORIZATION_PROVIDER, + "ranger", + RANGER_SERVICE_NAME, + RangerITEnv.RANGER_HIVE_REPO_NAME, + AuthorizationPropertiesMeta.RANGER_ADMIN_URL, + RANGER_ADMIN_URL, + RANGER_AUTH_TYPE, + RangerContainer.authType, + RANGER_USERNAME, + RangerContainer.rangerUserName, + RANGER_PASSWORD, + RangerContainer.rangerPassword); + + metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, provider, "comment", properties); + catalog = metalake.loadCatalog(catalogName); + LOG.info("Catalog created: {}", catalog); + } + + protected void checkTableAllPrivilegesExceptForCreating() { + // - a. Succeed to insert data into the table + sparkSession.sql(SQL_INSERT_TABLE); + + // - b. Succeed to select data from the table + sparkSession.sql(SQL_SELECT_TABLE).collectAsList(); + + // - c: Succeed to update data in the table. + sparkSession.sql(SQL_UPDATE_TABLE); + + // - d: Succeed to delete data from the table. + sparkSession.sql(SQL_DELETE_TABLE); + + // - e: Succeed to alter the table + sparkSession.sql(SQL_ALTER_TABLE); + + // - f: Succeed to drop the table + sparkSession.sql(SQL_DROP_TABLE); + } +} diff --git a/docs/security/authorization-pushdown.md b/docs/security/authorization-pushdown.md index 2d93305f5ec..dbcaa8d80d7 100644 --- a/docs/security/authorization-pushdown.md +++ b/docs/security/authorization-pushdown.md @@ -13,9 +13,9 @@ Gravitino offers a set of authorization frameworks that integrate with various u Gravitino manages different data sources through Catalogs, and when a user performs an authorization operation on data within a Catalog, Gravitino invokes the Authorization Plugin module for that Catalog. This module translates Gravitino's authorization model into the permission rules of the underlying data source. The permissions are then enforced by the underlying permission system via the respective client, such as JDBC or the Apache Ranger client. 
-### Authorization Hive with Ranger properties +### Ranger Hadoop SQL Plugin -In order to use the Authorization Ranger Hive Plugin, you need to configure the following properties and [Apache Hive catalog properties](../apache-hive-catalog.md#catalog-properties): +In order to use the Ranger Hadoop SQL Plugin, you need to configure the following properties: | Property Name | Description | Default Value | Required | Since Version | |-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| @@ -28,7 +28,7 @@ In order to use the Authorization Ranger Hive Plugin, you need to configure the Once you have used the correct configuration, you can perform authorization operations by calling Gravitino [authorization RESTful API](https://gravitino.apache.org/docs/latest/api/rest/grant-roles-to-a-user). -#### Example of using the Authorization Ranger Hive Plugin +#### Example of using the Ranger Hadoop SQL Plugin Suppose you have an Apache Hive service in your datacenter and have created a `hiveRepo` in Apache Ranger to manage its permissions. The Ranger service is accessible at `172.0.0.100:6080`, with the username `Jack` and the password `PWD123`. @@ -43,36 +43,8 @@ authorization.ranger.password=PWD123 authorization.ranger.service.name=hiveRepo ``` -### Authorization Iceberg with Ranger properties - -In order to use the Authorization Ranger Iceberg Plugin, you need to configure the following properties and [Lakehouse_Iceberg catalog properties](../lakehouse-iceberg-catalog.md#catalog-properties): - -| Property Name | Description | Default Value | Required | Since Version | -|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| -| `authorization-provider` | Providers to use to implement authorization plugin such as `ranger`. | (none) | No | 0.8.0-incubating | -| `authorization.ranger.admin.url` | The Apache Ranger web URIs. | (none) | No | 0.8.0-incubating | -| `authorization.ranger.auth.type` | The Apache Ranger authentication type `simple` or `kerberos`. | `simple` | No | 0.8.0-incubating | -| `authorization.ranger.username` | The Apache Ranger admin web login username (auth type=simple), or kerberos principal(auth type=kerberos), Need have Ranger administrator permission. | (none) | No | 0.8.0-incubating | -| `authorization.ranger.password` | The Apache Ranger admin web login user password (auth type=simple), or path of the keytab file(auth type=kerberos) | (none) | No | 0.8.0-incubating | -| `authorization.ranger.service.name` | The Apache Ranger service name. | (none) | No | 0.8.0-incubating | - -Once you have used the correct configuration, you can perform authorization operations by calling Gravitino [authorization RESTful API](https://gravitino.apache.org/docs/latest/api/rest/grant-roles-to-a-user). - -#### Example of using the Authorization Ranger Iceberg Plugin - -Suppose you have an Apache Hive service in your datacenter and have created a `icebergRepo` in Apache Ranger to manage its permissions. -The Ranger service is accessible at `172.0.0.100:6080`, with the username `Jack` and the password `PWD123`. -To add this Hive service to Gravitino using the Hive catalog, you'll need to configure the following parameters. 
-
-```properties
-authorization-provider=ranger
-authorization.ranger.admin.url=172.0.0.100:6080
-authorization.ranger.auth.type=simple
-authorization.ranger.username=Jack
-authorization.ranger.password=PWD123
-authorization.ranger.service.name=icebergRepo
-```
-
 :::caution
-Gravitino 0.8.0 only supports the authorization Apache Ranger Hive service and Apache Iceberg service. More data source authorization is under development.
+Gravitino 0.8.0 only supports authorization for the Apache Ranger Hive service, Apache Iceberg service, and Apache Paimon service.
+Spark can use the Kyuubi authorization plugin to access Gravitino's catalogs, but the plugin doesn't support updating or deleting data in a Paimon catalog.
+More data source authorization is under development.
 :::
\ No newline at end of file
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 3241a48375e..6629c8897a3 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -58,6 +58,7 @@ spark35 = "3.5.1"
 kyuubi4spark33 = "1.7.4"
 kyuubi4spark34 = "1.8.2"
 kyuubi4spark35 = "1.9.0"
+kyuubi4paimon = "1.10.0"
 trino = '435'
 scala-collection-compat = "2.7.0"
 scala-java-compat = "1.0.2"