diff --git a/.github/workflows/backend-integration-test-action.yml b/.github/workflows/backend-integration-test-action.yml index 69cfc3164cd..b15c5d226ca 100644 --- a/.github/workflows/backend-integration-test-action.yml +++ b/.github/workflows/backend-integration-test-action.yml @@ -60,7 +60,8 @@ jobs: -x :web:web:test -x :web:integration-test:test -x :clients:client-python:test -x :flink-connector:flink:test -x :spark-connector:spark-common:test -x :spark-connector:spark-3.3:test -x :spark-connector:spark-3.4:test -x :spark-connector:spark-3.5:test -x :spark-connector:spark-runtime-3.3:test -x :spark-connector:spark-runtime-3.4:test -x :spark-connector:spark-runtime-3.5:test - -x :authorizations:authorization-ranger:test -x :trino-connector:integration-test:test -x :trino-connector:trino-connector:test + -x :trino-connector:integration-test:test -x :trino-connector:trino-connector:test + -x :authorizations:authorization-chain:test -x :authorizations:authorization-ranger:test - name: Upload integrate tests reports uses: actions/upload-artifact@v3 diff --git a/api/src/main/java/org/apache/gravitino/authorization/MetadataObjectChange.java b/api/src/main/java/org/apache/gravitino/authorization/MetadataObjectChange.java index a7281d97d5a..db14cd4b0d2 100644 --- a/api/src/main/java/org/apache/gravitino/authorization/MetadataObjectChange.java +++ b/api/src/main/java/org/apache/gravitino/authorization/MetadataObjectChange.java @@ -19,6 +19,7 @@ package org.apache.gravitino.authorization; import com.google.common.base.Preconditions; +import java.util.List; import java.util.Objects; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.annotation.Evolving; @@ -44,10 +45,11 @@ static MetadataObjectChange rename( * Remove a metadata entity MetadataObjectChange. * * @param metadataObject The metadata object. + * @param locations The locations of the metadata object. * @return return a MetadataObjectChange for the remove metadata object. 
*/ - static MetadataObjectChange remove(MetadataObject metadataObject) { - return new RemoveMetadataObject(metadataObject); + static MetadataObjectChange remove(MetadataObject metadataObject, List locations) { + return new RemoveMetadataObject(metadataObject, locations); } /** A RenameMetadataObject is to rename securable object's metadata entity. */ @@ -127,9 +129,11 @@ public String toString() { /** A RemoveMetadataObject is to remove securable object's metadata entity. */ final class RemoveMetadataObject implements MetadataObjectChange { private final MetadataObject metadataObject; + private final List locations; - private RemoveMetadataObject(MetadataObject metadataObject) { + private RemoveMetadataObject(MetadataObject metadataObject, List locations) { this.metadataObject = metadataObject; + this.locations = locations; } /** @@ -141,6 +145,15 @@ public MetadataObject metadataObject() { return metadataObject; } + /** + * Returns the location path of the metadata object. + * + * @return return a location path. + */ + public List getLocations() { + return locations; + } + /** * Compares this RemoveMetadataObject instance with another object for equality. The comparison * is based on the old metadata entity. 
diff --git a/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java b/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java index 74ad99aa7f9..d6cef92d537 100644 --- a/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java +++ b/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java @@ -27,11 +27,15 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.apache.gravitino.Catalog; import org.apache.gravitino.Configs; +import org.apache.gravitino.MetadataObject; +import org.apache.gravitino.MetadataObjects; import org.apache.gravitino.auth.AuthConstants; import org.apache.gravitino.auth.AuthenticatorType; +import org.apache.gravitino.authorization.Owner; import org.apache.gravitino.authorization.Privileges; import org.apache.gravitino.authorization.SecurableObject; import org.apache.gravitino.authorization.SecurableObjects; @@ -49,11 +53,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.kyuubi.plugin.spark.authz.AccessControlException; +import org.apache.ranger.RangerServiceException; +import org.apache.ranger.plugin.model.RangerPolicy; import org.apache.spark.sql.SparkSession; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.platform.commons.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -150,12 +158,80 @@ public void stop() throws IOException { RangerITEnv.cleanup(); } + @AfterEach + void clean() { + try { + 
List rangerHivePolicies = + RangerITEnv.rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME); + List rangerHdfsPolicies = + RangerITEnv.rangerClient.getPoliciesInService(RangerITEnv.RANGER_HDFS_REPO_NAME); + rangerHivePolicies.stream().forEach(policy -> LOG.info("Ranger Hive policy: {}", policy)); + rangerHdfsPolicies.stream().forEach(policy -> LOG.info("Ranger HDFS policy: {}", policy)); + Preconditions.condition( + rangerHivePolicies.size() == 0, "Ranger Hive policies should be empty"); + Preconditions.condition( + rangerHdfsPolicies.size() == 0, "Ranger HDFS policies should be empty"); + } catch (RangerServiceException e) { + throw new RuntimeException(e); + } + } + + @Override + protected String testUserName() { + return AuthConstants.ANONYMOUS_USER; + } + + @Override + protected void createCatalog() { + Map catalogConf = new HashMap<>(); + catalogConf.put(HiveConstants.METASTORE_URIS, HIVE_METASTORE_URIS); + catalogConf.put(IMPERSONATION_ENABLE, "true"); + catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain"); + catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1"); + catalogConf.put("authorization.chain.hive1.provider", "ranger"); + catalogConf.put("authorization.chain.hive1.ranger.auth.type", RangerContainer.authType); + catalogConf.put("authorization.chain.hive1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL); + catalogConf.put("authorization.chain.hive1.ranger.username", RangerContainer.rangerUserName); + catalogConf.put("authorization.chain.hive1.ranger.password", RangerContainer.rangerPassword); + catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL"); + catalogConf.put( + "authorization.chain.hive1.ranger.service.name", RangerITEnv.RANGER_HIVE_REPO_NAME); + catalogConf.put("authorization.chain.hdfs1.provider", "ranger"); + catalogConf.put("authorization.chain.hdfs1.ranger.auth.type", RangerContainer.authType); + catalogConf.put("authorization.chain.hdfs1.ranger.admin.url", 
RangerITEnv.RANGER_ADMIN_URL); + catalogConf.put("authorization.chain.hdfs1.ranger.username", RangerContainer.rangerUserName); + catalogConf.put("authorization.chain.hdfs1.ranger.password", RangerContainer.rangerPassword); + catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS"); + catalogConf.put( + "authorization.chain.hdfs1.ranger.service.name", RangerITEnv.RANGER_HDFS_REPO_NAME); + + metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, "hive", "comment", catalogConf); + catalog = metalake.loadCatalog(catalogName); + LOG.info("Catalog created: {}", catalog); + } + private String storageLocation(String dirName) { return DEFAULT_FS + "/" + dirName; } @Test public void testCreateSchemaInCatalog() throws IOException { + SecurableObject securableObject = + SecurableObjects.ofCatalog( + catalogName, Lists.newArrayList(Privileges.CreateSchema.allow())); + doTestCreateSchema(currentFunName(), securableObject); + } + + @Test + public void testCreateSchemaInMetalake() throws IOException { + SecurableObject securableObject = + SecurableObjects.ofMetalake( + metalakeName, Lists.newArrayList(Privileges.CreateSchema.allow())); + doTestCreateSchema(currentFunName(), securableObject); + } + + private void doTestCreateSchema(String roleName, SecurableObject securableObject) + throws IOException { // Choose a catalog useCatalog(); @@ -168,22 +244,17 @@ public void testCreateSchemaInCatalog() throws IOException { .contains( String.format( "Permission denied: user [%s] does not have [create] privilege", - AuthConstants.ANONYMOUS_USER)) + testUserName())) || accessControlException .getMessage() .contains( - String.format( - "Permission denied: user=%s, access=WRITE", AuthConstants.ANONYMOUS_USER))); + String.format("Permission denied: user=%s, access=WRITE", testUserName()))); Path schemaPath = new Path(storageLocation(schemaName + ".db")); Assertions.assertFalse(fileSystem.exists(schemaPath)); FileStatus fileStatus = fileSystem.getFileStatus(new 
Path(DEFAULT_FS)); Assertions.assertEquals(System.getenv(HADOOP_USER_NAME), fileStatus.getOwner()); // Second, grant the `CREATE_SCHEMA` role - String roleName = currentFunName(); - SecurableObject securableObject = - SecurableObjects.ofCatalog( - catalogName, Lists.newArrayList(Privileges.CreateSchema.allow())); metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(roleName), AuthConstants.ANONYMOUS_USER); waitForUpdatingPolicies(); @@ -198,7 +269,15 @@ public void testCreateSchemaInCatalog() throws IOException { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE)); // Clean up + // Set owner + MetadataObject schemaObject = + MetadataObjects.of(catalogName, schemaName, MetadataObject.Type.SCHEMA); + metalake.setOwner(schemaObject, testUserName(), Owner.Type.USER); + waitForUpdatingPolicies(); + sparkSession.sql(SQL_DROP_SCHEMA); catalog.asSchemas().dropSchema(schemaName, false); + Assertions.assertFalse(fileSystem.exists(schemaPath)); + metalake.deleteRole(roleName); waitForUpdatingPolicies(); @@ -218,33 +297,14 @@ public void testCreateSchemaInCatalog() throws IOException { "Permission denied: user=%s, access=WRITE", AuthConstants.ANONYMOUS_USER))); } - @Override - public void createCatalog() { - Map catalogConf = new HashMap<>(); - catalogConf.put(HiveConstants.METASTORE_URIS, HIVE_METASTORE_URIS); - catalogConf.put(IMPERSONATION_ENABLE, "true"); - catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain"); - catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1"); - catalogConf.put("authorization.chain.hive1.provider", "ranger"); - catalogConf.put("authorization.chain.hive1.ranger.auth.type", RangerContainer.authType); - catalogConf.put("authorization.chain.hive1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL); - catalogConf.put("authorization.chain.hive1.ranger.username", 
RangerContainer.rangerUserName); - catalogConf.put("authorization.chain.hive1.ranger.password", RangerContainer.rangerPassword); - catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL"); - catalogConf.put( - "authorization.chain.hive1.ranger.service.name", RangerITEnv.RANGER_HIVE_REPO_NAME); - catalogConf.put("authorization.chain.hdfs1.provider", "ranger"); - catalogConf.put("authorization.chain.hdfs1.ranger.auth.type", RangerContainer.authType); - catalogConf.put("authorization.chain.hdfs1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL); - catalogConf.put("authorization.chain.hdfs1.ranger.username", RangerContainer.rangerUserName); - catalogConf.put("authorization.chain.hdfs1.ranger.password", RangerContainer.rangerPassword); - catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS"); - catalogConf.put( - "authorization.chain.hdfs1.ranger.service.name", RangerITEnv.RANGER_HDFS_REPO_NAME); + @Test + protected void testAllowUseSchemaPrivilege() throws InterruptedException { + // TODO + } - metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, "hive", "comment", catalogConf); - catalog = metalake.loadCatalog(catalogName); - LOG.info("Catalog created: {}", catalog); + @Test + public void testRenameMetalakeOrCatalog() { + // TODO } @Test @@ -307,11 +367,6 @@ void testChangeOwner() throws InterruptedException { // TODO } - @Test - void testAllowUseSchemaPrivilege() throws InterruptedException { - // TODO - } - @Test void testDenyPrivileges() throws InterruptedException { // TODO diff --git a/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedMetadataObject.java b/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedMetadataObject.java index ed67b1cc0fc..48071a6f5b4 100644 --- a/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedMetadataObject.java +++ 
b/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedMetadataObject.java @@ -19,9 +19,7 @@ package org.apache.gravitino.authorization.common; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; import java.util.List; -import javax.annotation.Nullable; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.authorization.AuthorizationMetadataObject; @@ -44,29 +42,37 @@ public MetadataObject.Type metadataObjectType() { } } + private final String name; + private final String parent; private final String path; private final AuthorizationMetadataObject.Type type; - public PathBasedMetadataObject(String path, AuthorizationMetadataObject.Type type) { + public PathBasedMetadataObject( + String parent, String name, String path, AuthorizationMetadataObject.Type type) { + this.parent = parent; + this.name = name; this.path = path; this.type = type; } - @Nullable @Override - public String parent() { - return null; + public String name() { + return name; } @Override - public String name() { - return this.path; + public List names() { + return DOT_SPLITTER.splitToList(fullName()); } @Override - public List names() { - return ImmutableList.of(this.path); + public String parent() { + return parent; + } + + public String path() { + return path; } @Override @@ -81,11 +87,7 @@ public void validateAuthorizationMetadataObject() throws IllegalArgumentExceptio names != null && !names.isEmpty(), "Cannot create a path based metadata object with no names"); Preconditions.checkArgument( - names.size() == 1, - "Cannot create a path based metadata object with the name length which is 1"); - Preconditions.checkArgument( - type != null, "Cannot create a path based metadata object with no type"); - + path != null && !path.isEmpty(), "Cannot create a path based metadata object with no path"); Preconditions.checkArgument( type == PathBasedMetadataObject.Type.PATH, "it must be the PATH type"); 
diff --git a/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedSecurableObject.java b/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedSecurableObject.java index 6712cdf0e3d..aa2262fb169 100644 --- a/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedSecurableObject.java +++ b/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/common/PathBasedSecurableObject.java @@ -31,8 +31,12 @@ public class PathBasedSecurableObject extends PathBasedMetadataObject private final List privileges; public PathBasedSecurableObject( - String path, AuthorizationMetadataObject.Type type, Set privileges) { - super(path, type); + String parent, + String name, + String path, + AuthorizationMetadataObject.Type type, + Set privileges) { + super(parent, name, path, type); this.privileges = ImmutableList.copyOf(privileges); } diff --git a/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/jdbc/JdbcSecurableObjectMappingProvider.java b/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/jdbc/JdbcSecurableObjectMappingProvider.java index 70b2d10e39c..fc3fa0aff77 100644 --- a/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/jdbc/JdbcSecurableObjectMappingProvider.java +++ b/authorizations/authorization-common/src/main/java/org/apache/gravitino/authorization/jdbc/JdbcSecurableObjectMappingProvider.java @@ -193,7 +193,7 @@ public List translateOwner(MetadataObject metadata } @Override - public AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject) { + public List translateMetadataObject(MetadataObject metadataObject) { throw new UnsupportedOperationException("Not supported"); } diff --git a/authorizations/authorization-ranger/build.gradle.kts 
b/authorizations/authorization-ranger/build.gradle.kts index 8cc82250c23..b0094178fae 100644 --- a/authorizations/authorization-ranger/build.gradle.kts +++ b/authorizations/authorization-ranger/build.gradle.kts @@ -82,6 +82,7 @@ dependencies { testImplementation(project(":integration-test-common", "testArtifacts")) testImplementation(libs.junit.jupiter.api) testImplementation(libs.mockito.core) + testImplementation(libs.mockito.inline) testImplementation(libs.testcontainers) testImplementation(libs.mysql.driver) testImplementation(libs.postgresql.driver) diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java index bc3d309e1d1..4db58e91e66 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java @@ -18,20 +18,26 @@ */ package org.apache.gravitino.authorization.ranger; -import com.google.common.annotations.VisibleForTesting; +import static org.apache.gravitino.authorization.common.PathBasedMetadataObject.Type.PATH; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.COLUMN; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.SCHEMA; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.TABLE; + import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import 
java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.regex.Pattern; -import org.apache.gravitino.Catalog; +import java.util.stream.Collectors; +import org.apache.gravitino.Entity; import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.NameIdentifier; @@ -40,21 +46,24 @@ import org.apache.gravitino.authorization.AuthorizationMetadataObject; import org.apache.gravitino.authorization.AuthorizationPrivilege; import org.apache.gravitino.authorization.AuthorizationSecurableObject; +import org.apache.gravitino.authorization.AuthorizationUtils; +import org.apache.gravitino.authorization.MetadataObjectChange; import org.apache.gravitino.authorization.Privilege; +import org.apache.gravitino.authorization.Privileges; import org.apache.gravitino.authorization.SecurableObject; -import org.apache.gravitino.authorization.SecurableObjects; import org.apache.gravitino.authorization.common.PathBasedMetadataObject; import org.apache.gravitino.authorization.common.PathBasedSecurableObject; import org.apache.gravitino.authorization.ranger.reference.RangerDefines; -import org.apache.gravitino.catalog.FilesetDispatcher; -import org.apache.gravitino.catalog.hive.HiveConstants; import org.apache.gravitino.exceptions.AuthorizationPluginException; -import org.apache.gravitino.exceptions.NoSuchEntityException; -import org.apache.gravitino.file.Fileset; +import org.apache.gravitino.utils.MetadataObjectUtil; +import org.apache.ranger.RangerServiceException; import org.apache.ranger.plugin.model.RangerPolicy; +import org.apache.ranger.plugin.util.SearchFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class RangerAuthorizationHDFSPlugin extends RangerAuthorizationPlugin { - private static final Pattern pattern = Pattern.compile("^hdfs://[^/]*"); + private static final Logger LOG = LoggerFactory.getLogger(RangerAuthorizationHDFSPlugin.class); public 
RangerAuthorizationHDFSPlugin(String metalake, Map config) { super(metalake, config); @@ -118,13 +127,349 @@ public List policyResourceDefinesRule() { return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName()); } + /** + * Find the managed policy for the ranger securable object. + * + * @param authzMetadataObject The ranger securable object to find the managed policy. + * @return The managed policy for the metadata object. + */ + public RangerPolicy findManagedPolicy(AuthorizationMetadataObject authzMetadataObject) + throws AuthorizationPluginException { + List policies = wildcardSearchPolies(authzMetadataObject); + if (!policies.isEmpty()) { + /** + * Because Ranger doesn't support the precise search, Ranger will return the policy meets the + * wildcard(*,?) conditions, If you use `/a/b` condition to search policy, the Ranger will + * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely filter this research + * results. + */ + List nsMetadataObj = authzMetadataObject.names(); + PathBasedMetadataObject pathAuthzMetadataObject = + (PathBasedMetadataObject) authzMetadataObject; + Map preciseFilters = new HashMap<>(); + for (int i = 0; i < nsMetadataObj.size() && i < policyResourceDefinesRule().size(); i++) { + preciseFilters.put(policyResourceDefinesRule().get(i), pathAuthzMetadataObject.path()); + } + policies = + policies.stream() + .filter( + policy -> + policy.getResources().entrySet().stream() + .allMatch( + entry -> + preciseFilters.containsKey(entry.getKey()) + && entry.getValue().getValues().size() == 1 + && entry + .getValue() + .getValues() + .contains(preciseFilters.get(entry.getKey())))) + .collect(Collectors.toList()); + } + // Only return the policies that are managed by Gravitino. 
+ if (policies.size() > 1) { + throw new AuthorizationPluginException("Each metadata object can have at most one policy."); + } + + if (policies.isEmpty()) { + return null; + } + + RangerPolicy policy = policies.get(0); + // Delegating Gravitino management policies cannot contain duplicate privilege + policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + + return policy; + } + + @Override + /** Wildcard search the Ranger policies in the different Ranger service. */ + protected List wildcardSearchPolies( + AuthorizationMetadataObject authzMetadataObject) { + Preconditions.checkArgument(authzMetadataObject instanceof PathBasedMetadataObject); + PathBasedMetadataObject pathBasedMetadataObject = (PathBasedMetadataObject) authzMetadataObject; + List resourceDefines = policyResourceDefinesRule(); + Map searchFilters = new HashMap<>(); + searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName); + resourceDefines.stream() + .forEach( + resourceDefine -> { + searchFilters.put( + SearchFilter.RESOURCE_PREFIX + resourceDefine, pathBasedMetadataObject.path()); + }); + try { + List policies = rangerClient.findPolicies(searchFilters); + return policies; + } catch (RangerServiceException e) { + throw new AuthorizationPluginException(e, "Failed to find the policies in the Ranger"); + } + } + + /** + * IF rename the SCHEMA, Need to rename these the relevant policies, `{schema}`, `{schema}.*`, + * `{schema}.*.*`
+ * IF rename the TABLE, need to rename the relevant policies, `{schema}.*`, `{schema}.*.*` + *
+ */ + @Override + protected void doRenameMetadataObject( + AuthorizationMetadataObject authzMetadataObject, + AuthorizationMetadataObject newAuthzMetadataObject) { + List> loop; + if (newAuthzMetadataObject.type().equals(SCHEMA)) { + loop = + ImmutableList.of( + ImmutableMap.of( + authzMetadataObject.names().get(0), newAuthzMetadataObject.names().get(0)), + ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL)); + } else if (newAuthzMetadataObject.type().equals(TABLE)) { + loop = + ImmutableList.of( + ImmutableMap.of( + authzMetadataObject.names().get(0), newAuthzMetadataObject.names().get(0)), + ImmutableMap.of( + authzMetadataObject.names().get(1), newAuthzMetadataObject.names().get(1)), + ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL)); + } else if (newAuthzMetadataObject.type().equals(COLUMN)) { + loop = + ImmutableList.of( + ImmutableMap.of( + authzMetadataObject.names().get(0), newAuthzMetadataObject.names().get(0)), + ImmutableMap.of( + authzMetadataObject.names().get(1), newAuthzMetadataObject.names().get(1)), + ImmutableMap.of( + authzMetadataObject.names().get(2), newAuthzMetadataObject.names().get(2))); + } else if (newAuthzMetadataObject.type().equals(PATH)) { + // do nothing when fileset is renamed + return; + } else { + throw new IllegalArgumentException( + "Unsupported metadata object type: " + authzMetadataObject.type()); + } + + List oldMetadataNames = new ArrayList<>(); + List newMetadataNames = new ArrayList<>(); + for (int index = 0; index < loop.size(); index++) { + oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get()); + newMetadataNames.add(loop.get(index).values().stream().findFirst().get()); + + AuthorizationMetadataObject.Type type = + (index == 0 + ? RangerHadoopSQLMetadataObject.Type.SCHEMA + : (index == 1 + ? 
RangerHadoopSQLMetadataObject.Type.TABLE + : RangerHadoopSQLMetadataObject.Type.COLUMN)); + AuthorizationMetadataObject oldHadoopSQLMetadataObject = + new RangerHadoopSQLMetadataObject( + AuthorizationMetadataObject.getParentFullName(oldMetadataNames), + AuthorizationMetadataObject.getLastName(oldMetadataNames), + type); + AuthorizationMetadataObject newHadoopSQLMetadataObject = + new RangerHadoopSQLMetadataObject( + AuthorizationMetadataObject.getParentFullName(newMetadataNames), + AuthorizationMetadataObject.getLastName(newMetadataNames), + type); + updatePolicyByMetadataObject( + MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject, newHadoopSQLMetadataObject); + } + } + + @Override + protected void updatePolicyByMetadataObject( + MetadataObject.Type operationType, + AuthorizationMetadataObject oldAuthzMetaobject, + AuthorizationMetadataObject newAuthzMetaobject) { + List oldPolicies = wildcardSearchPolies(oldAuthzMetaobject); + List existNewPolicies = wildcardSearchPolies(newAuthzMetaobject); + if (oldPolicies.isEmpty()) { + LOG.warn("Cannot find the Ranger policy for the metadata object({})!", oldAuthzMetaobject); + } + if (!existNewPolicies.isEmpty()) { + LOG.warn("The Ranger policy for the metadata object({}) already exists!", newAuthzMetaobject); + } + Map operationTypeIndex = + ImmutableMap.of( + MetadataObject.Type.SCHEMA, 0, + MetadataObject.Type.TABLE, 1, + MetadataObject.Type.COLUMN, 2); + oldPolicies.stream() + .forEach( + policy -> { + try { + String policyName = policy.getName(); + int index = operationTypeIndex.get(operationType); + + // Update the policy name is following Gravitino's spec + if (policy + .getName() + .equals( + AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) { + List policyNames = + Lists.newArrayList( + AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName)); + Preconditions.checkArgument( + policyNames.size() >= oldAuthzMetaobject.names().size(), + String.format("The policy name(%s) is 
invalid!", policyName)); + if (policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) { + // Doesn't need to rename the policy `*` + return; + } + policyNames.set(index, newAuthzMetaobject.names().get(index)); + policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames)); + } + // Update the policy resource name to new name + policy + .getResources() + .put( + rangerHelper.policyResourceDefines.get(index), + new RangerPolicy.RangerPolicyResource( + newAuthzMetaobject.names().get(index))); + + boolean alreadyExist = + existNewPolicies.stream() + .anyMatch( + existNewPolicy -> + existNewPolicy.getName().equals(policy.getName()) + || existNewPolicy.getResources().equals(policy.getResources())); + if (alreadyExist) { + LOG.warn( + "The Ranger policy for the metadata object({}) already exists!", + newAuthzMetaobject); + return; + } + + // Update the policy + rangerClient.updatePolicy(policy.getId(), policy); + } catch (RangerServiceException e) { + LOG.error("Failed to rename the policy {}!", policy); + throw new RuntimeException(e); + } + }); + } + + /** + * IF remove the SCHEMA, need to remove these the relevant policies, `{schema}`, `{schema}.*`, + * `{schema}.*.*`
+ * IF remove the TABLE, need to remove the relevant policies, `{schema}.*`, `{schema}.*.*` + *
+ * IF remove the COLUMN, only need to remove `{schema}.*.*`
+ */ + @Override + protected void doRemoveMetadataObject(AuthorizationMetadataObject authzMetadataObject) { + if (authzMetadataObject.type().equals(SCHEMA)) { + doRemoveSchemaMetadataObject(authzMetadataObject); + } else if (authzMetadataObject.type().equals(TABLE)) { + doRemoveTableMetadataObject(authzMetadataObject); + } else if (authzMetadataObject.type().equals(PATH)) { + removePolicyByMetadataObject(authzMetadataObject); + } else { + throw new IllegalArgumentException( + "Unsupported authorization metadata object type: " + authzMetadataObject.type()); + } + } + + /** + * Remove the SCHEMA, Need to remove these the relevant policies, `{schema}`, `{schema}.*`, + * `{schema}.*.*` permissions. + */ + private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject authzMetadataObject) { + Preconditions.checkArgument( + authzMetadataObject instanceof PathBasedMetadataObject, + "The metadata object must be a PathBasedMetadataObject"); + Preconditions.checkArgument( + authzMetadataObject.type() == SCHEMA, "The metadata object type must be SCHEMA"); + Preconditions.checkArgument( + authzMetadataObject.names().size() == 1, "The metadata object names must be 1"); + if (RangerHelper.RESOURCE_ALL.equals(authzMetadataObject.name())) { + // Remove all schema in this catalog + NameIdentifier[] catalogs = + GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake)); + Arrays.asList(catalogs).stream() + .forEach( + catalog -> { + List catalogLocations = + AuthorizationUtils.getMetadataObjectLocation( + NameIdentifier.of(catalog.name()), Entity.EntityType.CATALOG); + catalogLocations.stream() + .forEach( + locationPath -> { + AuthorizationMetadataObject catalogMetadataObject = + new PathBasedMetadataObject( + metalake, catalog.name(), locationPath, PATH); + doRemoveSchemaMetadataObject(catalogMetadataObject); + }); + }); + } else { + // Remove all table in this schema + NameIdentifier[] tables = + GravitinoEnv.getInstance() + .tableDispatcher() + 
.listTables(Namespace.of(authzMetadataObject.name())); + Arrays.asList(tables).stream() + .forEach( + table -> { + NameIdentifier identifier = + NameIdentifier.of(authzMetadataObject.name(), table.name()); + List tabLocations = + AuthorizationUtils.getMetadataObjectLocation( + identifier, Entity.EntityType.TABLE); + tabLocations.stream() + .forEach( + locationPath -> { + AuthorizationMetadataObject tableMetadataObject = + new PathBasedMetadataObject( + authzMetadataObject.name(), table.name(), locationPath, PATH); + doRemoveTableMetadataObject(tableMetadataObject); + }); + // Remove schema + Schema schema = + GravitinoEnv.getInstance() + .schemaDispatcher() + .loadSchema(NameIdentifier.of(authzMetadataObject.name())); + List schemaLocations = + AuthorizationUtils.getMetadataObjectLocation( + identifier, Entity.EntityType.SCHEMA); + schemaLocations.stream() + .forEach( + locationPath -> { + AuthorizationMetadataObject schemaMetadataObject = + new PathBasedMetadataObject( + authzMetadataObject.name(), schema.name(), locationPath, PATH); + removePolicyByMetadataObject(schemaMetadataObject); + }); + }); + } + } + + /** + * Remove the TABLE, Need to remove these the relevant policies, `*.{table}`, `*.{table}.{column}` + * permissions. 
+ */ + private void doRemoveTableMetadataObject(AuthorizationMetadataObject authzMetadataObject) { + Preconditions.checkArgument( + authzMetadataObject instanceof PathBasedMetadataObject, + "The metadata object must be a PathBasedMetadataObject"); + Preconditions.checkArgument( + authzMetadataObject.names().size() == 3, "The metadata object names must be 3"); + Preconditions.checkArgument( + authzMetadataObject.type() == PATH, "The metadata object type must be PATH"); + removePolicyByMetadataObject(authzMetadataObject); + } + @Override protected RangerPolicy createPolicyAddResources(AuthorizationMetadataObject metadataObject) { + Preconditions.checkArgument( + metadataObject instanceof PathBasedMetadataObject, + "The metadata object must be a PathBasedMetadataObject"); RangerPolicy policy = new RangerPolicy(); policy.setService(rangerServiceName); policy.setName(metadataObject.fullName()); RangerPolicy.RangerPolicyResource policyResource = - new RangerPolicy.RangerPolicyResource(metadataObject.names().get(0), false, true); + new RangerPolicy.RangerPolicyResource( + ((PathBasedMetadataObject) metadataObject).path(), false, true); policy.getResources().put(RangerDefines.PolicyResource.PATH.getName(), policyResource); return policy; } @@ -132,13 +477,22 @@ protected RangerPolicy createPolicyAddResources(AuthorizationMetadataObject meta @Override public AuthorizationSecurableObject generateAuthorizationSecurableObject( List names, + String path, AuthorizationMetadataObject.Type type, Set privileges) { AuthorizationMetadataObject authMetadataObject = - new PathBasedMetadataObject(AuthorizationMetadataObject.getLastName(names), type); + new PathBasedMetadataObject( + AuthorizationMetadataObject.getParentFullName(names), + AuthorizationMetadataObject.getLastName(names), + path, + type); authMetadataObject.validateAuthorizationMetadataObject(); return new PathBasedSecurableObject( - authMetadataObject.name(), authMetadataObject.type(), privileges); + 
authMetadataObject.parent(), + authMetadataObject.name(), + path, + authMetadataObject.type(), + privileges); } @Override @@ -159,7 +513,10 @@ public Set allowMetadataObjectTypesRule() { @Override public List translatePrivilege(SecurableObject securableObject) { List rangerSecurableObjects = new ArrayList<>(); - + NameIdentifier identifier = + securableObject.type().equals(MetadataObject.Type.METALAKE) + ? NameIdentifier.of(securableObject.fullName()) + : NameIdentifier.parse(String.join(".", metalake, securableObject.fullName())); securableObject.privileges().stream() .filter(Objects::nonNull) .forEach( @@ -183,36 +540,66 @@ public List translatePrivilege(SecurableObject sec // in the RangerAuthorizationHDFSPlugin. break; case USE_SCHEMA: + switch (securableObject.type()) { + case METALAKE: + case CATALOG: + case SCHEMA: + AuthorizationUtils.getMetadataObjectLocation( + identifier, MetadataObjectUtil.toEntityType(securableObject)) + .stream() + .forEach( + locationPath -> { + PathBasedMetadataObject pathBaseMetadataObject = + new PathBasedMetadataObject( + securableObject.parent(), + securableObject.name(), + locationPath, + PathBasedMetadataObject.Type.PATH); + pathBaseMetadataObject.validateAuthorizationMetadataObject(); + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + pathBaseMetadataObject.names(), + locationPath, + PathBasedMetadataObject.Type.PATH, + rangerPrivileges)); + }); + break; + default: + checkOmissionTranslate( + Privileges.UseSchema.allow(), + securableObject.type(), + gravitinoPrivilege.name()); + } break; case CREATE_SCHEMA: switch (securableObject.type()) { case METALAKE: case CATALOG: - { - String locationPath = getLocationPath(securableObject); - if (locationPath != null && !locationPath.isEmpty()) { - PathBasedMetadataObject rangerPathBaseMetadataObject = - new PathBasedMetadataObject( - locationPath, PathBasedMetadataObject.Type.PATH); - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - 
rangerPathBaseMetadataObject.names(), - PathBasedMetadataObject.Type.PATH, - rangerPrivileges)); - } - } - break; - case FILESET: - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - translateMetadataObject(securableObject).names(), - PathBasedMetadataObject.Type.PATH, - rangerPrivileges)); + AuthorizationUtils.getMetadataObjectLocation( + identifier, MetadataObjectUtil.toEntityType(securableObject)) + .stream() + .forEach( + locationPath -> { + PathBasedMetadataObject pathBaseMetadataObject = + new PathBasedMetadataObject( + securableObject.parent(), + securableObject.name(), + locationPath, + PathBasedMetadataObject.Type.PATH); + pathBaseMetadataObject.validateAuthorizationMetadataObject(); + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + pathBaseMetadataObject.names(), + locationPath, + PathBasedMetadataObject.Type.PATH, + rangerPrivileges)); + }); break; default: - throw new AuthorizationPluginException( - "The privilege %s is not supported for the securable object: %s", - gravitinoPrivilege.name(), securableObject.type()); + checkOmissionTranslate( + Privileges.CreateSchema.allow(), + securableObject.type(), + gravitinoPrivilege.name()); } break; case SELECT_TABLE: @@ -231,11 +618,21 @@ public List translatePrivilege(SecurableObject sec case SCHEMA: break; case FILESET: - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - translateMetadataObject(securableObject).names(), - PathBasedMetadataObject.Type.PATH, - rangerPrivileges)); + translateMetadataObject(securableObject).stream() + .forEach( + metadataObject -> { + Preconditions.checkArgument( + metadataObject instanceof PathBasedMetadataObject, + "The metadata object must be a PathBasedMetadataObject"); + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) metadataObject; + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + pathBasedMetadataObject.names(), + pathBasedMetadataObject.path(), + 
PathBasedMetadataObject.Type.PATH, + rangerPrivileges)); + }); break; default: throw new AuthorizationPluginException( @@ -253,6 +650,17 @@ public List translatePrivilege(SecurableObject sec return rangerSecurableObjects; } + private void checkOmissionTranslate( + Privileges.GenericPrivilege privilege, + MetadataObject.Type gravitinoType, + Privilege.Name gravitinoPrivilege) { + Preconditions.checkArgument( + privilege.canBindTo(gravitinoType), + "The translate %s privilege for %s is omitted!", + gravitinoType, + gravitinoPrivilege); + } + @Override public List translateOwner(MetadataObject gravitinoMetadataObject) { List rangerSecurableObjects = new ArrayList<>(); @@ -262,11 +670,21 @@ public List translateOwner(MetadataObject gravitin case SCHEMA: break; case FILESET: - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - translateMetadataObject(gravitinoMetadataObject).names(), - PathBasedMetadataObject.Type.PATH, - ownerMappingRule())); + translateMetadataObject(gravitinoMetadataObject).stream() + .forEach( + metadataObject -> { + Preconditions.checkArgument( + metadataObject instanceof PathBasedMetadataObject, + "The metadata object must be a PathBasedMetadataObject"); + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) metadataObject; + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + pathBasedMetadataObject.names(), + pathBasedMetadataObject.path(), + PathBasedMetadataObject.Type.PATH, + ownerMappingRule())); + }); break; default: throw new AuthorizationPluginException( @@ -278,88 +696,91 @@ public List translateOwner(MetadataObject gravitin } @Override - public AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject) { - Preconditions.checkArgument( - allowMetadataObjectTypesRule().contains(metadataObject.type()), - String.format( - "The metadata object type %s is not supported in the RangerAuthorizationHDFSPlugin", - metadataObject.type())); - List nsMetadataObject 
= - Lists.newArrayList(SecurableObjects.DOT_SPLITTER.splitToList(metadataObject.fullName())); - Preconditions.checkArgument( - nsMetadataObject.size() > 0, "The metadata object must have at least one name."); - - PathBasedMetadataObject rangerPathBaseMetadataObject; - switch (metadataObject.type()) { - case METALAKE: - case CATALOG: - rangerPathBaseMetadataObject = - new PathBasedMetadataObject("", PathBasedMetadataObject.Type.PATH); - break; - case SCHEMA: - rangerPathBaseMetadataObject = - new PathBasedMetadataObject( - metadataObject.fullName(), PathBasedMetadataObject.Type.PATH); - break; - case FILESET: - rangerPathBaseMetadataObject = - new PathBasedMetadataObject( - getLocationPath(metadataObject), PathBasedMetadataObject.Type.PATH); - break; - default: - throw new AuthorizationPluginException( - "The metadata object type %s is not supported in the RangerAuthorizationHDFSPlugin", - metadataObject.type()); - } - rangerPathBaseMetadataObject.validateAuthorizationMetadataObject(); - return rangerPathBaseMetadataObject; - } - - private NameIdentifier getObjectNameIdentifier(MetadataObject metadataObject) { - return NameIdentifier.parse(String.format("%s.%s", metalake, metadataObject.fullName())); + public List translateMetadataObject(MetadataObject metadataObject) { + List authzMetadataObjects = new ArrayList<>(); + Entity.EntityType entityType = MetadataObjectUtil.toEntityType(metadataObject); + NameIdentifier identifier = + metadataObject.type().equals(MetadataObject.Type.METALAKE) + ? 
NameIdentifier.of(metadataObject.fullName()) + : NameIdentifier.parse(String.join(".", metalake, metadataObject.fullName())); + List locations = AuthorizationUtils.getMetadataObjectLocation(identifier, entityType); + locations.stream() + .forEach( + locationPath -> { + PathBasedMetadataObject pathBaseMetadataObject = + new PathBasedMetadataObject( + metadataObject.parent(), + metadataObject.name(), + locationPath, + PathBasedMetadataObject.Type.PATH); + pathBaseMetadataObject.validateAuthorizationMetadataObject(); + authzMetadataObjects.add(pathBaseMetadataObject); + }); + return authzMetadataObjects; } - @VisibleForTesting - public String getLocationPath(MetadataObject metadataObject) throws NoSuchEntityException { - String locationPath = null; - switch (metadataObject.type()) { - case METALAKE: - case SCHEMA: - case TABLE: - break; - case CATALOG: - { - Namespace nsMetadataObj = Namespace.fromString(metadataObject.fullName()); - NameIdentifier ident = NameIdentifier.of(metalake, nsMetadataObj.level(0)); - Catalog catalog = GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident); - if (catalog.provider().equals("hive")) { - Schema schema = - GravitinoEnv.getInstance() - .schemaDispatcher() - .loadSchema( - NameIdentifier.of( - metalake, nsMetadataObj.level(0), "default" /*Hive default schema*/)); - String defaultSchemaLocation = schema.properties().get(HiveConstants.LOCATION); - locationPath = pattern.matcher(defaultSchemaLocation).replaceAll(""); - } - } - break; - case FILESET: - FilesetDispatcher filesetDispatcher = GravitinoEnv.getInstance().filesetDispatcher(); - NameIdentifier identifier = getObjectNameIdentifier(metadataObject); - Fileset fileset = filesetDispatcher.loadFileset(identifier); + @Override + public Boolean onMetadataUpdated(MetadataObjectChange... 
changes) throws RuntimeException { + for (MetadataObjectChange change : changes) { + if (change instanceof MetadataObjectChange.RenameMetadataObject) { + MetadataObject metadataObject = + ((MetadataObjectChange.RenameMetadataObject) change).metadataObject(); + MetadataObject newMetadataObject = + ((MetadataObjectChange.RenameMetadataObject) change).newMetadataObject(); Preconditions.checkArgument( - fileset != null, String.format("Fileset %s is not found", identifier)); - String filesetLocation = fileset.storageLocation(); + metadataObject.type() == newMetadataObject.type(), + "The old and new metadata object type must be equal!"); + if (metadataObject.type() == MetadataObject.Type.METALAKE) { + // Rename the metalake name + this.metalake = newMetadataObject.name(); + // Did not need to update the Ranger policy + continue; + } else if (metadataObject.type() == MetadataObject.Type.CATALOG) { + // Did not need to update the Ranger policy + continue; + } + List oldAuthzMetadataObjects = + translateMetadataObject(metadataObject); + List newAuthzMetadataObjects = + translateMetadataObject(newMetadataObject); Preconditions.checkArgument( - filesetLocation != null, String.format("Fileset %s location is not found", identifier)); - locationPath = pattern.matcher(filesetLocation).replaceAll(""); - break; - default: - throw new AuthorizationPluginException( - "The metadata object type %s is not supported in the RangerAuthorizationHDFSPlugin", - metadataObject.type()); + oldAuthzMetadataObjects.size() == newAuthzMetadataObjects.size(), + "The old and new metadata objects size must be equal!"); + for (int i = 0; i < oldAuthzMetadataObjects.size(); i++) { + AuthorizationMetadataObject oldAuthMetadataObject = oldAuthzMetadataObjects.get(i); + AuthorizationMetadataObject newAuthzMetadataObject = newAuthzMetadataObjects.get(i); + if (oldAuthMetadataObject.equals(newAuthzMetadataObject)) { + LOG.info( + "The metadata object({}) and new metadata object({}) are equal, so ignore 
rename!", + oldAuthMetadataObject.fullName(), + newAuthzMetadataObject.fullName()); + continue; + } + doRenameMetadataObject(oldAuthMetadataObject, newAuthzMetadataObject); + } + } else if (change instanceof MetadataObjectChange.RemoveMetadataObject) { + MetadataObjectChange.RemoveMetadataObject changeMetadataObject = + ((MetadataObjectChange.RemoveMetadataObject) change); + List authzMetadataObjects = new ArrayList<>(); + changeMetadataObject.getLocations().stream() + .forEach( + locationPath -> { + PathBasedMetadataObject pathBaseMetadataObject = + new PathBasedMetadataObject( + changeMetadataObject.metadataObject().parent(), + changeMetadataObject.metadataObject().name(), + locationPath, + PathBasedMetadataObject.Type.PATH); + pathBaseMetadataObject.validateAuthorizationMetadataObject(); + authzMetadataObjects.add(pathBaseMetadataObject); + }); + authzMetadataObjects.forEach(this::doRemoveMetadataObject); + } else { + throw new IllegalArgumentException( + "Unsupported metadata object change type: " + + (change == null ? 
"null" : change.getClass().getSimpleName())); + } } - return locationPath; + return Boolean.TRUE; } } diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java index aab19d31f36..08581f7c433 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java @@ -18,12 +18,18 @@ */ package org.apache.gravitino.authorization.ranger; +import static org.apache.gravitino.authorization.common.PathBasedMetadataObject.Type.PATH; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.COLUMN; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.SCHEMA; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.TABLE; + import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -35,13 +41,16 @@ import org.apache.gravitino.authorization.AuthorizationMetadataObject; import org.apache.gravitino.authorization.AuthorizationPrivilege; import org.apache.gravitino.authorization.AuthorizationSecurableObject; +import org.apache.gravitino.authorization.MetadataObjectChange; import org.apache.gravitino.authorization.Privilege; import org.apache.gravitino.authorization.SecurableObject; import org.apache.gravitino.authorization.SecurableObjects; import 
org.apache.gravitino.authorization.ranger.RangerPrivileges.RangerHadoopSQLPrivilege; import org.apache.gravitino.authorization.ranger.reference.RangerDefines.PolicyResource; import org.apache.gravitino.exceptions.AuthorizationPluginException; +import org.apache.ranger.RangerServiceException; import org.apache.ranger.plugin.model.RangerPolicy; +import org.apache.ranger.plugin.util.SearchFilter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,6 +85,328 @@ public Map> privilegesMappingRule() ImmutableSet.of(RangerHadoopSQLPrivilege.READ, RangerHadoopSQLPrivilege.SELECT)); } + /** + * Find the managed policy for the ranger securable object. + * + * @param authzMetadataObject The ranger securable object to find the managed policy. + * @return The managed policy for the metadata object. + */ + public RangerPolicy findManagedPolicy(AuthorizationMetadataObject authzMetadataObject) + throws AuthorizationPluginException { + List policies = wildcardSearchPolies(authzMetadataObject); + if (!policies.isEmpty()) { + /** + * Because Ranger doesn't support the precise search, Ranger will return the policy meets the + * wildcard(*,?) conditions, If you use `db.table` condition to search policy, the Ranger will + * match `db1.table1`, `db1.table2`, `db*.table*`, So we need to manually precisely filter + * this research results. 
+ */ + List nsMetadataObj = authzMetadataObject.names(); + Map preciseFilters = new HashMap<>(); + for (int i = 0; i < nsMetadataObj.size() && i < policyResourceDefinesRule().size(); i++) { + preciseFilters.put(policyResourceDefinesRule().get(i), nsMetadataObj.get(i)); + } + policies = + policies.stream() + .filter( + policy -> + policy.getResources().entrySet().stream() + .allMatch( + entry -> + preciseFilters.containsKey(entry.getKey()) + && entry.getValue().getValues().size() == 1 + && entry + .getValue() + .getValues() + .contains(preciseFilters.get(entry.getKey())))) + .collect(Collectors.toList()); + } + // Only return the policies that are managed by Gravitino. + if (policies.size() > 1) { + throw new AuthorizationPluginException("Each metadata object can have at most one policy."); + } + + if (policies.isEmpty()) { + return null; + } + + RangerPolicy policy = policies.get(0); + // Delegating Gravitino management policies cannot contain duplicate privilege + policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess); + + return policy; + } + + /** Wildcard search the Ranger policies in the different Ranger service. 
*/ + @Override + protected List wildcardSearchPolies( + AuthorizationMetadataObject authzMetadataObject) { + List resourceDefines = policyResourceDefinesRule(); + Map searchFilters = new HashMap<>(); + searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName); + for (int i = 0; i < authzMetadataObject.names().size() && i < resourceDefines.size(); i++) { + searchFilters.put( + SearchFilter.RESOURCE_PREFIX + resourceDefines.get(i), + authzMetadataObject.names().get(i)); + } + + try { + List policies = rangerClient.findPolicies(searchFilters); + return policies; + } catch (RangerServiceException e) { + throw new AuthorizationPluginException(e, "Failed to find the policies in the Ranger"); + } + } + + /** + * IF rename the SCHEMA, Need to rename these the relevant policies, `{schema}`, `{schema}.*`, + * `{schema}.*.*`
+ * IF rename the TABLE, need to rename the relevant policies, `{schema}.*`, `{schema}.*.*` + *
+ * IF rename the COLUMN, only need to rename `{schema}.*.*`
+ */ + @Override + protected void doRenameMetadataObject( + AuthorizationMetadataObject authzMetadataObject, + AuthorizationMetadataObject newAuthzMetadataObject) { + List> loop = new ArrayList<>(); + if (newAuthzMetadataObject.type().equals(SCHEMA)) { + loop = + ImmutableList.of( + ImmutableMap.of( + authzMetadataObject.names().get(0), newAuthzMetadataObject.names().get(0)), + ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL)); + } else if (newAuthzMetadataObject.type().equals(TABLE)) { + loop = + ImmutableList.of( + ImmutableMap.of( + authzMetadataObject.names().get(0), newAuthzMetadataObject.names().get(0)), + ImmutableMap.of( + authzMetadataObject.names().get(1), newAuthzMetadataObject.names().get(1)), + ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL)); + } else if (newAuthzMetadataObject.type().equals(COLUMN)) { + loop = + ImmutableList.of( + ImmutableMap.of( + authzMetadataObject.names().get(0), newAuthzMetadataObject.names().get(0)), + ImmutableMap.of( + authzMetadataObject.names().get(1), newAuthzMetadataObject.names().get(1)), + ImmutableMap.of( + authzMetadataObject.names().get(2), newAuthzMetadataObject.names().get(2))); + } else if (newAuthzMetadataObject.type().equals(PATH)) { // do nothing when fileset is renamed + } else { + throw new IllegalArgumentException( + "Unsupported metadata object type: " + authzMetadataObject.type()); + } + + List oldMetadataNames = new ArrayList<>(); + List newMetadataNames = new ArrayList<>(); + for (int index = 0; index < loop.size(); index++) { + oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get()); + newMetadataNames.add(loop.get(index).values().stream().findFirst().get()); + + AuthorizationMetadataObject.Type type = + (index == 0 + ? RangerHadoopSQLMetadataObject.Type.SCHEMA + : (index == 1 + ? 
RangerHadoopSQLMetadataObject.Type.TABLE + : RangerHadoopSQLMetadataObject.Type.COLUMN)); + AuthorizationMetadataObject authzMetadataObject1 = + new RangerHadoopSQLMetadataObject( + AuthorizationMetadataObject.getParentFullName(oldMetadataNames), + AuthorizationMetadataObject.getLastName(oldMetadataNames), + type); + AuthorizationMetadataObject newAuthzMetadataObject1 = + new RangerHadoopSQLMetadataObject( + AuthorizationMetadataObject.getParentFullName(newMetadataNames), + AuthorizationMetadataObject.getLastName(newMetadataNames), + type); + updatePolicyByMetadataObject( + type.metadataObjectType(), authzMetadataObject1, newAuthzMetadataObject1); + } + } + + @Override + protected void updatePolicyByMetadataObject( + MetadataObject.Type operationType, + AuthorizationMetadataObject oldAuthzMetaobject, + AuthorizationMetadataObject newAuthzMetaobject) { + List oldPolicies = wildcardSearchPolies(oldAuthzMetaobject); + List existNewPolicies = wildcardSearchPolies(newAuthzMetaobject); + if (oldPolicies.isEmpty()) { + LOG.warn("Cannot find the Ranger policy for the metadata object({})!", oldAuthzMetaobject); + } + if (!existNewPolicies.isEmpty()) { + LOG.warn("The Ranger policy for the metadata object({}) already exists!", newAuthzMetaobject); + } + Map operationTypeIndex = + ImmutableMap.of( + MetadataObject.Type.SCHEMA, 0, + MetadataObject.Type.TABLE, 1, + MetadataObject.Type.COLUMN, 2); + oldPolicies.stream() + .forEach( + policy -> { + try { + String policyName = policy.getName(); + int index = operationTypeIndex.get(operationType); + + // Update the policy name is following Gravitino's spec + if (policy + .getName() + .equals( + AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) { + List policyNames = + Lists.newArrayList( + AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName)); + Preconditions.checkArgument( + policyNames.size() >= oldAuthzMetaobject.names().size(), + String.format("The policy name(%s) is invalid!", 
policyName)); + if (policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) { + // Doesn't need to rename the policy `*` + return; + } + policyNames.set(index, newAuthzMetaobject.names().get(index)); + policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames)); + } + // Update the policy resource name to new name + policy + .getResources() + .put( + policyResourceDefinesRule().get(index), + new RangerPolicy.RangerPolicyResource( + newAuthzMetaobject.names().get(index))); + + boolean alreadyExist = + existNewPolicies.stream() + .anyMatch( + existNewPolicy -> + existNewPolicy.getName().equals(policy.getName()) + || existNewPolicy.getResources().equals(policy.getResources())); + if (alreadyExist) { + LOG.warn( + "The Ranger policy for the metadata object({}) already exists!", + newAuthzMetaobject); + return; + } + + // Update the policy + rangerClient.updatePolicy(policy.getId(), policy); + } catch (RangerServiceException e) { + LOG.error("Failed to rename the policy {}!", policy); + throw new RuntimeException(e); + } + }); + } + + /** + * IF remove the SCHEMA, need to remove these the relevant policies, `{schema}`, `{schema}.*`, + * `{schema}.*.*`
+ * IF remove the TABLE, need to remove the relevant policies, `{schema}.*`, `{schema}.*.*` + *
+ * IF remove the COLUMN, only need to remove `{schema}.*.*`
+ */ + @Override + protected void doRemoveMetadataObject(AuthorizationMetadataObject authzMetadataObject) { + AuthorizationMetadataObject.Type type = authzMetadataObject.type(); + if (type.equals(SCHEMA)) { + doRemoveSchemaMetadataObject(authzMetadataObject); + } else if (type.equals(TABLE)) { + doRemoveTableMetadataObject(authzMetadataObject); + } else if (type.equals(COLUMN) || type.equals(PATH)) { + removePolicyByMetadataObject(authzMetadataObject); + } else { + throw new IllegalArgumentException( + "Unsupported metadata object type: " + authzMetadataObject.type()); + } + } + + /** + * Remove the SCHEMA, Need to remove these the relevant policies, `{schema}`, `{schema}.*`, + * `{schema}.*.*` permissions. + */ + private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject authMetadataObject) { + Preconditions.checkArgument( + authMetadataObject.type() == SCHEMA, "The metadata object type must be SCHEMA"); + Preconditions.checkArgument( + authMetadataObject.names().size() == 1, "The metadata object names must be 1"); + if (RangerHelper.RESOURCE_ALL.equals(authMetadataObject.name())) { + // Delete metalake or catalog policies in this Ranger service + try { + List policies = rangerClient.getPoliciesInService(rangerServiceName); + policies.stream() + .filter(RangerHelper::hasGravitinoManagedPolicyItem) + .forEach(rangerHelper::removeAllGravitinoManagedPolicyItem); + } catch (RangerServiceException e) { + throw new RuntimeException(e); + } + } else { + List> loop = + ImmutableList.of( + ImmutableList.of(authMetadataObject.name()) + /** SCHEMA permission */ + , + ImmutableList.of(authMetadataObject.name(), RangerHelper.RESOURCE_ALL) + /** TABLE permission */ + , + ImmutableList.of( + authMetadataObject.name(), RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL) + /** COLUMN permission */ + ); + + for (int index = 0; index < loop.size(); index++) { + AuthorizationMetadataObject.Type type = + (index == 0 + ? 
RangerHadoopSQLMetadataObject.Type.SCHEMA + : (index == 1 + ? RangerHadoopSQLMetadataObject.Type.TABLE + : RangerHadoopSQLMetadataObject.Type.COLUMN)); + AuthorizationMetadataObject authzMetadataObject1 = + new RangerHadoopSQLMetadataObject( + AuthorizationMetadataObject.getParentFullName(loop.get(index)), + AuthorizationMetadataObject.getLastName(loop.get(index)), + type); + removePolicyByMetadataObject(authzMetadataObject1); + } + } + } + + /** + * Remove the TABLE, Need to remove these the relevant policies, `*.{table}`, `*.{table}.{column}` + * permissions. + */ + private void doRemoveTableMetadataObject(AuthorizationMetadataObject authzMetadataObject) { + List> loop = + ImmutableList.of( + authzMetadataObject.names() + /** TABLE permission */ + , + Stream.concat( + authzMetadataObject.names().stream(), Stream.of(RangerHelper.RESOURCE_ALL)) + .collect(Collectors.toList()) + /** COLUMN permission */ + ); + + for (int index = 0; index < loop.size(); index++) { + AuthorizationMetadataObject.Type type = + (index == 0 + ? RangerHadoopSQLMetadataObject.Type.SCHEMA + : (index == 1 + ? RangerHadoopSQLMetadataObject.Type.TABLE + : RangerHadoopSQLMetadataObject.Type.COLUMN)); + AuthorizationMetadataObject authzMetadataObject1 = + new RangerHadoopSQLMetadataObject( + AuthorizationMetadataObject.getParentFullName(loop.get(index)), + AuthorizationMetadataObject.getLastName(loop.get(index)), + type); + removePolicyByMetadataObject(authzMetadataObject1); + } + } + @Override /** Set the default owner rule. 
*/ public Set ownerMappingRule() { @@ -108,6 +439,7 @@ protected RangerPolicy createPolicyAddResources(AuthorizationMetadataObject meta @Override public AuthorizationSecurableObject generateAuthorizationSecurableObject( List names, + String path, AuthorizationMetadataObject.Type type, Set privileges) { AuthorizationMetadataObject authMetadataObject = @@ -163,12 +495,14 @@ public List translateOwner(MetadataObject gravitin rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.SCHEMA, ownerMappingRule())); // Add `*.*` for the TABLE permission rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.TABLE, ownerMappingRule())); // Add `*.*.*` for the COLUMN permission @@ -178,6 +512,7 @@ public List translateOwner(MetadataObject gravitin RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.COLUMN, ownerMappingRule())); break; @@ -186,6 +521,7 @@ public List translateOwner(MetadataObject gravitin rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(gravitinoMetadataObject.name() /*Schema name*/), + null, RangerHadoopSQLMetadataObject.Type.SCHEMA, ownerMappingRule())); // Add `{schema}.*` for the TABLE permission @@ -193,6 +529,7 @@ public List translateOwner(MetadataObject gravitin generateAuthorizationSecurableObject( ImmutableList.of( gravitinoMetadataObject.name() /*Schema name*/, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.TABLE, ownerMappingRule())); // Add `{schema}.*.*` for the COLUMN permission @@ -202,25 +539,32 @@ public List translateOwner(MetadataObject gravitin gravitinoMetadataObject.name() /*Schema name*/, RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + null, 
RangerHadoopSQLMetadataObject.Type.COLUMN, ownerMappingRule())); break; case TABLE: - // Add `{schema}.{table}` for the TABLE permission - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - translateMetadataObject(gravitinoMetadataObject).names(), - RangerHadoopSQLMetadataObject.Type.TABLE, - ownerMappingRule())); - // Add `{schema}.{table}.*` for the COLUMN permission - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - Stream.concat( - translateMetadataObject(gravitinoMetadataObject).names().stream(), - Stream.of(RangerHelper.RESOURCE_ALL)) - .collect(Collectors.toList()), - RangerHadoopSQLMetadataObject.Type.COLUMN, - ownerMappingRule())); + translateMetadataObject(gravitinoMetadataObject).stream() + .forEach( + rangerMetadataObject -> { + // Add `{schema}.{table}` for the TABLE permission + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + rangerMetadataObject.names(), + null, + RangerHadoopSQLMetadataObject.Type.TABLE, + ownerMappingRule())); + // Add `{schema}.{table}.*` for the COLUMN permission + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + Stream.concat( + rangerMetadataObject.names().stream(), + Stream.of(RangerHelper.RESOURCE_ALL)) + .collect(Collectors.toList()), + null, + RangerHadoopSQLMetadataObject.Type.COLUMN, + ownerMappingRule())); + }); break; default: throw new AuthorizationPluginException( @@ -265,6 +609,7 @@ public List translatePrivilege(SecurableObject sec rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerPrivileges)); break; @@ -282,6 +627,7 @@ public List translatePrivilege(SecurableObject sec rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerPrivileges)); break; @@ -299,6 +645,7 @@ public List 
translatePrivilege(SecurableObject sec rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerPrivileges)); break; @@ -307,6 +654,7 @@ public List translatePrivilege(SecurableObject sec rangerSecurableObjects.add( generateAuthorizationSecurableObject( ImmutableList.of(securableObject.name() /*Schema name*/), + null, RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerPrivileges)); break; @@ -327,6 +675,7 @@ public List translatePrivilege(SecurableObject sec generateAuthorizationSecurableObject( ImmutableList.of( RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.TABLE, rangerPrivileges)); // Add `*.*.*` for the COLUMN permission @@ -336,6 +685,7 @@ public List translatePrivilege(SecurableObject sec RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.COLUMN, rangerPrivileges)); break; @@ -346,6 +696,7 @@ public List translatePrivilege(SecurableObject sec ImmutableList.of( securableObject.name() /*Schema name*/, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.TABLE, rangerPrivileges)); // Add `{schema}.*.*` for the COLUMN permission @@ -355,6 +706,7 @@ public List translatePrivilege(SecurableObject sec securableObject.name() /*Schema name*/, RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), + null, RangerHadoopSQLMetadataObject.Type.COLUMN, rangerPrivileges)); break; @@ -364,21 +716,27 @@ public List translatePrivilege(SecurableObject sec "The privilege %s is not supported for the securable object: %s", gravitinoPrivilege.name(), securableObject.type()); } else { - // Add `{schema}.{table}` for the TABLE permission - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - translateMetadataObject(securableObject).names(), - RangerHadoopSQLMetadataObject.Type.TABLE, - rangerPrivileges)); - // Add 
`{schema}.{table}.*` for the COLUMN permission - rangerSecurableObjects.add( - generateAuthorizationSecurableObject( - Stream.concat( - translateMetadataObject(securableObject).names().stream(), - Stream.of(RangerHelper.RESOURCE_ALL)) - .collect(Collectors.toList()), - RangerHadoopSQLMetadataObject.Type.COLUMN, - rangerPrivileges)); + translateMetadataObject(securableObject).stream() + .forEach( + rangerMetadataObject -> { + // Add `{schema}.{table}` for the TABLE permission + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + rangerMetadataObject.names(), + null, + RangerHadoopSQLMetadataObject.Type.TABLE, + rangerPrivileges)); + // Add `{schema}.{table}.*` for the COLUMN permission + rangerSecurableObjects.add( + generateAuthorizationSecurableObject( + Stream.concat( + rangerMetadataObject.names().stream(), + Stream.of(RangerHelper.RESOURCE_ALL)) + .collect(Collectors.toList()), + null, + RangerHadoopSQLMetadataObject.Type.COLUMN, + rangerPrivileges)); + }); } break; default: @@ -404,12 +762,7 @@ public List translatePrivilege(SecurableObject sec * convert the Gravitino metadata object to the Ranger metadata object. 
*/ @Override - public AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject) { - Preconditions.checkArgument( - allowMetadataObjectTypesRule().contains(metadataObject.type()), - String.format( - "The metadata object type %s is not supported in the RangerAuthorizationHivePlugin", - metadataObject.type())); + public List translateMetadataObject(MetadataObject metadataObject) { Preconditions.checkArgument( !(metadataObject instanceof RangerPrivileges), "The metadata object must be not a RangerPrivileges object."); @@ -435,6 +788,62 @@ public AuthorizationMetadataObject translateMetadataObject(MetadataObject metada AuthorizationMetadataObject.getLastName(nsMetadataObject), type); rangerHadoopSQLMetadataObject.validateAuthorizationMetadataObject(); - return rangerHadoopSQLMetadataObject; + return ImmutableList.of(rangerHadoopSQLMetadataObject); + } + + @Override + public Boolean onMetadataUpdated(MetadataObjectChange... changes) throws RuntimeException { + for (MetadataObjectChange change : changes) { + if (change instanceof MetadataObjectChange.RenameMetadataObject) { + MetadataObject metadataObject = + ((MetadataObjectChange.RenameMetadataObject) change).metadataObject(); + MetadataObject newMetadataObject = + ((MetadataObjectChange.RenameMetadataObject) change).newMetadataObject(); + Preconditions.checkArgument( + metadataObject.type() == newMetadataObject.type(), + "The old and new metadata object type must be equal!"); + if (metadataObject.type() == MetadataObject.Type.METALAKE) { + // Rename the metalake name + this.metalake = newMetadataObject.name(); + // Did not need to update the Ranger policy + continue; + } else if (metadataObject.type() == MetadataObject.Type.CATALOG) { + // Did not need to update the Ranger policy + continue; + } + List oldAuthzMetadataObjects = + translateMetadataObject(metadataObject); + List newAuthzMetadataObjects = + translateMetadataObject(newMetadataObject); + Preconditions.checkArgument( + 
oldAuthzMetadataObjects.size() == newAuthzMetadataObjects.size(), + "The old and new metadata objects size must be equal!"); + for (int i = 0; i < oldAuthzMetadataObjects.size(); i++) { + AuthorizationMetadataObject oldAuthMetadataObject = oldAuthzMetadataObjects.get(i); + AuthorizationMetadataObject newAuthzMetadataObject = newAuthzMetadataObjects.get(i); + if (oldAuthMetadataObject.equals(newAuthzMetadataObject)) { + LOG.info( + "The metadata object({}) and new metadata object({}) are equal, so ignore rename!", + oldAuthMetadataObject.fullName(), + newAuthzMetadataObject.fullName()); + continue; + } + doRenameMetadataObject(oldAuthMetadataObject, newAuthzMetadataObject); + } + } else if (change instanceof MetadataObjectChange.RemoveMetadataObject) { + MetadataObject metadataObject = + ((MetadataObjectChange.RemoveMetadataObject) change).metadataObject(); + // if (metadataObject.type() != MetadataObject.Type.FILESET) { + List authzMetadataObjects = + translateMetadataObject(metadataObject); + authzMetadataObjects.stream().forEach(this::doRemoveMetadataObject); + // } + } else { + throw new IllegalArgumentException( + "Unsupported metadata object change type: " + + (change == null ? 
"null" : change.getClass().getSimpleName())); + } + } + return Boolean.TRUE; } } diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java index 1198b68cb46..f134e50dfe1 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java @@ -18,23 +18,21 @@ */ package org.apache.gravitino.authorization.ranger; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.SCHEMA; +import static org.apache.gravitino.authorization.ranger.RangerHadoopSQLMetadataObject.Type.TABLE; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import java.io.IOException; import java.time.Instant; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; -import java.util.stream.Stream; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.authorization.AuthorizationMetadataObject; import org.apache.gravitino.authorization.AuthorizationPrivilege; @@ -83,7 +81,7 @@ public abstract class RangerAuthorizationPlugin protected String metalake; protected final String rangerServiceName; protected final RangerClientExtension rangerClient; - private final RangerHelper rangerHelper; + protected final RangerHelper rangerHelper; @VisibleForTesting public final String 
rangerAdminName; protected RangerAuthorizationPlugin(String metalake, Map config) { @@ -128,6 +126,24 @@ public String getMetalake() { protected abstract RangerPolicy createPolicyAddResources( AuthorizationMetadataObject metadataObject); + /** Wildcard search the Ranger policies in the different Ranger service. */ + protected abstract List wildcardSearchPolies( + AuthorizationMetadataObject authzMetadataObject); + + /** + * Find the managed policy for the ranger securable object. + * + * @param authzMetadataObject The ranger securable object to find the managed policy. + * @return The managed policy for the metadata object. + */ + public abstract RangerPolicy findManagedPolicy(AuthorizationMetadataObject authzMetadataObject) + throws AuthorizationPluginException; + + protected abstract void updatePolicyByMetadataObject( + MetadataObject.Type operationType, + AuthorizationMetadataObject oldAuthzMetaobject, + AuthorizationMetadataObject newAuthzMetaobject); + protected RangerPolicy addOwnerToNewPolicy( AuthorizationMetadataObject metadataObject, Owner newOwner) { RangerPolicy policy = createPolicyAddResources(metadataObject); @@ -232,12 +248,12 @@ public Boolean onRoleUpdated(Role role, RoleChange... changes) return Boolean.FALSE; } - List AuthorizationSecurableObjects = + List authzSecurableObjects = translatePrivilege(securableObject); - AuthorizationSecurableObjects.stream() + authzSecurableObjects.stream() .forEach( - AuthorizationSecurableObject -> { - if (!doAddSecurableObject(role.name(), AuthorizationSecurableObject)) { + authzSecurableObject -> { + if (!doAddSecurableObject(role.name(), authzSecurableObject)) { throw new AuthorizationPluginException( "Failed to add the securable object to the Ranger policy!"); } @@ -249,12 +265,12 @@ public Boolean onRoleUpdated(Role role, RoleChange... 
changes) return Boolean.FALSE; } - List AuthorizationSecurableObjects = + List authzSecurableObjects = translatePrivilege(securableObject); - AuthorizationSecurableObjects.stream() + authzSecurableObjects.stream() .forEach( - AuthorizationSecurableObject -> { - if (!doRemoveSecurableObject(role.name(), AuthorizationSecurableObject)) { + authzSecurableObject -> { + if (!doRemoveSecurableObject(role.name(), authzSecurableObject)) { throw new AuthorizationPluginException( "Failed to add the securable object to the Ranger policy!"); } @@ -307,30 +323,43 @@ public Boolean onMetadataUpdated(MetadataObjectChange... changes) throws Runtime ((MetadataObjectChange.RenameMetadataObject) change).metadataObject(); MetadataObject newMetadataObject = ((MetadataObjectChange.RenameMetadataObject) change).newMetadataObject(); - if (metadataObject.type() == MetadataObject.Type.METALAKE - && newMetadataObject.type() == MetadataObject.Type.METALAKE) { - // Modify the metalake name + Preconditions.checkArgument( + metadataObject.type() == newMetadataObject.type(), + "The old and new metadata object type must be equal!"); + if (metadataObject.type() == MetadataObject.Type.METALAKE) { + // Rename the metalake name this.metalake = newMetadataObject.name(); + // Did not need to update the Ranger policy + continue; + } else if (metadataObject.type() == MetadataObject.Type.CATALOG) { + // Did not need to update the Ranger policy + continue; } - AuthorizationMetadataObject oldAuthMetadataObject = translateMetadataObject(metadataObject); - AuthorizationMetadataObject newAuthMetadataObject = + List oldAuthzMetadataObjects = + translateMetadataObject(metadataObject); + List newAuthzMetadataObjects = translateMetadataObject(newMetadataObject); - if (oldAuthMetadataObject.equals(newAuthMetadataObject)) { - LOG.info( - "The metadata object({}) and new metadata object({}) are equal, so ignore rename!", - oldAuthMetadataObject.fullName(), - newAuthMetadataObject.fullName()); - continue; + 
Preconditions.checkArgument( + oldAuthzMetadataObjects.size() == newAuthzMetadataObjects.size(), + "The old and new metadata objects size must be equal!"); + for (int i = 0; i < oldAuthzMetadataObjects.size(); i++) { + AuthorizationMetadataObject oldAuthMetadataObject = oldAuthzMetadataObjects.get(i); + AuthorizationMetadataObject newAuthzMetadataObject = newAuthzMetadataObjects.get(i); + if (oldAuthMetadataObject.equals(newAuthzMetadataObject)) { + LOG.info( + "The metadata object({}) and new metadata object({}) are equal, so ignore rename!", + oldAuthMetadataObject.fullName(), + newAuthzMetadataObject.fullName()); + continue; + } + doRenameMetadataObject(oldAuthMetadataObject, newAuthzMetadataObject); } - doRenameMetadataObject(oldAuthMetadataObject, newAuthMetadataObject); } else if (change instanceof MetadataObjectChange.RemoveMetadataObject) { MetadataObject metadataObject = ((MetadataObjectChange.RemoveMetadataObject) change).metadataObject(); - if (metadataObject.type() != MetadataObject.Type.FILESET) { - AuthorizationMetadataObject AuthorizationMetadataObject = - translateMetadataObject(metadataObject); - doRemoveMetadataObject(AuthorizationMetadataObject); - } + List authzMetadataObjects = + translateMetadataObject(metadataObject); + authzMetadataObjects.stream().forEach(this::doRemoveMetadataObject); } else { throw new IllegalArgumentException( "Unsupported metadata object change type: " @@ -431,7 +460,7 @@ public Boolean onOwnerSet(MetadataObject metadataObject, Owner preOwner, Owner n rangerSecurableObjects.stream() .forEach( rangerSecurableObject -> { - RangerPolicy policy = rangerHelper.findManagedPolicy(rangerSecurableObject); + RangerPolicy policy = findManagedPolicy(rangerSecurableObject); try { if (policy == null) { policy = addOwnerRoleToNewPolicy(rangerSecurableObject, ownerRoleName); @@ -453,8 +482,7 @@ public Boolean onOwnerSet(MetadataObject metadataObject, Owner preOwner, Owner n rangerSecurableObjects.stream() .forEach( 
AuthorizationSecurableObject -> { - RangerPolicy policy = - rangerHelper.findManagedPolicy(AuthorizationSecurableObject); + RangerPolicy policy = findManagedPolicy(AuthorizationSecurableObject); try { if (policy == null) { policy = addOwnerToNewPolicy(AuthorizationSecurableObject, newOwner); @@ -684,14 +712,14 @@ public Boolean onGroupAcquired(Group group) { */ private boolean doAddSecurableObject( String roleName, AuthorizationSecurableObject securableObject) { - RangerPolicy policy = rangerHelper.findManagedPolicy(securableObject); + RangerPolicy policy = findManagedPolicy(securableObject); if (policy != null) { // Check the policy item's accesses and roles equal the Ranger securable object's privilege - List allowPrivilies = + List allowPrivileges = securableObject.privileges().stream() .filter(privilege -> privilege.condition() == Privilege.Condition.ALLOW) .collect(Collectors.toList()); - List denyPrivilies = + List denyPrivileges = securableObject.privileges().stream() .filter(privilege -> privilege.condition() == Privilege.Condition.DENY) .collect(Collectors.toList()); @@ -720,8 +748,8 @@ private boolean doAddSecurableObject( .map(RangerPrivileges::valueOf) .collect(Collectors.toSet()); - if (policyPrivileges.containsAll(allowPrivilies) - && policyDenyPrivileges.containsAll(denyPrivilies)) { + if (policyPrivileges.containsAll(allowPrivileges) + && policyDenyPrivileges.containsAll(denyPrivileges)) { LOG.info( "The privilege({}) already added to Ranger policy({})!", policy.getName(), @@ -757,17 +785,17 @@ private boolean doAddSecurableObject( * 3. If policy does not contain any policy item, then delete this policy.
*/ private boolean doRemoveSecurableObject( - String roleName, AuthorizationSecurableObject AuthorizationSecurableObject) { - RangerPolicy policy = rangerHelper.findManagedPolicy(AuthorizationSecurableObject); + String roleName, AuthorizationSecurableObject authzSecurableObject) { + RangerPolicy policy = findManagedPolicy(authzSecurableObject); if (policy == null) { LOG.warn( "Cannot find the Ranger policy for the Ranger securable object({})!", - AuthorizationSecurableObject.fullName()); + authzSecurableObject.fullName()); // Don't throw exception or return false, because need support immutable operation. return true; } - AuthorizationSecurableObject.privileges().stream() + authzSecurableObject.privileges().stream() .forEach( rangerPrivilege -> { if (rangerPrivilege.condition() == Privilege.Condition.ALLOW) { @@ -776,7 +804,7 @@ private boolean doRemoveSecurableObject( .forEach( policyItem -> { removePolicyItemIfEqualRoleName( - policyItem, AuthorizationSecurableObject, roleName); + policyItem, authzSecurableObject, roleName); }); } else { policy @@ -784,7 +812,7 @@ private boolean doRemoveSecurableObject( .forEach( policyItem -> { removePolicyItemIfEqualRoleName( - policyItem, AuthorizationSecurableObject, roleName); + policyItem, authzSecurableObject, roleName); }); } }); @@ -806,7 +834,11 @@ private boolean doRemoveSecurableObject( && policyItem.getGroups().isEmpty()); try { - rangerClient.updatePolicy(policy.getId(), policy); + if (policy.getPolicyItems().isEmpty() && policy.getDenyPolicyItems().isEmpty()) { + rangerClient.deletePolicy(policy.getId()); + } else { + rangerClient.updatePolicy(policy.getId(), policy); + } } catch (RangerServiceException e) { LOG.error("Failed to remove the policy item from the Ranger policy {}!", policy); throw new AuthorizationPluginException( @@ -836,94 +868,6 @@ private void removePolicyItemIfEqualRoleName( } } - /** - * IF remove the SCHEMA, need to remove these the relevant policies, `{schema}`, `{schema}.*`, - * 
`{schema}.*.*`
- * IF remove the TABLE, need to remove these the relevant policies, `{schema}.*`, `{schema}.*.*` - *
- * IF remove the COLUMN, Only need to remove `{schema}.*.*`
- */ - private void doRemoveMetadataObject(AuthorizationMetadataObject authMetadataObject) { - switch (authMetadataObject.metadataObjectType()) { - case SCHEMA: - doRemoveSchemaMetadataObject(authMetadataObject); - break; - case TABLE: - doRemoveTableMetadataObject(authMetadataObject); - break; - case COLUMN: - removePolicyByMetadataObject(authMetadataObject.names()); - break; - case FILESET: - // can not get fileset path in this case, do nothing - break; - default: - throw new IllegalArgumentException( - "Unsupported metadata object type: " + authMetadataObject.type()); - } - } - - /** - * Remove the SCHEMA, Need to remove these the relevant policies, `{schema}`, `{schema}.*`, - * `{schema}.*.*` permissions. - */ - private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject authMetadataObject) { - Preconditions.checkArgument( - authMetadataObject.type() == RangerHadoopSQLMetadataObject.Type.SCHEMA, - "The metadata object type must be SCHEMA"); - Preconditions.checkArgument( - authMetadataObject.names().size() == 1, "The metadata object names must be 1"); - if (RangerHelper.RESOURCE_ALL.equals(authMetadataObject.name())) { - // Delete metalake or catalog policies in this Ranger service - try { - List policies = rangerClient.getPoliciesInService(rangerServiceName); - policies.stream() - .filter(rangerHelper::hasGravitinoManagedPolicyItem) - .forEach(rangerHelper::removeAllGravitinoManagedPolicyItem); - } catch (RangerServiceException e) { - throw new RuntimeException(e); - } - } else { - List> loop = - ImmutableList.of( - ImmutableList.of(authMetadataObject.name()) - /** SCHEMA permission */ - , - ImmutableList.of(authMetadataObject.name(), RangerHelper.RESOURCE_ALL) - /** TABLE permission */ - , - ImmutableList.of( - authMetadataObject.name(), RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL) - /** COLUMN permission */ - ); - for (List resNames : loop) { - removePolicyByMetadataObject(resNames); - } - } - } - - /** - * Remove the TABLE, Need to remove 
these the relevant policies, `*.{table}`, `*.{table}.{column}` - * permissions. - */ - private void doRemoveTableMetadataObject( - AuthorizationMetadataObject AuthorizationMetadataObject) { - List> loop = - ImmutableList.of( - AuthorizationMetadataObject.names() - /** TABLE permission */ - , - Stream.concat( - AuthorizationMetadataObject.names().stream(), - Stream.of(RangerHelper.RESOURCE_ALL)) - .collect(Collectors.toList()) - /** COLUMN permission */ - ); - for (List resNames : loop) { - removePolicyByMetadataObject(resNames); - } - } - /** * IF rename the SCHEMA, Need to rename these the relevant policies, `{schema}`, `{schema}.*`, * `{schema}.*.*`
@@ -931,205 +875,22 @@ private void doRemoveTableMetadataObject( *
* IF rename the COLUMN, Only need to rename `{schema}.*.*`
*/ - private void doRenameMetadataObject( - AuthorizationMetadataObject AuthorizationMetadataObject, - AuthorizationMetadataObject newAuthMetadataObject) { - switch (newAuthMetadataObject.metadataObjectType()) { - case SCHEMA: - doRenameSchemaMetadataObject(AuthorizationMetadataObject, newAuthMetadataObject); - break; - case TABLE: - doRenameTableMetadataObject(AuthorizationMetadataObject, newAuthMetadataObject); - break; - case COLUMN: - doRenameColumnMetadataObject(AuthorizationMetadataObject, newAuthMetadataObject); - break; - case FILESET: - // do nothing when fileset is renamed - break; - default: - throw new IllegalArgumentException( - "Unsupported metadata object type: " + AuthorizationMetadataObject.type()); - } - } - - /** - * Rename the SCHEMA, Need to rename these the relevant policies, `{schema}`, `{schema}.*`, - * `{schema}.*.*`
- */ - private void doRenameSchemaMetadataObject( - AuthorizationMetadataObject AuthorizationMetadataObject, - AuthorizationMetadataObject newAuthorizationMetadataObject) { - List oldMetadataNames = new ArrayList<>(); - List newMetadataNames = new ArrayList<>(); - List> loop = - ImmutableList.of( - ImmutableMap.of( - AuthorizationMetadataObject.names().get(0), - newAuthorizationMetadataObject.names().get(0)), - ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL), - ImmutableMap.of(RangerHelper.RESOURCE_ALL, RangerHelper.RESOURCE_ALL)); - for (Map mapName : loop) { - oldMetadataNames.add(mapName.keySet().stream().findFirst().get()); - newMetadataNames.add(mapName.values().stream().findFirst().get()); - updatePolicyByMetadataObject(MetadataObject.Type.SCHEMA, oldMetadataNames, newMetadataNames); - } - } - - /** - * Rename the TABLE, Need to rename these the relevant policies, `*.{table}`, `*.{table}.{column}` - *
- */ - private void doRenameTableMetadataObject( - AuthorizationMetadataObject AuthorizationMetadataObject, - AuthorizationMetadataObject newAuthorizationMetadataObject) { - List oldMetadataNames = new ArrayList<>(); - List newMetadataNames = new ArrayList<>(); - List> loop = - ImmutableList.of( - ImmutableMap.of(AuthorizationMetadataObject.names().get(0), MetadataObject.Type.SCHEMA), - ImmutableMap.of(AuthorizationMetadataObject.names().get(1), MetadataObject.Type.TABLE), - ImmutableMap.of(RangerHelper.RESOURCE_ALL, MetadataObject.Type.COLUMN)); - for (Map nameAndType : loop) { - oldMetadataNames.add(nameAndType.keySet().stream().findFirst().get()); - if (nameAndType.containsValue(MetadataObject.Type.SCHEMA)) { - newMetadataNames.add(newAuthorizationMetadataObject.names().get(0)); - // Skip update the schema name operation - continue; - } else if (nameAndType.containsValue(MetadataObject.Type.TABLE)) { - newMetadataNames.add(newAuthorizationMetadataObject.names().get(1)); - } else if (nameAndType.containsValue(MetadataObject.Type.COLUMN)) { - newMetadataNames.add(RangerHelper.RESOURCE_ALL); - } - updatePolicyByMetadataObject(MetadataObject.Type.TABLE, oldMetadataNames, newMetadataNames); - } - } + protected abstract void doRenameMetadataObject( + AuthorizationMetadataObject authzMetadataObject, + AuthorizationMetadataObject newAuthzMetadataObject); - /** rename the COLUMN, Only need to rename `*.*.{column}`
*/ - private void doRenameColumnMetadataObject( - AuthorizationMetadataObject AuthorizationMetadataObject, - AuthorizationMetadataObject newAuthorizationMetadataObject) { - List oldMetadataNames = new ArrayList<>(); - List newMetadataNames = new ArrayList<>(); - List> loop = - ImmutableList.of( - ImmutableMap.of(AuthorizationMetadataObject.names().get(0), MetadataObject.Type.SCHEMA), - ImmutableMap.of(AuthorizationMetadataObject.names().get(1), MetadataObject.Type.TABLE), - ImmutableMap.of( - AuthorizationMetadataObject.names().get(2), MetadataObject.Type.COLUMN)); - for (Map nameAndType : loop) { - oldMetadataNames.add(nameAndType.keySet().stream().findFirst().get()); - if (nameAndType.containsValue(MetadataObject.Type.SCHEMA)) { - newMetadataNames.add(newAuthorizationMetadataObject.names().get(0)); - // Skip update the schema name operation - continue; - } else if (nameAndType.containsValue(MetadataObject.Type.TABLE)) { - newMetadataNames.add(newAuthorizationMetadataObject.names().get(1)); - // Skip update the table name operation - continue; - } else if (nameAndType.containsValue(MetadataObject.Type.COLUMN)) { - newMetadataNames.add(newAuthorizationMetadataObject.names().get(2)); - } - updatePolicyByMetadataObject(MetadataObject.Type.COLUMN, oldMetadataNames, newMetadataNames); - } - } + protected abstract void doRemoveMetadataObject(AuthorizationMetadataObject authzMetadataObject); /** * Remove the policy by the metadata object names.
* - * @param metadataNames The metadata object names. + * @param authzMetadataObject The authorization metadata object. */ - private void removePolicyByMetadataObject(List metadataNames) { - List policies = rangerHelper.wildcardSearchPolies(metadataNames); - Map preciseFilters = new HashMap<>(); - for (int i = 0; i < metadataNames.size(); i++) { - preciseFilters.put(rangerHelper.policyResourceDefines.get(i), metadataNames.get(i)); - } - policies = - policies.stream() - .filter( - policy -> - policy.getResources().entrySet().stream() - .allMatch( - entry -> - preciseFilters.containsKey(entry.getKey()) - && entry.getValue().getValues().size() == 1 - && entry - .getValue() - .getValues() - .contains(preciseFilters.get(entry.getKey())))) - .collect(Collectors.toList()); - policies.forEach(rangerHelper::removeAllGravitinoManagedPolicyItem); - } - - private void updatePolicyByMetadataObject( - MetadataObject.Type operationType, - List oldMetadataNames, - List newMetadataNames) { - List oldPolicies = rangerHelper.wildcardSearchPolies(oldMetadataNames); - List existNewPolicies = rangerHelper.wildcardSearchPolies(newMetadataNames); - if (oldPolicies.isEmpty()) { - LOG.warn("Cannot find the Ranger policy for the metadata object({})!", oldMetadataNames); - } - if (!existNewPolicies.isEmpty()) { - LOG.warn("The Ranger policy for the metadata object({}) already exists!", newMetadataNames); + protected void removePolicyByMetadataObject(AuthorizationMetadataObject authzMetadataObject) { + RangerPolicy policy = findManagedPolicy(authzMetadataObject); + if (policy != null) { + rangerHelper.removeAllGravitinoManagedPolicyItem(policy); } - Map operationTypeIndex = - ImmutableMap.of( - MetadataObject.Type.SCHEMA, 0, - MetadataObject.Type.TABLE, 1, - MetadataObject.Type.COLUMN, 2); - oldPolicies.stream() - .forEach( - policy -> { - try { - String policyName = policy.getName(); - int index = operationTypeIndex.get(operationType); - - // Update the policy name is following Gravitino's 
spec - if (policy - .getName() - .equals(AuthorizationSecurableObject.DOT_JOINER.join(oldMetadataNames))) { - List policyNames = - Lists.newArrayList( - AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName)); - Preconditions.checkArgument( - policyNames.size() >= oldMetadataNames.size(), - String.format("The policy name(%s) is invalid!", policyName)); - if (policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) { - // Doesn't need to rename the policy `*` - return; - } - policyNames.set(index, newMetadataNames.get(index)); - policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames)); - } - // Update the policy resource name to new name - policy - .getResources() - .put( - rangerHelper.policyResourceDefines.get(index), - new RangerPolicy.RangerPolicyResource(newMetadataNames.get(index))); - - boolean alreadyExist = - existNewPolicies.stream() - .anyMatch( - existNewPolicy -> - existNewPolicy.getName().equals(policy.getName()) - || existNewPolicy.getResources().equals(policy.getResources())); - if (alreadyExist) { - LOG.warn( - "The Ranger policy for the metadata object({}) already exists!", - newMetadataNames); - return; - } - - // Update the policy - rangerClient.updatePolicy(policy.getId(), policy); - } catch (RangerServiceException e) { - LOG.error("Failed to rename the policy {}!", policy); - throw new RuntimeException(e); - } - }); } @Override @@ -1138,6 +899,7 @@ public void close() throws IOException {} /** Generate authorization securable object */ public abstract AuthorizationSecurableObject generateAuthorizationSecurableObject( List names, + String path, AuthorizationMetadataObject.Type type, Set privileges); diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHadoopSQLMetadataObject.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHadoopSQLMetadataObject.java index d64433b9feb..452fdc1e058 100644 --- 
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHadoopSQLMetadataObject.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHadoopSQLMetadataObject.java @@ -48,7 +48,7 @@ public MetadataObject.Type metadataObjectType() { public static Type fromMetadataType(MetadataObject.Type metadataType) { for (Type type : Type.values()) { - if (type.metadataObjectType() == metadataType) { + if (type.metadataType == metadataType) { return type; } } diff --git a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHelper.java b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHelper.java index 64c454de61a..1ec65daea22 100644 --- a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHelper.java +++ b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerHelper.java @@ -20,13 +20,10 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Sets; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.apache.commons.lang.StringUtils; -import org.apache.gravitino.authorization.AuthorizationMetadataObject; import org.apache.gravitino.authorization.AuthorizationPrivilege; import org.apache.gravitino.authorization.AuthorizationSecurableObject; import org.apache.gravitino.authorization.Owner; @@ -37,7 +34,6 @@ import org.apache.ranger.plugin.model.RangerPolicy; import org.apache.ranger.plugin.model.RangerRole; import org.apache.ranger.plugin.util.GrantRevokeRoleRequest; -import org.apache.ranger.plugin.util.SearchFilter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,7 +85,7 @@ public RangerHelper( * @param policyItem The policy item to check * @throws 
AuthorizationPluginException If the policy item contains more than one access type */ - void checkPolicyItemAccess(RangerPolicy.RangerPolicyItem policyItem) + public static void checkPolicyItemAccess(RangerPolicy.RangerPolicyItem policyItem) throws AuthorizationPluginException { if (!isGravitinoManagedPolicyItemAccess(policyItem)) { return; @@ -169,94 +165,17 @@ void addPolicyItem( }); } - /** - * Find the managed policies for the ranger securable object. - * - * @param metadataNames The metadata object names to find the managed policy. - * @return The managed policy for the metadata object. - */ - public List wildcardSearchPolies(List metadataNames) - throws AuthorizationPluginException { - Map searchFilters = new HashMap<>(); - searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName); - for (int i = 0; i < metadataNames.size(); i++) { - searchFilters.put( - SearchFilter.RESOURCE_PREFIX + policyResourceDefines.get(i), metadataNames.get(i)); - } - - try { - List policies = rangerClient.findPolicies(searchFilters); - return policies; - } catch (RangerServiceException e) { - throw new AuthorizationPluginException(e, "Failed to find the policies in the Ranger"); - } - } - - /** - * Find the managed policy for the ranger securable object. - * - * @param AuthorizationMetadataObject The ranger securable object to find the managed policy. - * @return The managed policy for the metadata object. - */ - public RangerPolicy findManagedPolicy(AuthorizationMetadataObject AuthorizationMetadataObject) - throws AuthorizationPluginException { - List policies = wildcardSearchPolies(AuthorizationMetadataObject.names()); - if (!policies.isEmpty()) { - /** - * Because Ranger doesn't support the precise search, Ranger will return the policy meets the - * wildcard(*,?) conditions, If you use `db.table` condition to search policy, the Ranger will - * match `db1.table1`, `db1.table2`, `db*.table*`, So we need to manually precisely filter - * this research results. 
- */ - List nsMetadataObj = AuthorizationMetadataObject.names(); - Map preciseFilters = new HashMap<>(); - for (int i = 0; i < nsMetadataObj.size(); i++) { - preciseFilters.put(policyResourceDefines.get(i), nsMetadataObj.get(i)); - } - policies = - policies.stream() - .filter( - policy -> - policy.getResources().entrySet().stream() - .allMatch( - entry -> - preciseFilters.containsKey(entry.getKey()) - && entry.getValue().getValues().size() == 1 - && entry - .getValue() - .getValues() - .contains(preciseFilters.get(entry.getKey())))) - .collect(Collectors.toList()); - } - // Only return the policies that are managed by Gravitino. - if (policies.size() > 1) { - throw new AuthorizationPluginException("Each metadata object can have at most one policy."); - } - - if (policies.isEmpty()) { - return null; - } - - RangerPolicy policy = policies.get(0); - // Delegating Gravitino management policies cannot contain duplicate privilege - policy.getPolicyItems().forEach(this::checkPolicyItemAccess); - policy.getDenyPolicyItems().forEach(this::checkPolicyItemAccess); - policy.getRowFilterPolicyItems().forEach(this::checkPolicyItemAccess); - policy.getDataMaskPolicyItems().forEach(this::checkPolicyItemAccess); - - return policy; - } - - public boolean isGravitinoManagedPolicyItemAccess(RangerPolicy.RangerPolicyItem policyItem) { + public static boolean isGravitinoManagedPolicyItemAccess( + RangerPolicy.RangerPolicyItem policyItem) { return policyItem.getRoles().stream().anyMatch(role -> role.startsWith(GRAVITINO_ROLE_PREFIX)); } - public boolean hasGravitinoManagedPolicyItem(RangerPolicy policy) { + public static boolean hasGravitinoManagedPolicyItem(RangerPolicy policy) { List policyItems = policy.getPolicyItems(); policyItems.addAll(policy.getDenyPolicyItems()); policyItems.addAll(policy.getRowFilterPolicyItems()); policyItems.addAll(policy.getDataMaskPolicyItems()); - return policyItems.stream().anyMatch(this::isGravitinoManagedPolicyItemAccess); + return 
policyItems.stream().anyMatch(RangerHelper::isGravitinoManagedPolicyItemAccess); } public void removeAllGravitinoManagedPolicyItem(RangerPolicy policy) { @@ -277,7 +196,14 @@ public void removeAllGravitinoManagedPolicyItem(RangerPolicy policy) { policy.getDataMaskPolicyItems().stream() .filter(item -> !isGravitinoManagedPolicyItemAccess(item)) .collect(Collectors.toList())); - rangerClient.updatePolicy(policy.getId(), policy); + if (policy.getPolicyItems().isEmpty() + && policy.getDenyPolicyItems().isEmpty() + && policy.getRowFilterPolicyItems().isEmpty() + && policy.getDataMaskPolicyItems().isEmpty()) { + rangerClient.deletePolicy(policy.getId()); + } else { + rangerClient.updatePolicy(policy.getId(), policy); + } } catch (RangerServiceException e) { LOG.error("Failed to update the policy {}!", policy); throw new RuntimeException(e); @@ -383,7 +309,7 @@ protected void updatePolicyOwner(RangerPolicy policy, Owner preOwner, Owner newO }); }); }) - .filter(this::isGravitinoManagedPolicyItemAccess) + .filter(RangerHelper::isGravitinoManagedPolicyItemAccess) .collect(Collectors.toList()); // Add or remove the owner in the policy item matchPolicyItems.forEach( diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java index 4606fa68e70..ecee8dd7940 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java @@ -18,22 +18,28 @@ */ package org.apache.gravitino.authorization.ranger.integration.test; +import com.google.common.collect.ImmutableList; import 
com.google.common.collect.Lists; import java.util.List; +import org.apache.gravitino.Entity; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.MetadataObjects; -import org.apache.gravitino.authorization.AuthorizationMetadataObject; +import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.authorization.AuthorizationSecurableObject; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.authorization.Privileges; import org.apache.gravitino.authorization.SecurableObject; import org.apache.gravitino.authorization.SecurableObjects; import org.apache.gravitino.authorization.common.PathBasedMetadataObject; +import org.apache.gravitino.authorization.common.PathBasedSecurableObject; import org.apache.gravitino.authorization.ranger.RangerAuthorizationPlugin; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; +import org.mockito.Mockito; @Tag("gravitino-docker-test") public class RangerAuthorizationHDFSPluginIT { @@ -51,121 +57,198 @@ public static void cleanup() { RangerITEnv.cleanup(); } + public static void withMockedAuthorizationUtils(Runnable testCode) { + try (MockedStatic authzUtilsMockedStatic = + Mockito.mockStatic(AuthorizationUtils.class)) { + authzUtilsMockedStatic + .when( + () -> + AuthorizationUtils.getMetadataObjectLocation( + Mockito.any(NameIdentifier.class), Mockito.any(Entity.EntityType.class))) + .thenReturn(ImmutableList.of("/test")); + testCode.run(); + } + } + @Test public void testTranslateMetadataObject() { - MetadataObject metalake = - MetadataObjects.parse(String.format("metalake1"), MetadataObject.Type.METALAKE); - Assertions.assertEquals( - PathBasedMetadataObject.Type.PATH, - rangerAuthPlugin.translateMetadataObject(metalake).type()); - - MetadataObject catalog = - 
MetadataObjects.parse(String.format("catalog1"), MetadataObject.Type.CATALOG); - Assertions.assertEquals( - PathBasedMetadataObject.Type.PATH, - rangerAuthPlugin.translateMetadataObject(catalog).type()); - - MetadataObject schema = - MetadataObjects.parse(String.format("catalog1.schema1"), MetadataObject.Type.SCHEMA); - Assertions.assertEquals( - PathBasedMetadataObject.Type.PATH, rangerAuthPlugin.translateMetadataObject(schema).type()); - - MetadataObject table = - MetadataObjects.parse(String.format("catalog1.schema1.tab1"), MetadataObject.Type.TABLE); - Assertions.assertThrows( - IllegalArgumentException.class, () -> rangerAuthPlugin.translateMetadataObject(table)); - - MetadataObject fileset = - MetadataObjects.parse( - String.format("catalog1.schema1.fileset1"), MetadataObject.Type.FILESET); - AuthorizationMetadataObject rangerFileset = rangerAuthPlugin.translateMetadataObject(fileset); - Assertions.assertEquals(1, rangerFileset.names().size()); - Assertions.assertEquals("/test", rangerFileset.fullName()); - Assertions.assertEquals(PathBasedMetadataObject.Type.PATH, rangerFileset.type()); + withMockedAuthorizationUtils( + () -> { + MetadataObject metalake = + MetadataObjects.parse(String.format("metalake1"), MetadataObject.Type.METALAKE); + rangerAuthPlugin + .translateMetadataObject(metalake) + .forEach( + securableObject -> { + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) securableObject; + Assertions.assertEquals( + metalake.fullName(), pathBasedMetadataObject.fullName()); + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, pathBasedMetadataObject.type()); + Assertions.assertEquals("/test", pathBasedMetadataObject.path()); + }); + + MetadataObject catalog = + MetadataObjects.parse(String.format("catalog1"), MetadataObject.Type.CATALOG); + rangerAuthPlugin + .translateMetadataObject(catalog) + .forEach( + securableObject -> { + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) 
securableObject; + Assertions.assertEquals(catalog.fullName(), pathBasedMetadataObject.fullName()); + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, pathBasedMetadataObject.type()); + Assertions.assertEquals("/test", pathBasedMetadataObject.path()); + }); + + MetadataObject schema = + MetadataObjects.parse(String.format("catalog1.schema1"), MetadataObject.Type.SCHEMA); + rangerAuthPlugin + .translateMetadataObject(schema) + .forEach( + securableObject -> { + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) securableObject; + Assertions.assertEquals(schema.fullName(), pathBasedMetadataObject.fullName()); + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, pathBasedMetadataObject.type()); + Assertions.assertEquals("/test", pathBasedMetadataObject.path()); + }); + + MetadataObject table = + MetadataObjects.parse( + String.format("catalog1.schema1.tab1"), MetadataObject.Type.TABLE); + rangerAuthPlugin + .translateMetadataObject(table) + .forEach( + securableObject -> { + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) securableObject; + Assertions.assertEquals(table.fullName(), pathBasedMetadataObject.fullName()); + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, securableObject.type()); + Assertions.assertEquals("/test", pathBasedMetadataObject.path()); + }); + + MetadataObject fileset = + MetadataObjects.parse( + String.format("catalog1.schema1.fileset1"), MetadataObject.Type.FILESET); + rangerAuthPlugin + .translateMetadataObject(fileset) + .forEach( + securableObject -> { + PathBasedMetadataObject pathBasedMetadataObject = + (PathBasedMetadataObject) securableObject; + Assertions.assertEquals(fileset.fullName(), pathBasedMetadataObject.fullName()); + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, securableObject.type()); + Assertions.assertEquals("/test", pathBasedMetadataObject.path()); + }); + }); } @Test public void testTranslatePrivilege() { - 
SecurableObject filesetInMetalake = - SecurableObjects.parse( - String.format("metalake1"), - MetadataObject.Type.METALAKE, - Lists.newArrayList( - Privileges.CreateFileset.allow(), - Privileges.ReadFileset.allow(), - Privileges.WriteFileset.allow())); - List filesetInMetalake1 = - rangerAuthPlugin.translatePrivilege(filesetInMetalake); - Assertions.assertEquals(0, filesetInMetalake1.size()); - - SecurableObject filesetInCatalog = - SecurableObjects.parse( - String.format("catalog1"), - MetadataObject.Type.CATALOG, - Lists.newArrayList( - Privileges.CreateFileset.allow(), - Privileges.ReadFileset.allow(), - Privileges.WriteFileset.allow())); - List filesetInCatalog1 = - rangerAuthPlugin.translatePrivilege(filesetInCatalog); - Assertions.assertEquals(0, filesetInCatalog1.size()); - - SecurableObject filesetInSchema = - SecurableObjects.parse( - String.format("catalog1.schema1"), - MetadataObject.Type.SCHEMA, - Lists.newArrayList( - Privileges.CreateFileset.allow(), - Privileges.ReadFileset.allow(), - Privileges.WriteFileset.allow())); - List filesetInSchema1 = - rangerAuthPlugin.translatePrivilege(filesetInSchema); - Assertions.assertEquals(0, filesetInSchema1.size()); - - SecurableObject filesetInFileset = - SecurableObjects.parse( - String.format("catalog1.schema1.fileset1"), - MetadataObject.Type.FILESET, - Lists.newArrayList( - Privileges.CreateFileset.allow(), - Privileges.ReadFileset.allow(), - Privileges.WriteFileset.allow())); - List filesetInFileset1 = - rangerAuthPlugin.translatePrivilege(filesetInFileset); - Assertions.assertEquals(2, filesetInFileset1.size()); - - filesetInFileset1.forEach( - securableObject -> { - Assertions.assertEquals(PathBasedMetadataObject.Type.PATH, securableObject.type()); - Assertions.assertEquals("/test", securableObject.fullName()); - Assertions.assertEquals(2, securableObject.privileges().size()); + withMockedAuthorizationUtils( + () -> { + SecurableObject filesetInMetalake = + SecurableObjects.parse( + 
String.format("metalake1"), + MetadataObject.Type.METALAKE, + Lists.newArrayList( + Privileges.CreateFileset.allow(), + Privileges.ReadFileset.allow(), + Privileges.WriteFileset.allow())); + List filesetInMetalake1 = + rangerAuthPlugin.translatePrivilege(filesetInMetalake); + Assertions.assertEquals(0, filesetInMetalake1.size()); + + SecurableObject filesetInCatalog = + SecurableObjects.parse( + String.format("catalog1"), + MetadataObject.Type.CATALOG, + Lists.newArrayList( + Privileges.CreateFileset.allow(), + Privileges.ReadFileset.allow(), + Privileges.WriteFileset.allow())); + List filesetInCatalog1 = + rangerAuthPlugin.translatePrivilege(filesetInCatalog); + Assertions.assertEquals(0, filesetInCatalog1.size()); + + SecurableObject filesetInSchema = + SecurableObjects.parse( + String.format("catalog1.schema1"), + MetadataObject.Type.SCHEMA, + Lists.newArrayList( + Privileges.CreateFileset.allow(), + Privileges.ReadFileset.allow(), + Privileges.WriteFileset.allow())); + List filesetInSchema1 = + rangerAuthPlugin.translatePrivilege(filesetInSchema); + Assertions.assertEquals(0, filesetInSchema1.size()); + + SecurableObject filesetInFileset = + SecurableObjects.parse( + String.format("catalog1.schema1.fileset1"), + MetadataObject.Type.FILESET, + Lists.newArrayList( + Privileges.CreateFileset.allow(), + Privileges.ReadFileset.allow(), + Privileges.WriteFileset.allow())); + List filesetInFileset1 = + rangerAuthPlugin.translatePrivilege(filesetInFileset); + Assertions.assertEquals(2, filesetInFileset1.size()); + + filesetInFileset1.forEach( + securableObject -> { + PathBasedSecurableObject pathBasedSecurableObject = + (PathBasedSecurableObject) securableObject; + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, pathBasedSecurableObject.type()); + Assertions.assertEquals("/test", pathBasedSecurableObject.path()); + Assertions.assertEquals(2, pathBasedSecurableObject.privileges().size()); + }); }); } @Test public void testTranslateOwner() { - 
MetadataObject metalake = - MetadataObjects.parse(String.format("metalake1"), MetadataObject.Type.METALAKE); - List metalakeOwner = rangerAuthPlugin.translateOwner(metalake); - Assertions.assertEquals(0, metalakeOwner.size()); - - MetadataObject catalog = - MetadataObjects.parse(String.format("catalog1"), MetadataObject.Type.CATALOG); - List catalogOwner = rangerAuthPlugin.translateOwner(catalog); - Assertions.assertEquals(0, catalogOwner.size()); - - MetadataObject schema = - MetadataObjects.parse(String.format("catalog1.schema1"), MetadataObject.Type.SCHEMA); - List schemaOwner = rangerAuthPlugin.translateOwner(schema); - Assertions.assertEquals(0, schemaOwner.size()); - - MetadataObject fileset = - MetadataObjects.parse( - String.format("catalog1.schema1.fileset1"), MetadataObject.Type.FILESET); - List filesetOwner = rangerAuthPlugin.translateOwner(fileset); - Assertions.assertEquals(1, filesetOwner.size()); - Assertions.assertEquals("/test", filesetOwner.get(0).fullName()); - Assertions.assertEquals(PathBasedMetadataObject.Type.PATH, filesetOwner.get(0).type()); - Assertions.assertEquals(3, filesetOwner.get(0).privileges().size()); + withMockedAuthorizationUtils( + () -> { + MetadataObject metalake = + MetadataObjects.parse(String.format("metalake1"), MetadataObject.Type.METALAKE); + List metalakeOwner = + rangerAuthPlugin.translateOwner(metalake); + Assertions.assertEquals(0, metalakeOwner.size()); + + MetadataObject catalog = + MetadataObjects.parse(String.format("catalog1"), MetadataObject.Type.CATALOG); + List catalogOwner = + rangerAuthPlugin.translateOwner(catalog); + Assertions.assertEquals(0, catalogOwner.size()); + + MetadataObject schema = + MetadataObjects.parse(String.format("catalog1.schema1"), MetadataObject.Type.SCHEMA); + List schemaOwner = rangerAuthPlugin.translateOwner(schema); + Assertions.assertEquals(0, schemaOwner.size()); + + MetadataObject fileset = + MetadataObjects.parse( + String.format("catalog1.schema1.fileset1"), 
MetadataObject.Type.FILESET); + List filesetOwner = + rangerAuthPlugin.translateOwner(fileset); + filesetOwner.forEach( + authorizationSecurableObject -> { + PathBasedSecurableObject pathBasedSecurableObject = + (PathBasedSecurableObject) authorizationSecurableObject; + Assertions.assertEquals(1, filesetOwner.size()); + Assertions.assertEquals("/test", pathBasedSecurableObject.path()); + Assertions.assertEquals( + PathBasedMetadataObject.Type.PATH, pathBasedSecurableObject.type()); + Assertions.assertEquals(3, pathBasedSecurableObject.privileges().size()); + }); + }); } } diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java index 881d8f0ab44..41e91008c8e 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java @@ -58,32 +58,51 @@ public static void cleanup() { public void testTranslateMetadataObject() { MetadataObject metalake = MetadataObjects.parse(String.format("metalake1"), MetadataObject.Type.METALAKE); - AuthorizationMetadataObject rangerMetalake = rangerAuthPlugin.translateMetadataObject(metalake); - Assertions.assertEquals(1, rangerMetalake.names().size()); - Assertions.assertEquals(RangerHelper.RESOURCE_ALL, rangerMetalake.names().get(0)); - Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerMetalake.type()); + List rangerMetalake = + rangerAuthPlugin.translateMetadataObject(metalake); + rangerMetalake.stream() + .forEach( + authz -> { + Assertions.assertEquals(1, authz.names().size()); + Assertions.assertEquals(RangerHelper.RESOURCE_ALL, 
authz.names().get(0)); + Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.SCHEMA, authz.type()); + }); MetadataObject catalog = MetadataObjects.parse(String.format("catalog1"), MetadataObject.Type.CATALOG); - AuthorizationMetadataObject rangerCatalog = rangerAuthPlugin.translateMetadataObject(catalog); - Assertions.assertEquals(1, rangerCatalog.names().size()); - Assertions.assertEquals(RangerHelper.RESOURCE_ALL, rangerCatalog.names().get(0)); - Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerCatalog.type()); + List rangerCatalog = + rangerAuthPlugin.translateMetadataObject(catalog); + rangerCatalog.stream() + .forEach( + authz -> { + Assertions.assertEquals(1, authz.names().size()); + Assertions.assertEquals(RangerHelper.RESOURCE_ALL, authz.names().get(0)); + Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.SCHEMA, authz.type()); + }); MetadataObject schema = MetadataObjects.parse(String.format("catalog1.schema1"), MetadataObject.Type.SCHEMA); - AuthorizationMetadataObject rangerSchema = rangerAuthPlugin.translateMetadataObject(schema); - Assertions.assertEquals(1, rangerSchema.names().size()); - Assertions.assertEquals("schema1", rangerSchema.names().get(0)); - Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.SCHEMA, rangerSchema.type()); + List rangerSchema = + rangerAuthPlugin.translateMetadataObject(schema); + rangerSchema.stream() + .forEach( + authz -> { + Assertions.assertEquals(1, authz.names().size()); + Assertions.assertEquals("schema1", authz.names().get(0)); + Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.SCHEMA, authz.type()); + }); MetadataObject table = MetadataObjects.parse(String.format("catalog1.schema1.tab1"), MetadataObject.Type.TABLE); - AuthorizationMetadataObject rangerTable = rangerAuthPlugin.translateMetadataObject(table); - Assertions.assertEquals(2, rangerTable.names().size()); - Assertions.assertEquals("schema1", rangerTable.names().get(0)); - 
Assertions.assertEquals("tab1", rangerTable.names().get(1)); - Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.TABLE, rangerTable.type()); + List rangerTable = rangerAuthPlugin.translateMetadataObject(table); + rangerTable.stream() + .forEach( + authz -> { + Assertions.assertEquals(2, authz.names().size()); + Assertions.assertEquals("schema1", authz.names().get(0)); + Assertions.assertEquals("tab1", authz.names().get(1)); + Assertions.assertEquals(RangerHadoopSQLMetadataObject.Type.TABLE, authz.type()); + }); } @Test diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java index 919551bd922..2eb5d2b35eb 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java @@ -170,8 +170,6 @@ protected void createMetalake() { metalake = loadMetalake; } - public abstract void createCatalog(); - protected static void waitForUpdatingPolicies() { // After Ranger authorization, Must wait a period of time for the Ranger Spark plugin to update // the policy Sleep time must be greater than the policy update interval @@ -184,6 +182,10 @@ protected static void waitForUpdatingPolicies() { } } + protected abstract void createCatalog(); + + protected abstract String testUserName(); + protected abstract void checkTableAllPrivilegesExceptForCreating(); protected abstract void checkUpdateSQLWithReadWritePrivileges(); @@ -198,7 +200,7 @@ protected static void waitForUpdatingPolicies() { protected abstract void checkDeleteSQLWithWritePrivileges(); - protected abstract void useCatalog() throws InterruptedException; + protected abstract void useCatalog(); 
protected abstract void checkWithoutPrivileges(); @@ -206,7 +208,7 @@ protected static void waitForUpdatingPolicies() { // ISSUE-5947: can't rename a catalog or a metalake @Test - void testRenameMetalakeOrCatalog() { + protected void testRenameMetalakeOrCatalog() { Assertions.assertDoesNotThrow( () -> client.alterMetalake(metalakeName, MetalakeChange.rename("new_name"))); Assertions.assertDoesNotThrow( @@ -224,17 +226,28 @@ protected void testCreateSchema() throws InterruptedException, IOException { useCatalog(); // First, fail to create the schema - Assertions.assertThrows( - AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA)); + Assertions.assertThrows(Exception.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA)); + Exception accessControlException = + Assertions.assertThrows(Exception.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA)); + Assertions.assertTrue( + accessControlException + .getMessage() + .contains( + String.format( + "Permission denied: user [%s] does not have [create] privilege", + testUserName())) + || accessControlException + .getMessage() + .contains( + String.format("Permission denied: user=%s, access=WRITE", testUserName()))); // Second, grant the `CREATE_SCHEMA` role - String userName1 = System.getenv(HADOOP_USER_NAME); String roleName = currentFunName(); SecurableObject securableObject = SecurableObjects.ofMetalake( metalakeName, Lists.newArrayList(Privileges.CreateSchema.allow())); metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); - metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); + metalake.grantRolesToUser(Lists.newArrayList(roleName), testUserName()); waitForUpdatingPolicies(); // Third, succeed to create the schema @@ -259,7 +272,7 @@ void testCreateTable() throws InterruptedException { SecurableObjects.ofMetalake( metalakeName, Lists.newArrayList(Privileges.UseSchema.allow(), Privileges.CreateSchema.allow())); - String userName1 = 
System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.createRole( createSchemaRole, Collections.emptyMap(), Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(createSchemaRole), userName1); @@ -312,7 +325,7 @@ void testReadWriteTableWithMetalakeLevelRole() throws InterruptedException { Privileges.CreateTable.allow(), Privileges.SelectTable.allow(), Privileges.ModifyTable.allow())); - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.createRole(readWriteRole, Collections.emptyMap(), Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(readWriteRole), userName1); waitForUpdatingPolicies(); @@ -365,7 +378,7 @@ void testReadWriteTableWithTableLevelRole() throws InterruptedException { Privileges.UseSchema.allow(), Privileges.CreateSchema.allow(), Privileges.CreateTable.allow())); - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -430,7 +443,7 @@ void testReadOnlyTable() throws InterruptedException { Privileges.CreateSchema.allow(), Privileges.CreateTable.allow(), Privileges.SelectTable.allow())); - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.createRole(readOnlyRole, Collections.emptyMap(), Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(readOnlyRole), userName1); waitForUpdatingPolicies(); @@ -484,7 +497,7 @@ void testWriteOnlyTable() throws InterruptedException { Privileges.CreateSchema.allow(), Privileges.CreateTable.allow(), Privileges.ModifyTable.allow())); - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.createRole(writeOnlyRole, Collections.emptyMap(), 
Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(writeOnlyRole), userName1); waitForUpdatingPolicies(); @@ -555,7 +568,7 @@ void testCreateAllPrivilegesRole() throws InterruptedException { metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -591,7 +604,7 @@ void testDeleteAndRecreateRole() throws InterruptedException { metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -637,7 +650,7 @@ void testDeleteAndRecreateMetadataObject() throws InterruptedException { metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -696,7 +709,7 @@ void testRenameMetadataObject() throws InterruptedException { Privileges.ModifyTable.allow())); metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -734,7 +747,7 @@ void testRenameMetadataObjectPrivilege() throws InterruptedException { 
Privileges.ModifyTable.allow())); metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -780,7 +793,7 @@ void testChangeOwner() throws InterruptedException { Privileges.CreateSchema.allow(), Privileges.CreateTable.allow(), Privileges.ModifyTable.allow())); - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.createRole(helperRole, Collections.emptyMap(), Lists.newArrayList(securableObject)); metalake.grantRolesToUser(Lists.newArrayList(helperRole), userName1); waitForUpdatingPolicies(); @@ -880,7 +893,7 @@ void testChangeOwner() throws InterruptedException { } @Test - void testAllowUseSchemaPrivilege() throws InterruptedException { + protected void testAllowUseSchemaPrivilege() throws InterruptedException { // Choose a catalog useCatalog(); @@ -894,7 +907,7 @@ void testAllowUseSchemaPrivilege() throws InterruptedException { metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -967,7 +980,7 @@ void testDenyPrivileges() throws InterruptedException { roleName, Collections.emptyMap(), Lists.newArrayList(allowObject, denyObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -993,7 +1006,7 @@ void testDenyPrivileges() throws 
InterruptedException { roleName, Collections.emptyMap(), Lists.newArrayList(allowObject, denyObject)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - userName1 = System.getenv(HADOOP_USER_NAME); + userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); @@ -1027,7 +1040,7 @@ void testGrantPrivilegesForMetalake() throws InterruptedException { AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA)); // Granted this role to the spark execution user `HADOOP_USER_NAME` - String userName1 = System.getenv(HADOOP_USER_NAME); + String userName1 = testUserName(); metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1); waitForUpdatingPolicies(); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java index ab74b0449ae..fd93b988d04 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java @@ -373,8 +373,7 @@ void testReadWritePath() throws IOException, RangerServiceException { catalog.asFilesetCatalog().dropFileset(NameIdentifier.of(schemaName, fileset.name())); policies = rangerClient.getPoliciesInService(RangerITEnv.RANGER_HDFS_REPO_NAME); - Assertions.assertEquals(1, policies.size()); - Assertions.assertEquals(3, policies.get(0).getPolicyItems().size()); + Assertions.assertEquals(0, policies.size()); } @Test diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java 
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java index 56cec3e9da3..20ba909c1d6 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java @@ -110,10 +110,15 @@ public void stop() { } @Override - protected void useCatalog() throws InterruptedException { + protected void useCatalog() { // Do nothing, default catalog is ok for Hive. } + @Override + protected String testUserName() { + return System.getenv(HADOOP_USER_NAME); + } + @Override protected void checkWithoutPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE)); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java index 9545f243dd3..6a32ab9ea2a 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveIT.java @@ -343,18 +343,19 @@ public void testFindManagedPolicy() { AuthorizationSecurableObject rangerSecurableObject = rangerAuthHivePlugin.generateAuthorizationSecurableObject( ImmutableList.of(String.format("%s3", dbName), "tab1"), + "", RangerHadoopSQLMetadataObject.Type.TABLE, ImmutableSet.of( new RangerPrivileges.RangerHivePrivilegeImpl( RangerPrivileges.RangerHadoopSQLPrivilege.ALL, Privilege.Condition.ALLOW))); - Assertions.assertNull(rangerHelper.findManagedPolicy(rangerSecurableObject)); + 
Assertions.assertNull(rangerAuthHivePlugin.findManagedPolicy(rangerSecurableObject)); // Add a policy for `db3.tab1` createHivePolicy( Lists.newArrayList(String.format("%s3", dbName), "tab1"), GravitinoITUtils.genRandomName(currentFunName())); // findManagedPolicy function use precise search, so return not null - Assertions.assertNotNull(rangerHelper.findManagedPolicy(rangerSecurableObject)); + Assertions.assertNotNull(rangerAuthHivePlugin.findManagedPolicy(rangerSecurableObject)); } @Test @@ -461,7 +462,7 @@ static void createHivePolicy(List metaObjects, String roleName) { } static boolean deleteHivePolicy(RangerHadoopSQLSecurableObject rangerSecurableObject) { - RangerPolicy policy = rangerHelper.findManagedPolicy(rangerSecurableObject); + RangerPolicy policy = rangerAuthHivePlugin.findManagedPolicy(rangerSecurableObject); if (policy != null) { try { RangerITEnv.rangerClient.deletePolicy(policy.getId()); @@ -890,18 +891,21 @@ void metadataObjectChangeRemoveMetalakeOrCatalog(String funcName, MetadataObject .build(); Assertions.assertTrue(rangerAuthHivePlugin.onRoleCreated(role)); assertFindManagedPolicyItems(role, true); + Assertions.assertEquals( + 6, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); Assertions.assertTrue( - rangerAuthHivePlugin.onMetadataUpdated(MetadataObjectChange.remove(metadataObject))); + rangerAuthHivePlugin.onMetadataUpdated( + MetadataObjectChange.remove(metadataObject, ImmutableList.of()))); assertFindManagedPolicyItems(role, false); Assertions.assertEquals( - 6, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); + 4, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); rangerClient .getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME) .forEach( policy -> { - Assertions.assertFalse(rangerHelper.hasGravitinoManagedPolicyItem(policy)); + Assertions.assertFalse(RangerHelper.hasGravitinoManagedPolicyItem(policy)); }); } @@ -937,7 +941,8 @@ public void 
testMetadataObjectChangeRemoveSchema() throws RangerServiceException 4, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); Assertions.assertTrue( - rangerAuthHivePlugin.onMetadataUpdated(MetadataObjectChange.remove(schemaObject))); + rangerAuthHivePlugin.onMetadataUpdated( + MetadataObjectChange.remove(schemaObject, ImmutableList.of()))); RoleEntity roleVerify = RoleEntity.builder() .withId(1L) @@ -947,7 +952,13 @@ public void testMetadataObjectChangeRemoveSchema() throws RangerServiceException .build(); assertFindManagedPolicyItems(roleVerify, false); Assertions.assertEquals( - 4, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); + 2, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); + + Assertions.assertTrue( + rangerAuthHivePlugin.onMetadataUpdated( + MetadataObjectChange.remove(tableObject, ImmutableList.of()))); + Assertions.assertEquals( + 0, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); } @Test @@ -980,7 +991,8 @@ public void testMetadataObjectChangeRemoveTable() throws RangerServiceException assertFindManagedPolicyItems(role, true); Assertions.assertTrue( - rangerAuthHivePlugin.onMetadataUpdated(MetadataObjectChange.remove(tableObject))); + rangerAuthHivePlugin.onMetadataUpdated( + MetadataObjectChange.remove(tableObject, ImmutableList.of()))); RoleEntity verifyScheamRole = RoleEntity.builder() .withId(1L) @@ -998,7 +1010,7 @@ public void testMetadataObjectChangeRemoveTable() throws RangerServiceException assertFindManagedPolicyItems(verifyScheamRole, true); assertFindManagedPolicyItems(verifyTableRole, false); Assertions.assertEquals( - 4, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); + 2, rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME).size()); } @Test @@ -1247,7 +1259,7 @@ private List findRoleResourceRelatedPolicies(Role role) { .map( rangerSecurableObject -> { 
LOG.info("rangerSecurableObject: " + rangerSecurableObject); - return rangerHelper.findManagedPolicy(rangerSecurableObject); + return rangerAuthHivePlugin.findManagedPolicy(rangerSecurableObject); })) .filter(Objects::nonNull) .collect(Collectors.toList()); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java index 913482ef03e..1cbf076c124 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerITEnv.java @@ -19,7 +19,6 @@ package org.apache.gravitino.authorization.ranger.integration.test; import static org.apache.gravitino.integration.test.container.RangerContainer.RANGER_SERVER_PORT; -import static org.mockito.Mockito.doReturn; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -64,7 +63,7 @@ public class RangerITEnv { private static final String RANGER_HIVE_TYPE = "hive"; public static final String RANGER_HDFS_REPO_NAME = "hdfsDev"; private static final String RANGER_HDFS_TYPE = "hdfs"; - protected static RangerClient rangerClient; + public static RangerClient rangerClient; public static final String HADOOP_USER_NAME = "gravitino"; private static volatile boolean initRangerService = false; private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); @@ -139,7 +138,6 @@ public static void init(String metalakeName, boolean allowAnyoneAccessHDFS) { "HDFS", RangerAuthorizationProperties.RANGER_SERVICE_NAME, RangerITEnv.RANGER_HDFS_REPO_NAME))); - doReturn("/test").when(spyRangerAuthorizationHDFSPlugin).getLocationPath(Mockito.any()); rangerAuthHDFSPlugin = 
spyRangerAuthorizationHDFSPlugin; rangerHelper = diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java index 3e3d0d24234..68b4b7e42ba 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java @@ -107,37 +107,40 @@ public void stop() { } @Override - protected void checkUpdateSQLWithReadWritePrivileges() { + protected String testUserName() { + return System.getenv(HADOOP_USER_NAME); + } + + public void checkUpdateSQLWithReadWritePrivileges() { sparkSession.sql(SQL_UPDATE_TABLE); } @Override - protected void checkUpdateSQLWithReadPrivileges() { + public void checkUpdateSQLWithReadPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE)); } @Override - protected void checkUpdateSQLWithWritePrivileges() { + public void checkUpdateSQLWithWritePrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE)); } @Override - protected void checkDeleteSQLWithReadWritePrivileges() { + public void checkDeleteSQLWithReadWritePrivileges() { sparkSession.sql(SQL_DELETE_TABLE); } @Override - protected void checkDeleteSQLWithReadPrivileges() { + public void checkDeleteSQLWithReadPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DELETE_TABLE)); } @Override - protected void checkDeleteSQLWithWritePrivileges() { + public void checkDeleteSQLWithWritePrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DELETE_TABLE)); } - @Override - protected void 
checkWithoutPrivileges() { + public void checkWithoutPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE)); Assertions.assertThrows( AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList()); @@ -151,8 +154,7 @@ protected void checkWithoutPrivileges() { Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE)); } - @Override - protected void testAlterTable() { + public void testAlterTable() { sparkSession.sql(SQL_ALTER_TABLE); sparkSession.sql(SQL_ALTER_TABLE_BACK); } @@ -183,8 +185,7 @@ public void createCatalog() { LOG.info("Catalog created: {}", catalog); } - @Override - protected void useCatalog() throws InterruptedException { + public void useCatalog() { String userName1 = System.getenv(HADOOP_USER_NAME); String roleName = currentFunName(); SecurableObject securableObject = @@ -198,7 +199,7 @@ protected void useCatalog() throws InterruptedException { waitForUpdatingPolicies(); } - protected void checkTableAllPrivilegesExceptForCreating() { + public void checkTableAllPrivilegesExceptForCreating() { // - a. 
Succeed to insert data into the table sparkSession.sql(SQL_INSERT_TABLE); diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java index c37fd20c85f..33ba6fbe770 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java @@ -110,7 +110,12 @@ public void stop() { } @Override - protected void useCatalog() throws InterruptedException { + protected String testUserName() { + return System.getenv(HADOOP_USER_NAME); + } + + @Override + protected void useCatalog() { String userName1 = System.getenv(HADOOP_USER_NAME); String roleName = currentFunName(); SecurableObject securableObject = diff --git a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java index 7d8079d1ede..7f8d433d141 100644 --- a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java +++ b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java @@ -229,7 +229,14 @@ public void stop() throws IOException { catalog.asSchemas().dropSchema(schema, true); })); Arrays.stream(metalake.listCatalogs()) - .forEach((catalogName -> metalake.dropCatalog(catalogName, true))); + .forEach( + catalogName -> { + try { + metalake.dropCatalog(catalogName, true); + } catch (Exception e) { + // Ignore exception + } + }); client.dropMetalake(metalakeName, true); } if (hiveClientPool != null) { diff --git 
a/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java b/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java index 25f99c797c4..63132f400f9 100644 --- a/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java +++ b/catalogs/catalog-jdbc-postgresql/src/test/java/org/apache/gravitino/catalog/postgresql/integration/test/CatalogPostgreSqlIT.java @@ -682,8 +682,7 @@ void testCreateAndLoadSchema() throws SQLException { Assertions.assertTrue(catalog.asSchemas().dropSchema(testSchemaName2, false)); createSchema(testSchemaName2); SupportsSchemas schemaOps = newCatalog.asSchemas(); - Assertions.assertThrows( - UnsupportedOperationException.class, () -> schemaOps.loadSchema(testSchemaName2)); + Assertions.assertDoesNotThrow(() -> schemaOps.loadSchema(testSchemaName2)); // recovered by re-build the catalog Assertions.assertTrue(metalake.dropCatalog(newCatalogName, true)); newCatalog = createCatalog(newCatalogName); diff --git a/core/build.gradle.kts b/core/build.gradle.kts index a0468168b69..3ca446a51c1 100644 --- a/core/build.gradle.kts +++ b/core/build.gradle.kts @@ -56,6 +56,7 @@ dependencies { testImplementation(libs.junit.jupiter.api) testImplementation(libs.junit.jupiter.params) testImplementation(libs.mockito.core) + testImplementation(libs.mockito.inline) testImplementation(libs.mysql.driver) testImplementation(libs.postgresql.driver) testImplementation(libs.testcontainers) diff --git a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationPrivilegesMappingProvider.java b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationPrivilegesMappingProvider.java index 218de26046e..8c70b2911ca 100644 --- a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationPrivilegesMappingProvider.java +++ 
b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationPrivilegesMappingProvider.java @@ -77,7 +77,7 @@ public interface AuthorizationPrivilegesMappingProvider { * Translate the Gravitino metadata object to the underlying data source metadata object. * * @param metadataObject The Gravitino metadata object. - * @return The underlying data source metadata object. + * @return The underlying data source metadata object list. */ - AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject); + List translateMetadataObject(MetadataObject metadataObject); } diff --git a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java index 0e236b72635..72f12d401af 100644 --- a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java +++ b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java @@ -18,36 +18,49 @@ */ package org.apache.gravitino.authorization; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Sets; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.regex.Pattern; +import java.util.stream.Collectors; import org.apache.gravitino.Catalog; import org.apache.gravitino.Entity; import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.MetadataObject; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; +import org.apache.gravitino.Schema; import org.apache.gravitino.catalog.CatalogManager; +import org.apache.gravitino.catalog.FilesetDispatcher; +import org.apache.gravitino.catalog.hive.HiveConstants; import org.apache.gravitino.connector.BaseCatalog; import 
org.apache.gravitino.connector.authorization.AuthorizationPlugin; import org.apache.gravitino.dto.authorization.PrivilegeDTO; import org.apache.gravitino.dto.util.DTOConverters; +import org.apache.gravitino.exceptions.AuthorizationPluginException; import org.apache.gravitino.exceptions.ForbiddenException; import org.apache.gravitino.exceptions.IllegalPrivilegeException; import org.apache.gravitino.exceptions.NoSuchCatalogException; import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; import org.apache.gravitino.exceptions.NoSuchUserException; +import org.apache.gravitino.file.Fileset; import org.apache.gravitino.meta.RoleEntity; +import org.apache.gravitino.rel.Table; import org.apache.gravitino.utils.MetadataObjectUtil; import org.apache.gravitino.utils.NameIdentifierUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /* The utilization class of authorization module*/ public class AuthorizationUtils { - + private static final Logger LOG = LoggerFactory.getLogger(AuthorizationUtils.class); static final String USER_DOES_NOT_EXIST_MSG = "User %s does not exist in the metalake %s"; static final String GROUP_DOES_NOT_EXIST_MSG = "Group %s does not exist in the metalake %s"; static final String ROLE_DOES_NOT_EXIST_MSG = "Role %s does not exist in the metalake %s"; @@ -61,6 +74,7 @@ public class AuthorizationUtils { private static final Set TOPIC_PRIVILEGES = Sets.immutableEnumSet( Privilege.Name.CREATE_TOPIC, Privilege.Name.PRODUCE_TOPIC, Privilege.Name.CONSUME_TOPIC); + private static final Pattern HDFS_PATTERN = Pattern.compile("^hdfs://[^/]*"); private AuthorizationUtils() {} @@ -249,12 +263,12 @@ public static void checkPrivilege( } public static void authorizationPluginRemovePrivileges( - NameIdentifier ident, Entity.EntityType type) { + NameIdentifier ident, Entity.EntityType type, List locations) { // If we enable authorization, we should remove the privileges about the entity in the // authorization plugin. 
if (GravitinoEnv.getInstance().accessControlDispatcher() != null) { MetadataObject metadataObject = NameIdentifierUtil.toMetadataObject(ident, type); - MetadataObjectChange removeObject = MetadataObjectChange.remove(metadataObject); + MetadataObjectChange removeObject = MetadataObjectChange.remove(metadataObject, locations); callAuthorizationPluginForMetadataObject( ident.namespace().level(0), metadataObject, @@ -364,4 +378,129 @@ private static void checkCatalogType( catalogIdent, catalog.type(), privilege); } } + + public static List getMetadataObjectLocation( + NameIdentifier ident, Entity.EntityType type) { + List locations = new ArrayList<>(); + MetadataObject metadataObject; + try { + metadataObject = NameIdentifierUtil.toMetadataObject(ident, type); + } catch (IllegalArgumentException e) { + LOG.warn("Illegal argument exception for metadata object %s type %s", ident, type, e); + return locations; + } + + String metalake = + (type == Entity.EntityType.METALAKE ? ident.name() : ident.namespace().level(0)); + try { + switch (metadataObject.type()) { + case METALAKE: + { + NameIdentifier[] identifiers = + GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake)); + List finalLocationPath = locations; + Arrays.stream(identifiers) + .collect(Collectors.toList()) + .forEach( + identifier -> { + Catalog catalogObj = + GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier); + if (catalogObj.provider().equals("hive")) { + Schema schema = + GravitinoEnv.getInstance() + .schemaDispatcher() + .loadSchema( + NameIdentifier.of( + metalake, + catalogObj.name(), + "default" /*Hive default schema*/)); + if (schema.properties().containsKey(HiveConstants.LOCATION)) { + String defaultSchemaLocation = + schema.properties().get(HiveConstants.LOCATION); + Preconditions.checkArgument( + defaultSchemaLocation != null, + String.format("Catalog %s location is not found", ident)); + String location = + 
HDFS_PATTERN.matcher(defaultSchemaLocation).replaceAll(""); + finalLocationPath.add(location); + } + } + }); + } + break; + case CATALOG: + { + Catalog catalogObj = GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident); + if (catalogObj.provider().equals("hive")) { + Schema schema = + GravitinoEnv.getInstance() + .schemaDispatcher() + .loadSchema( + NameIdentifier.of( + metalake, catalogObj.name(), "default" /*Hive default schema*/)); + if (schema.properties().containsKey(HiveConstants.LOCATION)) { + String defaultSchemaLocation = schema.properties().get(HiveConstants.LOCATION); + Preconditions.checkArgument( + defaultSchemaLocation != null, + String.format("Catalog %s location is not found", ident)); + String location = HDFS_PATTERN.matcher(defaultSchemaLocation).replaceAll(""); + locations.add(location); + } + } + } + break; + case SCHEMA: + { + Schema schema = GravitinoEnv.getInstance().schemaDispatcher().loadSchema(ident); + if (schema.properties().containsKey(HiveConstants.LOCATION)) { + String schemaLocation = schema.properties().get(HiveConstants.LOCATION); + Preconditions.checkArgument( + schemaLocation != null, String.format("Schema %s location is not found", ident)); + String location = HDFS_PATTERN.matcher(schemaLocation).replaceAll(""); + locations.add(location); + } + } + break; + case TABLE: + { + Table table = GravitinoEnv.getInstance().tableDispatcher().loadTable(ident); + if (table.properties().containsKey(HiveConstants.LOCATION)) { + String schemaLocation = table.properties().get(HiveConstants.LOCATION); + Preconditions.checkArgument( + schemaLocation != null, String.format("Table %s location is not found", ident)); + String location = HDFS_PATTERN.matcher(schemaLocation).replaceAll(""); + locations.add(location); + } + } + break; + case FILESET: + FilesetDispatcher filesetDispatcher = GravitinoEnv.getInstance().filesetDispatcher(); + NameIdentifier identifier = getObjectNameIdentifier(metalake, metadataObject); + Fileset fileset = 
filesetDispatcher.loadFileset(identifier); + Preconditions.checkArgument( + fileset != null, String.format("Fileset %s is not found", identifier)); + String filesetLocation = fileset.storageLocation(); + Preconditions.checkArgument( + filesetLocation != null, + String.format("Fileset %s location is not found", identifier)); + locations.add(HDFS_PATTERN.matcher(filesetLocation).replaceAll("")); + break; + case TOPIC: + break; + default: + throw new AuthorizationPluginException( + "The metadata object type %s is not supported get location paths.", + metadataObject.type()); + } + } catch (Exception e) { + LOG.warn("Failed to get location paths for metadata object %s type %s", ident, type, e); + } + + return locations; + } + + private static NameIdentifier getObjectNameIdentifier( + String metalake, MetadataObject metadataObject) { + return NameIdentifier.parse(String.format("%s.%s", metalake, metadataObject.fullName())); + } } diff --git a/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java b/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java index 1e9c1d9d94f..a3d55d4a72b 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java +++ b/core/src/main/java/org/apache/gravitino/catalog/CatalogManager.java @@ -745,6 +745,8 @@ private boolean containsUserCreatedSchemas( // PostgreSQL catalog includes the "public" schema, see // https://github.com/apache/gravitino/issues/2314 return !schemaEntities.get(0).name().equals("public"); + } else if ("hive".equals(catalogEntity.getProvider())) { + return !schemaEntities.get(0).name().equals("default"); } } diff --git a/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java index efc6e2f4cbd..65b722fdf51 100644 --- a/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java @@ -18,6 +18,7 @@ */ 
package org.apache.gravitino.hook; +import java.util.List; import java.util.Map; import org.apache.gravitino.Catalog; import org.apache.gravitino.CatalogChange; @@ -126,7 +127,10 @@ public boolean dropCatalog(NameIdentifier ident) { @Override public boolean dropCatalog(NameIdentifier ident, boolean force) throws NonEmptyEntityException, CatalogInUseException { - AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.CATALOG); + List locations = + AuthorizationUtils.getMetadataObjectLocation(ident, Entity.EntityType.CATALOG); + AuthorizationUtils.authorizationPluginRemovePrivileges( + ident, Entity.EntityType.CATALOG, locations); return dispatcher.dropCatalog(ident, force); } diff --git a/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java index 40d0cc5ec29..a1e19f9cfab 100644 --- a/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.hook; +import java.util.List; import java.util.Map; import org.apache.gravitino.Entity; import org.apache.gravitino.GravitinoEnv; @@ -103,8 +104,11 @@ public Fileset alterFileset(NameIdentifier ident, FilesetChange... 
changes) @Override public boolean dropFileset(NameIdentifier ident) { + List locations = + AuthorizationUtils.getMetadataObjectLocation(ident, Entity.EntityType.FILESET); boolean dropped = dispatcher.dropFileset(ident); - AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.FILESET); + AuthorizationUtils.authorizationPluginRemovePrivileges( + ident, Entity.EntityType.FILESET, locations); return dropped; } diff --git a/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java index e6e1a373654..df0925db2a1 100644 --- a/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/SchemaHookDispatcher.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.hook; +import java.util.List; import java.util.Map; import org.apache.gravitino.Entity; import org.apache.gravitino.GravitinoEnv; @@ -89,8 +90,11 @@ public Schema alterSchema(NameIdentifier ident, SchemaChange... 
changes) @Override public boolean dropSchema(NameIdentifier ident, boolean cascade) throws NonEmptySchemaException { + List locations = + AuthorizationUtils.getMetadataObjectLocation(ident, Entity.EntityType.SCHEMA); boolean dropped = dispatcher.dropSchema(ident, cascade); - AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.SCHEMA); + AuthorizationUtils.authorizationPluginRemovePrivileges( + ident, Entity.EntityType.SCHEMA, locations); return dropped; } diff --git a/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java index 1fe9db5d737..903f3d15343 100644 --- a/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.hook; +import java.util.List; import java.util.Map; import org.apache.gravitino.Entity; import org.apache.gravitino.GravitinoEnv; @@ -115,15 +116,21 @@ public Table alterTable(NameIdentifier ident, TableChange... 
changes) @Override public boolean dropTable(NameIdentifier ident) { + List locations = + AuthorizationUtils.getMetadataObjectLocation(ident, Entity.EntityType.TABLE); boolean dropped = dispatcher.dropTable(ident); - AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.TABLE); + AuthorizationUtils.authorizationPluginRemovePrivileges( + ident, Entity.EntityType.TABLE, locations); return dropped; } @Override public boolean purgeTable(NameIdentifier ident) throws UnsupportedOperationException { + List locations = + AuthorizationUtils.getMetadataObjectLocation(ident, Entity.EntityType.TABLE); boolean purged = dispatcher.purgeTable(ident); - AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.TABLE); + AuthorizationUtils.authorizationPluginRemovePrivileges( + ident, Entity.EntityType.TABLE, locations); return purged; } diff --git a/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java b/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java index bc0caeb3d19..546eede8b9e 100644 --- a/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/hook/TopicHookDispatcher.java @@ -18,6 +18,7 @@ */ package org.apache.gravitino.hook; +import java.util.List; import java.util.Map; import org.apache.gravitino.Entity; import org.apache.gravitino.GravitinoEnv; @@ -88,8 +89,11 @@ public Topic alterTopic(NameIdentifier ident, TopicChange... 
changes) @Override public boolean dropTopic(NameIdentifier ident) { + List locations = + AuthorizationUtils.getMetadataObjectLocation(ident, Entity.EntityType.TOPIC); boolean dropped = dispatcher.dropTopic(ident); - AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.TOPIC); + AuthorizationUtils.authorizationPluginRemovePrivileges( + ident, Entity.EntityType.TOPIC, locations); return dropped; } diff --git a/core/src/test/java/org/apache/gravitino/catalog/TestOperationDispatcher.java b/core/src/test/java/org/apache/gravitino/catalog/TestOperationDispatcher.java index 72415888c61..45e821515d3 100644 --- a/core/src/test/java/org/apache/gravitino/catalog/TestOperationDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/catalog/TestOperationDispatcher.java @@ -25,6 +25,7 @@ import static org.mockito.Mockito.reset; import static org.mockito.Mockito.spy; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.io.IOException; import java.time.Instant; @@ -32,9 +33,11 @@ import org.apache.gravitino.Catalog; import org.apache.gravitino.Config; import org.apache.gravitino.Configs; +import org.apache.gravitino.Entity; import org.apache.gravitino.EntityStore; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.StringIdentifier; +import org.apache.gravitino.authorization.AuthorizationUtils; import org.apache.gravitino.exceptions.IllegalNamespaceException; import org.apache.gravitino.meta.AuditInfo; import org.apache.gravitino.meta.BaseMetalake; @@ -48,6 +51,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.function.Executable; +import org.mockito.MockedStatic; +import org.mockito.Mockito; public abstract class TestOperationDispatcher { @@ -139,4 +144,17 @@ void testPropertyException(Executable operation, String... 
errorMessage) { Assertions.assertTrue(exception.getMessage().contains(msg)); } } + + public static void withMockedAuthorizationUtils(Runnable testCode) { + try (MockedStatic authzUtilsMockedStatic = + Mockito.mockStatic(AuthorizationUtils.class)) { + authzUtilsMockedStatic + .when( + () -> + AuthorizationUtils.getMetadataObjectLocation( + Mockito.any(NameIdentifier.class), Mockito.any(Entity.EntityType.class))) + .thenReturn(ImmutableList.of("/test")); + testCode.run(); + } + } } diff --git a/core/src/test/java/org/apache/gravitino/hook/TestFilesetHookDispatcher.java b/core/src/test/java/org/apache/gravitino/hook/TestFilesetHookDispatcher.java index 63475ab0596..501381ae6b9 100644 --- a/core/src/test/java/org/apache/gravitino/hook/TestFilesetHookDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/hook/TestFilesetHookDispatcher.java @@ -18,12 +18,28 @@ */ package org.apache.gravitino.hook; +import static org.apache.gravitino.Configs.CATALOG_CACHE_EVICTION_INTERVAL_MS; +import static org.apache.gravitino.Configs.DEFAULT_ENTITY_RELATIONAL_STORE; +import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER; +import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_URL; +import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_STORE; +import static org.apache.gravitino.Configs.ENTITY_STORE; +import static org.apache.gravitino.Configs.RELATIONAL_ENTITY_STORE; +import static org.apache.gravitino.Configs.SERVICE_ADMINS; +import static org.apache.gravitino.Configs.STORE_DELETE_AFTER_TIME; +import static org.apache.gravitino.Configs.STORE_TRANSACTION_MAX_SKEW_TIME; +import static org.apache.gravitino.Configs.TREE_LOCK_CLEAN_INTERVAL; +import static org.apache.gravitino.Configs.TREE_LOCK_MAX_NODE_IN_MEMORY; +import static org.apache.gravitino.Configs.TREE_LOCK_MIN_NODE_IN_MEMORY; +import static org.apache.gravitino.Configs.VERSION_RETENTION_COUNT; import static org.mockito.ArgumentMatchers.any; import 
com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; import java.io.IOException; import java.util.Map; import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.gravitino.Config; import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; @@ -35,6 +51,7 @@ import org.apache.gravitino.connector.authorization.AuthorizationPlugin; import org.apache.gravitino.file.Fileset; import org.apache.gravitino.file.FilesetChange; +import org.apache.gravitino.lock.LockManager; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -75,14 +92,36 @@ public void testDropAuthorizationPrivilege() { NameIdentifier filesetIdent = NameIdentifier.of(filesetNs, "filesetNAME1"); filesetHookDispatcher.createFileset( filesetIdent, "comment", Fileset.Type.MANAGED, "fileset41", props); - Mockito.reset(authorizationPlugin); - - filesetHookDispatcher.dropFileset(filesetIdent); - Mockito.verify(authorizationPlugin).onMetadataUpdated(any()); - Mockito.reset(authorizationPlugin); - schemaHookDispatcher.dropSchema(NameIdentifier.of(filesetNs.levels()), true); - Mockito.verify(authorizationPlugin).onMetadataUpdated(any()); + withMockedAuthorizationUtils( + () -> { + filesetHookDispatcher.dropFileset(filesetIdent); + Config config = Mockito.mock(Config.class); + Mockito.when(config.get(SERVICE_ADMINS)) + .thenReturn(Lists.newArrayList("admin1", "admin2")); + Mockito.when(config.get(ENTITY_STORE)).thenReturn(RELATIONAL_ENTITY_STORE); + Mockito.when(config.get(ENTITY_RELATIONAL_STORE)) + .thenReturn(DEFAULT_ENTITY_RELATIONAL_STORE); + Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_URL)) + .thenReturn( + String.format("jdbc:h2:file:%s;DB_CLOSE_DELAY=-1;MODE=MYSQL", "/tmp/testdb")); + Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER)) + .thenReturn("org.h2.Driver"); + 
Mockito.when(config.get(STORE_TRANSACTION_MAX_SKEW_TIME)).thenReturn(1000L); + Mockito.when(config.get(STORE_DELETE_AFTER_TIME)).thenReturn(20 * 60 * 1000L); + Mockito.when(config.get(VERSION_RETENTION_COUNT)).thenReturn(1L); + Mockito.when(config.get(CATALOG_CACHE_EVICTION_INTERVAL_MS)).thenReturn(1000L); + Mockito.doReturn(100000L).when(config).get(TREE_LOCK_MAX_NODE_IN_MEMORY); + Mockito.doReturn(1000L).when(config).get(TREE_LOCK_MIN_NODE_IN_MEMORY); + Mockito.doReturn(36000L).when(config).get(TREE_LOCK_CLEAN_INTERVAL); + try { + FieldUtils.writeField( + GravitinoEnv.getInstance(), "lockManager", new LockManager(config), true); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + schemaHookDispatcher.dropSchema(NameIdentifier.of(filesetNs.levels()), true); + }); } @Test diff --git a/core/src/test/java/org/apache/gravitino/hook/TestTableHookDispatcher.java b/core/src/test/java/org/apache/gravitino/hook/TestTableHookDispatcher.java index fd1137a0e9a..894c5df5fa5 100644 --- a/core/src/test/java/org/apache/gravitino/hook/TestTableHookDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/hook/TestTableHookDispatcher.java @@ -18,12 +18,28 @@ */ package org.apache.gravitino.hook; +import static org.apache.gravitino.Configs.CATALOG_CACHE_EVICTION_INTERVAL_MS; +import static org.apache.gravitino.Configs.DEFAULT_ENTITY_RELATIONAL_STORE; +import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER; +import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_URL; +import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_STORE; +import static org.apache.gravitino.Configs.ENTITY_STORE; +import static org.apache.gravitino.Configs.RELATIONAL_ENTITY_STORE; +import static org.apache.gravitino.Configs.SERVICE_ADMINS; +import static org.apache.gravitino.Configs.STORE_DELETE_AFTER_TIME; +import static org.apache.gravitino.Configs.STORE_TRANSACTION_MAX_SKEW_TIME; +import static 
org.apache.gravitino.Configs.TREE_LOCK_CLEAN_INTERVAL; +import static org.apache.gravitino.Configs.TREE_LOCK_MAX_NODE_IN_MEMORY; +import static org.apache.gravitino.Configs.TREE_LOCK_MIN_NODE_IN_MEMORY; +import static org.apache.gravitino.Configs.VERSION_RETENTION_COUNT; import static org.mockito.ArgumentMatchers.any; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; import java.io.IOException; import java.util.Map; import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.gravitino.Config; import org.apache.gravitino.GravitinoEnv; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.Namespace; @@ -34,6 +50,7 @@ import org.apache.gravitino.catalog.TestTableOperationDispatcher; import org.apache.gravitino.connector.BaseCatalog; import org.apache.gravitino.connector.authorization.AuthorizationPlugin; +import org.apache.gravitino.lock.LockManager; import org.apache.gravitino.rel.Column; import org.apache.gravitino.rel.TableChange; import org.apache.gravitino.rel.expressions.NamedReference; @@ -120,13 +137,35 @@ public void testDropAuthorizationPrivilege() { tableHookDispatcher.createTable( tableIdent, columns, "comment", props, transforms, distribution, sortOrders, indexes); - Mockito.reset(authorizationPlugin); - tableHookDispatcher.dropTable(tableIdent); - Mockito.verify(authorizationPlugin).onMetadataUpdated(any()); - - Mockito.reset(authorizationPlugin); - schemaHookDispatcher.dropSchema(NameIdentifier.of(tableNs.levels()), true); - Mockito.verify(authorizationPlugin).onMetadataUpdated(any()); + withMockedAuthorizationUtils( + () -> { + tableHookDispatcher.dropTable(tableIdent); + Config config = Mockito.mock(Config.class); + Mockito.when(config.get(SERVICE_ADMINS)) + .thenReturn(Lists.newArrayList("admin1", "admin2")); + Mockito.when(config.get(ENTITY_STORE)).thenReturn(RELATIONAL_ENTITY_STORE); + Mockito.when(config.get(ENTITY_RELATIONAL_STORE)) + .thenReturn(DEFAULT_ENTITY_RELATIONAL_STORE); 
+ Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_URL)) + .thenReturn( + String.format("jdbc:h2:file:%s;DB_CLOSE_DELAY=-1;MODE=MYSQL", "/tmp/testdb")); + Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER)) + .thenReturn("org.h2.Driver"); + Mockito.when(config.get(STORE_TRANSACTION_MAX_SKEW_TIME)).thenReturn(1000L); + Mockito.when(config.get(STORE_DELETE_AFTER_TIME)).thenReturn(20 * 60 * 1000L); + Mockito.when(config.get(VERSION_RETENTION_COUNT)).thenReturn(1L); + Mockito.when(config.get(CATALOG_CACHE_EVICTION_INTERVAL_MS)).thenReturn(1000L); + Mockito.doReturn(100000L).when(config).get(TREE_LOCK_MAX_NODE_IN_MEMORY); + Mockito.doReturn(1000L).when(config).get(TREE_LOCK_MIN_NODE_IN_MEMORY); + Mockito.doReturn(36000L).when(config).get(TREE_LOCK_CLEAN_INTERVAL); + try { + FieldUtils.writeField( + GravitinoEnv.getInstance(), "lockManager", new LockManager(config), true); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + schemaHookDispatcher.dropSchema(NameIdentifier.of(tableNs.levels()), true); + }); } @Test diff --git a/core/src/test/java/org/apache/gravitino/hook/TestTopicHookDispatcher.java b/core/src/test/java/org/apache/gravitino/hook/TestTopicHookDispatcher.java index 5e2a51547f3..dab37bee051 100644 --- a/core/src/test/java/org/apache/gravitino/hook/TestTopicHookDispatcher.java +++ b/core/src/test/java/org/apache/gravitino/hook/TestTopicHookDispatcher.java @@ -72,8 +72,9 @@ public void testDropAuthorizationPrivilege() { NameIdentifier topicIdent = NameIdentifier.of(topicNs, "topicNAME"); topicHookDispatcher.createTopic(topicIdent, "comment", null, props); - Mockito.reset(authorizationPlugin); - topicHookDispatcher.dropTopic(topicIdent); - Mockito.verify(authorizationPlugin).onMetadataUpdated(any()); + withMockedAuthorizationUtils( + () -> { + topicHookDispatcher.dropTopic(topicIdent); + }); } }