Commit

[apache#5956] feat(auth): Uses chain authorization to support Hive and HDFS
xunliu committed Jan 3, 2025
1 parent 936d045 commit 43b86f7
Showing 42 changed files with 2,360 additions and 965 deletions.
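
For orientation: the new "chain" authorization provider is configured purely through catalog properties of the form authorization.chain.<plugin>.*, with one plugin guarding Hive SQL objects and one guarding the backing HDFS paths. A condensed sketch follows (names such as "catalog1" and the metalake handle are assumptions; the omitted per-plugin Ranger admin URL, auth type, credentials and service names are set exactly as in TestChainedAuthorizationIT#createCatalog below):

// Condensed illustration, not part of the diff. Assumes an existing GravitinoMetalake handle
// `metalake`; uses org.apache.gravitino.Catalog and
// org.apache.gravitino.authorization.common.ChainedAuthorizationProperties.
Map<String, String> catalogConf = new HashMap<>();
catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain");
catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1");
catalogConf.put("authorization.chain.hive1.provider", "ranger");
catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL");
catalogConf.put("authorization.chain.hdfs1.provider", "ranger");
catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS");
// ...plus ranger.admin.url, ranger.auth.type, ranger.username, ranger.password and
// ranger.service.name for each of the two plugins, as in the test below.
metalake.createCatalog("catalog1", Catalog.Type.RELATIONAL, "hive", "comment", catalogConf);
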
@@ -19,6 +19,7 @@
package org.apache.gravitino.authorization;

import com.google.common.base.Preconditions;
import java.util.List;
import java.util.Objects;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.annotation.Evolving;
@@ -44,10 +45,11 @@ static MetadataObjectChange rename(
* Remove a metadata entity MetadataObjectChange.
*
* @param metadataObject The metadata object.
* @param locationPaths The location paths of the metadata object.
* @return a MetadataObjectChange for the removed metadata object.
*/
static MetadataObjectChange remove(MetadataObject metadataObject) {
return new RemoveMetadataObject(metadataObject);
static MetadataObjectChange remove(MetadataObject metadataObject, List<String> locationPaths) {
return new RemoveMetadataObject(metadataObject, locationPaths);
}

/** A RenameMetadataObject is to rename securable object's metadata entity. */
@@ -127,9 +129,11 @@ public String toString() {
/** A RemoveMetadataObject is to remove securable object's metadata entity. */
final class RemoveMetadataObject implements MetadataObjectChange {
private final MetadataObject metadataObject;
private final List<String> locationPaths;

private RemoveMetadataObject(MetadataObject metadataObject) {
private RemoveMetadataObject(MetadataObject metadataObject, List<String> locationPaths) {
this.metadataObject = metadataObject;
this.locationPaths = locationPaths;
}

/**
@@ -141,6 +145,15 @@ public MetadataObject metadataObject() {
return metadataObject;
}

/**
* Returns the location paths of the metadata object.
*
* @return the location paths.
*/
public List<String> getLocationPaths() {
return locationPaths;
}

/**
* Compares this RemoveMetadataObject instance with another object for equality. The comparison
* is based on the old metadata entity.
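
For context, a minimal usage sketch of the widened remove(...) overload (not part of the diff; the names and the HDFS location are assumptions). The extra locationPaths let a path-based plugin, such as the chained HDFS plugin, drop its policies for the object's storage locations when the object itself is removed:

// Hypothetical caller; MetadataObjects.of(...) and Lists.newArrayList(...) are used as elsewhere in this commit.
MetadataObject schema =
    MetadataObjects.of("catalog1", "schema1", MetadataObject.Type.SCHEMA);
MetadataObjectChange change =
    MetadataObjectChange.remove(
        schema, Lists.newArrayList("hdfs://localhost:9000/user/hive/warehouse/schema1.db"));
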
@@ -27,17 +27,21 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.Configs;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.MetadataObjects;
import org.apache.gravitino.auth.AuthConstants;
import org.apache.gravitino.auth.AuthenticatorType;
import org.apache.gravitino.authorization.Owner;
import org.apache.gravitino.authorization.Privileges;
import org.apache.gravitino.authorization.SecurableObject;
import org.apache.gravitino.authorization.SecurableObjects;
import org.apache.gravitino.authorization.common.ChainedAuthorizationProperties;
import org.apache.gravitino.authorization.ranger.integration.test.RangerBaseE2EIT;
import org.apache.gravitino.authorization.ranger.integration.test.RangerITEnv;
import org.apache.gravitino.authorization.ranger.integration.test.SparkBaseIT;
import org.apache.gravitino.catalog.hive.HiveConstants;
import org.apache.gravitino.exceptions.UserAlreadyExistsException;
import org.apache.gravitino.integration.test.container.HiveContainer;
@@ -49,15 +53,19 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.kyuubi.plugin.spark.authz.AccessControlException;
import org.apache.ranger.RangerServiceException;
import org.apache.ranger.plugin.model.RangerPolicy;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.platform.commons.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestChainedAuthorizationIT extends RangerBaseE2EIT {
public class TestChainedAuthorizationIT extends SparkBaseIT {
private static final Logger LOG = LoggerFactory.getLogger(TestChainedAuthorizationIT.class);
private static String DEFAULT_FS;
private FileSystem fileSystem;
@@ -74,7 +82,7 @@ public void startIntegrationTest() throws Exception {
registerCustomConfigs(configs);

super.startIntegrationTest();
RangerITEnv.init(RangerBaseE2EIT.metalakeName, false);
RangerITEnv.init(SparkBaseIT.metalakeName, false);
RangerITEnv.startHiveRangerContainer();

HIVE_METASTORE_URIS =
@@ -150,12 +158,80 @@ public void stop() throws IOException {
RangerITEnv.cleanup();
}

@AfterEach
void clean() {
try {
List<RangerPolicy> rangerHivePolicies =
RangerITEnv.rangerClient.getPoliciesInService(RangerITEnv.RANGER_HIVE_REPO_NAME);
List<RangerPolicy> rangerHdfsPolicies =
RangerITEnv.rangerClient.getPoliciesInService(RangerITEnv.RANGER_HDFS_REPO_NAME);
rangerHivePolicies.stream().forEach(policy -> LOG.info("Ranger Hive policy: {}", policy));
rangerHdfsPolicies.stream().forEach(policy -> LOG.info("Ranger HDFS policy: {}", policy));
Preconditions.condition(
rangerHivePolicies.size() == 0, "Ranger Hive policies should be empty");
Preconditions.condition(
rangerHdfsPolicies.size() == 0, "Ranger HDFS policies should be empty");
} catch (RangerServiceException e) {
throw new RuntimeException(e);
}
}

@Override
protected String testUserName() {
return AuthConstants.ANONYMOUS_USER;
}

@Override
protected void createCatalog() {
Map<String, String> catalogConf = new HashMap<>();
catalogConf.put(HiveConstants.METASTORE_URIS, HIVE_METASTORE_URIS);
catalogConf.put(IMPERSONATION_ENABLE, "true");
catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain");
catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1");
catalogConf.put("authorization.chain.hive1.provider", "ranger");
catalogConf.put("authorization.chain.hive1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hive1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hive1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hive1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL");
catalogConf.put(
"authorization.chain.hive1.ranger.service.name", RangerITEnv.RANGER_HIVE_REPO_NAME);
catalogConf.put("authorization.chain.hdfs1.provider", "ranger");
catalogConf.put("authorization.chain.hdfs1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hdfs1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hdfs1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hdfs1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS");
catalogConf.put(
"authorization.chain.hdfs1.ranger.service.name", RangerITEnv.RANGER_HDFS_REPO_NAME);

metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, "hive", "comment", catalogConf);
catalog = metalake.loadCatalog(catalogName);
LOG.info("Catalog created: {}", catalog);
}

private String storageLocation(String dirName) {
return DEFAULT_FS + "/" + dirName;
}

@Test
public void testCreateSchemaInCatalog() throws IOException {
SecurableObject securableObject =
SecurableObjects.ofCatalog(
catalogName, Lists.newArrayList(Privileges.CreateSchema.allow()));
doTestCreateSchema(currentFunName(), securableObject);
}

@Test
public void testCreateSchemaInMetalake() throws IOException {
SecurableObject securableObject =
SecurableObjects.ofMetalake(
metalakeName, Lists.newArrayList(Privileges.CreateSchema.allow()));
doTestCreateSchema(currentFunName(), securableObject);
}

private void doTestCreateSchema(String roleName, SecurableObject securableObject)
throws IOException {
// Choose a catalog
useCatalog();

@@ -168,22 +244,17 @@ public void testCreateSchemaInCatalog() throws IOException {
.contains(
String.format(
"Permission denied: user [%s] does not have [create] privilege",
AuthConstants.ANONYMOUS_USER))
testUserName()))
|| accessControlException
.getMessage()
.contains(
String.format(
"Permission denied: user=%s, access=WRITE", AuthConstants.ANONYMOUS_USER)));
String.format("Permission denied: user=%s, access=WRITE", testUserName())));
Path schemaPath = new Path(storageLocation(schemaName + ".db"));
Assertions.assertFalse(fileSystem.exists(schemaPath));
FileStatus fileStatus = fileSystem.getFileStatus(new Path(DEFAULT_FS));
Assertions.assertEquals(System.getenv(HADOOP_USER_NAME), fileStatus.getOwner());

// Second, grant the `CREATE_SCHEMA` role
String roleName = currentFunName();
SecurableObject securableObject =
SecurableObjects.ofCatalog(
catalogName, Lists.newArrayList(Privileges.CreateSchema.allow()));
metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
metalake.grantRolesToUser(Lists.newArrayList(roleName), AuthConstants.ANONYMOUS_USER);
waitForUpdatingPolicies();
@@ -198,7 +269,15 @@
Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE));

// Clean up
// Set owner
MetadataObject schemaObject =
MetadataObjects.of(catalogName, schemaName, MetadataObject.Type.SCHEMA);
metalake.setOwner(schemaObject, testUserName(), Owner.Type.USER);
waitForUpdatingPolicies();
sparkSession.sql(SQL_DROP_SCHEMA);
catalog.asSchemas().dropSchema(schemaName, false);
Assertions.assertFalse(fileSystem.exists(schemaPath));

metalake.deleteRole(roleName);
waitForUpdatingPolicies();

@@ -218,33 +297,9 @@ public void testCreateSchemaInCatalog() throws IOException {
"Permission denied: user=%s, access=WRITE", AuthConstants.ANONYMOUS_USER)));
}

@Override
public void createCatalog() {
Map<String, String> catalogConf = new HashMap<>();
catalogConf.put(HiveConstants.METASTORE_URIS, HIVE_METASTORE_URIS);
catalogConf.put(IMPERSONATION_ENABLE, "true");
catalogConf.put(Catalog.AUTHORIZATION_PROVIDER, "chain");
catalogConf.put(ChainedAuthorizationProperties.CHAIN_PLUGINS_PROPERTIES_KEY, "hive1,hdfs1");
catalogConf.put("authorization.chain.hive1.provider", "ranger");
catalogConf.put("authorization.chain.hive1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hive1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hive1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hive1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hive1.ranger.service.type", "HadoopSQL");
catalogConf.put(
"authorization.chain.hive1.ranger.service.name", RangerITEnv.RANGER_HIVE_REPO_NAME);
catalogConf.put("authorization.chain.hdfs1.provider", "ranger");
catalogConf.put("authorization.chain.hdfs1.ranger.auth.type", RangerContainer.authType);
catalogConf.put("authorization.chain.hdfs1.ranger.admin.url", RangerITEnv.RANGER_ADMIN_URL);
catalogConf.put("authorization.chain.hdfs1.ranger.username", RangerContainer.rangerUserName);
catalogConf.put("authorization.chain.hdfs1.ranger.password", RangerContainer.rangerPassword);
catalogConf.put("authorization.chain.hdfs1.ranger.service.type", "HDFS");
catalogConf.put(
"authorization.chain.hdfs1.ranger.service.name", RangerITEnv.RANGER_HDFS_REPO_NAME);

metalake.createCatalog(catalogName, Catalog.Type.RELATIONAL, "hive", "comment", catalogConf);
catalog = metalake.loadCatalog(catalogName);
LOG.info("Catalog created: {}", catalog);
@Test
public void testRenameMetalakeOrCatalog() {
// super.testRenameMetalakeOrCatalog();
}

@Test
@@ -308,8 +363,8 @@ void testChangeOwner() throws InterruptedException {
}

@Test
void testAllowUseSchemaPrivilege() throws InterruptedException {
// TODO
protected void testAllowUseSchemaPrivilege() throws InterruptedException {
super.testAllowUseSchemaPrivilege();
}

@Test
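
To recap the flow doTestCreateSchema exercises (a sketch with assumed names; every call appears in the test above): create a role carrying CreateSchema on the catalog, grant it to the test user, and wait for Gravitino to push the matching policies to both the Ranger HadoopSQL and Ranger HDFS services before the Spark SQL statements succeed.

// Sketch with assumed names, mirroring doTestCreateSchema above.
SecurableObject securableObject =
    SecurableObjects.ofCatalog("catalog1", Lists.newArrayList(Privileges.CreateSchema.allow()));
metalake.createRole("createSchemaRole", Collections.emptyMap(), Lists.newArrayList(securableObject));
metalake.grantRolesToUser(Lists.newArrayList("createSchemaRole"), AuthConstants.ANONYMOUS_USER);
waitForUpdatingPolicies(); // afterwards both Ranger services hold the generated policies
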
@@ -19,9 +19,7 @@
package org.apache.gravitino.authorization.common;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.List;
import javax.annotation.Nullable;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.authorization.AuthorizationMetadataObject;

@@ -44,29 +42,37 @@ public MetadataObject.Type metadataObjectType() {
}
}

private final String name;
private final String parent;
private final String path;

private final AuthorizationMetadataObject.Type type;

public PathBasedMetadataObject(String path, AuthorizationMetadataObject.Type type) {
public PathBasedMetadataObject(
String parent, String name, String path, AuthorizationMetadataObject.Type type) {
this.parent = parent;
this.name = name;
this.path = path;
this.type = type;
}

@Nullable
@Override
public String parent() {
return null;
public String name() {
return name;
}

@Override
public String name() {
return this.path;
public List<String> names() {
return DOT_SPLITTER.splitToList(fullName());
}

@Override
public List<String> names() {
return ImmutableList.of(this.path);
public String parent() {
return parent;
}

public String path() {
return path;
}

@Override
@@ -75,20 +81,15 @@ public AuthorizationMetadataObject.Type type() {
}

@Override
public void validateAuthorizationMetadataObject() throws IllegalArgumentException {
public void validate() throws IllegalArgumentException {
List<String> names = names();
Preconditions.checkArgument(
names != null && !names.isEmpty(),
"Cannot create a path based metadata object with no names");
Preconditions.checkArgument(
names.size() == 1,
"Cannot create a path based metadata object with the name length which is 1");
Preconditions.checkArgument(
type != null, "Cannot create a path based metadata object with no type");

path != null && !path.isEmpty(), "Cannot create a path based metadata object with no path");
Preconditions.checkArgument(
type == PathBasedMetadataObject.Type.PATH, "it must be the PATH type");

for (String name : names) {
Preconditions.checkArgument(
name != null, "Cannot create a path based metadata object with null name");
@@ -31,8 +31,12 @@ public class PathBasedSecurableObject extends PathBasedMetadataObject
private final List<AuthorizationPrivilege> privileges;

public PathBasedSecurableObject(
String path, AuthorizationMetadataObject.Type type, Set<AuthorizationPrivilege> privileges) {
super(path, type);
String parent,
String name,
String path,
AuthorizationMetadataObject.Type type,
Set<AuthorizationPrivilege> privileges) {
super(parent, name, path, type);
this.privileges = ImmutableList.copyOf(privileges);
}

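
A construction sketch for the enriched path-based types (all values are assumptions): a path-based object now carries its Gravitino parent and name alongside the physical path, and validate() checks the path and the PATH type instead of the old single-name rule.

// Illustration only; names, location and the privilege set are assumed.
PathBasedMetadataObject pathObject =
    new PathBasedMetadataObject(
        "catalog1.schema1",                                             // parent
        "table1",                                                       // name
        "hdfs://localhost:9000/user/hive/warehouse/schema1.db/table1",  // path
        PathBasedMetadataObject.Type.PATH);
pathObject.validate(); // IllegalArgumentException if the path is empty or the type is not PATH

PathBasedSecurableObject securable =
    new PathBasedSecurableObject(
        pathObject.parent(), pathObject.name(), pathObject.path(),
        PathBasedMetadataObject.Type.PATH,
        privileges); // privileges: a Set<AuthorizationPrivilege> produced by the Ranger plugin
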
@@ -58,7 +58,7 @@ public Type type() {
}

@Override
public void validateAuthorizationMetadataObject() throws IllegalArgumentException {
public void validate() throws IllegalArgumentException {
List<String> names = names();
Preconditions.checkArgument(
names != null && !names.isEmpty(), "The name of the object is empty.");
@@ -54,7 +54,7 @@ public static JdbcSecurableObject create(
: JdbcMetadataObject.Type.fromMetadataType(MetadataObject.Type.TABLE);

JdbcSecurableObject object = new JdbcSecurableObject(parent, name, type, privileges);
object.validateAuthorizationMetadataObject();
object.validate();
return object;
}

@@ -193,7 +193,7 @@ public List<AuthorizationSecurableObject> translateOwner(MetadataObject metadata
}

@Override
public AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject) {
public List<AuthorizationMetadataObject> translateMetadataObject(MetadataObject metadataObject) {
throw new UnsupportedOperationException("Not supported");
}
