diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockLocationInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockLocationInfo.java index cf368b06802..019e16c2f13 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockLocationInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockLocationInfo.java @@ -164,9 +164,8 @@ public BlockLocationInfo build() { } @Override - public String toString() { - return "{blockID={containerID=" + blockID.getContainerID() + - ", localID=" + blockID.getLocalID() + "}" + + public String toString() { + return "{blockID={" + blockID + "}" + ", length=" + length + ", offset=" + offset + ", token=" + token + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 4d123e9c0d2..6008ef8642e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -377,6 +377,10 @@ private OzoneConsts() { // For Multipart upload public static final int OM_MULTIPART_MIN_SIZE = 5 * 1024 * 1024; + // refer to : + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html + public static final int MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD = 10000; + // GRPC block token metadata header and context key public static final String OZONE_BLOCK_TOKEN = "blocktoken"; public static final Context.Key UGI_CTX_KEY = diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 5b3153632b9..665ed246863 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1787,6 +1787,16 @@ service principal. + + ozone.s3g.list-keys.shallow.enabled + true + OZONE, S3GATEWAY + If this is true, there will be efficiency optimization effects + when calling s3g list interface with delimiter '/' parameter, especially + when there are a large number of keys. + + + ozone.om.save.metrics.interval 5m diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md index f25cedd82ad..9976cbbc4fb 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.md @@ -81,7 +81,10 @@ allows the user to overwrite an existing ozone key. Where an _scope_ can be: 1. **ACCESS** – Access ACL is applied only to the specific object and not inheritable. It controls the access to the object itself. -2. **DEFAULT** - Default ACL is applied to the specific object and will be inherited by object's descendants. Default ACLs cannot be set on keys (as there can be no objects under a key). +2. **DEFAULT** - Default ACL is applied to the specific object and will be inherited by object's descendants. Default ACLs cannot be set on keys (as there can be no objects under a key).
+_Note_: ACLs inherited from a parent's DEFAULT ACLs follow these rules, depending on the bucket layout: + - **Legacy with EnableFileSystem or FSO**: inherit the immediate parent's DEFAULT ACLs. If there are none, inherit the bucket's DEFAULT ACLs. + - **Legacy with DisableFileSystem or OBS**: inherit the bucket's DEFAULT ACLs. ## Ozone Native ACL APIs diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md index 17e1507f79e..3d95fcf0877 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md @@ -67,8 +67,10 @@ _权限_ 可选的值包括:: _范围_ 可选的值包括:: 1. **ACCESS** – 这类 ACL 仅作用于对象本身,不能被继承。它控制对对象本身的访问。 -2. **DEFAULT** - 这类 ACL 不仅作用于对象本身,还会被对象的后代继承。不能在叶子对象上设置该类 ACL(因为叶子对象下不能再有其他对象)。 - +2. **DEFAULT** - 这类 ACL 不仅作用于对象本身,还会被对象的后代继承。不能在叶子对象上设置该类 ACL(因为叶子对象下不能再有其他对象)。
+_注意_:从父级默认 ACL 继承的 ACL, 将根据不同的桶布局遵循以下规则: + - **启用文件系统的 Legacy 或 FSO**:继承直接父目录的默认ACL。如果直接父目录没有默认ACL,则继承存储桶的默认ACL。 + - **禁用文件系统的 Legacy 或 OBS**:继承桶的默认ACL。 ## Ozone 原生 ACL API diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index 2adb33ef066..20aef0d405c 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -1522,6 +1522,11 @@ public static Logger getLog() { return LOG; } + @VisibleForTesting + public String getCurrentCompactionLogPath() { + return currentCompactionLogPath; + } + @VisibleForTesting public ConcurrentHashMap getCompactionNodeMap() { return compactionNodeMap; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 151e72227aa..c575f740e73 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -318,6 +318,8 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, } if (!enoughForData) { + LOG.debug("Datanode {} has no volumes with enough space to allocate {} " + + "bytes for data.", datanodeDetails, dataSizeRequired); return false; } @@ -332,8 +334,11 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, } else { enoughForMeta = true; } - - return enoughForData && enoughForMeta; + if (!enoughForMeta) { + LOG.debug("Datanode {} has no volumes with enough space to allocate {} " + + "bytes for metadata.", datanodeDetails, metadataSizeRequired); + } + return enoughForMeta; } /** @@ -481,16 +486,20 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, if (datanodeInfo == null) { LOG.error("Failed to find the DatanodeInfo for datanode {}", datanodeDetails); - } else { - if (datanodeInfo.getNodeStatus().isNodeWritable() && - (hasEnoughSpace(datanodeInfo, metadataSizeRequired, - dataSizeRequired))) { - LOG.debug("Datanode {} is chosen. Required metadata size is {} and " + - "required data size is {}", - datanodeDetails, metadataSizeRequired, dataSizeRequired); - return true; - } + return false; + } + NodeStatus nodeStatus = datanodeInfo.getNodeStatus(); + if (nodeStatus.isNodeWritable() && + (hasEnoughSpace(datanodeInfo, metadataSizeRequired, + dataSizeRequired))) { + LOG.debug("Datanode {} is chosen. Required metadata size is {} and " + + "required data size is {} and NodeStatus is {}", + datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); + return true; } + LOG.debug("Datanode {} is not chosen. 
Required metadata size is {} and " + + "required data size is {} and NodeStatus is {}", + datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); return false; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index 2a1c8956e94..b82cae9ffa5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -78,7 +78,8 @@ public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, Optional.ofNullable(container.getState()) .filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED || - state == HddsProtos.LifeCycleState.CLOSED)) + state == HddsProtos.LifeCycleState.CLOSED) + && container.getNumberOfKeys() > 0) .ifPresent(s -> containerMap.put(container.getContainerID(), container)); }); @@ -166,7 +167,8 @@ private void reInitializeRule() { Optional.ofNullable(container.getState()) .filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED || - state == HddsProtos.LifeCycleState.CLOSED)) + state == HddsProtos.LifeCycleState.CLOSED) + && container.getNumberOfKeys() > 0) .ifPresent(s -> containerMap.put(container.getContainerID(), container)); }); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 2f677629a03..e75f6e6f41a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -83,7 +83,6 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.util.Map; @@ -669,7 +668,6 @@ public void testSetNodeOpStateAndCommandFired() * @throws TimeoutException */ @Test - @Disabled("HDDS-5098") public void testScmDetectStaleAndDeadNode() throws IOException, InterruptedException, AuthenticationException { final int interval = 100; @@ -721,13 +719,13 @@ public void testScmDetectStaleAndDeadNode() "Expected to find 1 stale node"); assertEquals(staleNode.getUuid(), staleNodeList.get(0).getUuid(), "Stale node is not the expected ID"); - Thread.sleep(1000); Map> nodeCounts = nodeManager.getNodeCount(); assertEquals(1, nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name()) .get(HddsProtos.NodeState.STALE.name()).intValue()); + Thread.sleep(1000); // heartbeat good nodes again. for (DatanodeDetails dn : nodeList) { nodeManager.processHeartbeat(dn, layoutInfo); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index f17722612fb..c02dd5ccdd6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -127,6 +127,7 @@ private void testSafeMode(int numContainers) throws Exception { // Currently, only considered containers which are not in open state. 
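+    // The non-zero key count set below matters because ContainerSafeModeRule
+    // now only counts QUASI_CLOSED/CLOSED containers whose getNumberOfKeys()
+    // is greater than zero toward the safe mode threshold; empty containers
+    // are ignored.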
for (ContainerInfo container : containers) { container.setState(HddsProtos.LifeCycleState.CLOSED); + container.setNumberOfKeys(10); } scmSafeModeManager = new SCMSafeModeManager( config, containers, null, null, queue, @@ -160,6 +161,7 @@ public void testSafeModeExitRule() throws Exception { // container list for (ContainerInfo container : containers) { container.setState(HddsProtos.LifeCycleState.CLOSED); + container.setNumberOfKeys(10); } scmSafeModeManager = new SCMSafeModeManager( config, containers, null, null, queue, @@ -501,12 +503,19 @@ public void testContainerSafeModeRule() throws Exception { // Add 100 containers to the list of containers in SCM containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); // Assign CLOSED state to first 25 containers and OPEN state to rest - // of the containers + // of the containers. Set container key count = 10 in each container. for (ContainerInfo container : containers.subList(0, 25)) { container.setState(HddsProtos.LifeCycleState.CLOSED); + container.setNumberOfKeys(10); } for (ContainerInfo container : containers.subList(25, 100)) { container.setState(HddsProtos.LifeCycleState.OPEN); + container.setNumberOfKeys(10); + } + + // Set the last 5 closed containers to be empty + for (ContainerInfo container : containers.subList(20, 25)) { + container.setNumberOfKeys(0); } scmSafeModeManager = new SCMSafeModeManager( @@ -515,14 +524,15 @@ public void testContainerSafeModeRule() throws Exception { assertTrue(scmSafeModeManager.getInSafeMode()); // When 10 CLOSED containers are reported by DNs, the computed container - // threshold should be 10/25 as there are only 25 CLOSED containers. + // threshold should be 10/20 as there are only 20 CLOSED NON-EMPTY + // containers. // Containers in OPEN state should not contribute towards list of // containers while calculating container threshold in SCMSafeNodeManager - testContainerThreshold(containers.subList(0, 10), 0.4); + testContainerThreshold(containers.subList(0, 10), 0.5); assertTrue(scmSafeModeManager.getInSafeMode()); - // When remaining 15 OPEN containers are reported by DNs, the container - // threshold should be (10+15)/25. + // When remaining 10 CLOSED NON-EMPTY containers are reported by DNs, + // the container threshold should be (10+10)/20. 
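+    // Put differently: containers 10..24 add the remaining 10 non-empty
+    // CLOSED containers (the 5 empty ones in positions 20..24 do not count),
+    // so the threshold reaches (10 + 10) / 20 = 1.0 and safe mode can exit.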
testContainerThreshold(containers.subList(10, 25), 1.0); GenericTestUtils.waitFor(() -> !scmSafeModeManager.getInSafeMode(), diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java index babcabe5925..9ac275fd5cb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java @@ -22,12 +22,17 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Map; import java.util.HashMap; import java.util.InvalidPropertiesFormatException; import java.util.List; import java.util.TreeSet; import java.util.stream.Collectors; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.cli.SubcommandWithParent; @@ -39,6 +44,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; +import org.apache.hadoop.hdds.server.JsonUtils; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -82,14 +88,17 @@ public class TopologySubcommand extends ScmSubcommand " HEALTHY, STALE, DEAD)") private String nodeState; + @CommandLine.Option(names = { "--json" }, + defaultValue = "false", + description = "Format output as JSON") + private boolean json; + @Override public void execute(ScmClient scmClient) throws IOException { for (HddsProtos.NodeState state : STATES) { List nodes = scmClient.queryNode(null, state, HddsProtos.QueryScope.CLUSTER, ""); if (nodes != null && nodes.size() > 0) { - // show node state - System.out.println("State = " + state.toString()); if (nodeOperationalState != null) { if (nodeOperationalState.equals("IN_SERVICE") || nodeOperationalState.equals("DECOMMISSIONING") || @@ -120,9 +129,9 @@ public void execute(ScmClient scmClient) throws IOException { } } if (order) { - printOrderedByLocation(nodes); + printOrderedByLocation(nodes, state.toString()); } else { - printNodesWithLocation(nodes); + printNodesWithLocation(nodes, state.toString()); } } } @@ -136,10 +145,10 @@ public Class getParentType() { // Format // Location: rack1 // ipAddress(hostName) OperationalState - private void printOrderedByLocation(List nodes) { - HashMap> tree = - new HashMap<>(); - HashMap state = + private void printOrderedByLocation(List nodes, + String state) throws IOException { + Map> tree = new HashMap<>(); + Map operationalState = new HashMap<>(); for (HddsProtos.Node node : nodes) { String location = node.getNodeID().getNetworkLocation(); @@ -148,16 +157,31 @@ private void printOrderedByLocation(List nodes) { } DatanodeDetails dn = DatanodeDetails.getFromProtoBuf(node.getNodeID()); tree.get(location).add(dn); - state.put(dn, node.getNodeOperationalStates(0)); + operationalState.put(dn, node.getNodeOperationalStates(0)); } ArrayList locations = new ArrayList<>(tree.keySet()); Collections.sort(locations); + if (json) { + List nodesJson = new ArrayList<>(); + locations.forEach(location -> { + tree.get(location).forEach(n -> { + NodeTopologyOrder nodeJson = new NodeTopologyOrder(n, state, + 
operationalState.get(n).toString()); + nodesJson.add(nodeJson); + }); + }); + System.out.println( + JsonUtils.toJsonStringWithDefaultPrettyPrinter(nodesJson)); + return; + } + // show node state + System.out.println("State = " + state); locations.forEach(location -> { System.out.println("Location: " + location); tree.get(location).forEach(n -> { System.out.println(" " + n.getIpAddress() + "(" + n.getHostName() - + ") " + state.get(n)); + + ") " + operationalState.get(n)); }); }); } @@ -180,7 +204,33 @@ private String getAdditionNodeOutput(HddsProtos.Node node) { // Format "ipAddress(hostName):PortName1=PortValue1 OperationalState // networkLocation - private void printNodesWithLocation(Collection nodes) { + private void printNodesWithLocation(Collection nodes, + String state) throws IOException { + if (json) { + if (fullInfo) { + List nodesJson = new ArrayList<>(); + nodes.forEach(node -> { + NodeTopologyFull nodeJson = + new NodeTopologyFull( + DatanodeDetails.getFromProtoBuf(node.getNodeID()), state); + nodesJson.add(nodeJson); + }); + System.out.println( + JsonUtils.toJsonStringWithDefaultPrettyPrinter(nodesJson)); + return; + } + List nodesJson = new ArrayList<>(); + nodes.forEach(node -> { + NodeTopologyDefault nodeJson = new NodeTopologyDefault( + DatanodeDetails.getFromProtoBuf(node.getNodeID()), state); + nodesJson.add(nodeJson); + }); + System.out.println( + JsonUtils.toJsonStringWithDefaultPrettyPrinter(nodesJson)); + return; + } + // show node state + System.out.println("State = " + state); nodes.forEach(node -> { System.out.print(" " + getAdditionNodeOutput(node) + node.getNodeID().getIpAddress() + "(" + @@ -192,4 +242,82 @@ private void printNodesWithLocation(Collection nodes) { node.getNodeID().getNetworkLocation() : "NA")); }); } + + private static class ListJsonSerializer extends + JsonSerializer> { + @Override + public void serialize(List value, JsonGenerator jgen, + SerializerProvider provider) + throws IOException { + jgen.writeStartObject(); + for (DatanodeDetails.Port port : value) { + jgen.writeNumberField(port.getName().toString(), port.getValue()); + } + jgen.writeEndObject(); + } + } + + private static class NodeTopologyOrder { + private String ipAddress; + private String hostName; + private String nodeState; + private String operationalState; + private String networkLocation; + + NodeTopologyOrder(DatanodeDetails node, String state, String opState) { + ipAddress = node.getIpAddress(); + hostName = node.getHostName(); + nodeState = state; + operationalState = opState; + networkLocation = (node.getNetworkLocation() != null ? 
+ node.getNetworkLocation() : "NA"); + } + + public String getIpAddress() { + return ipAddress; + } + + public String getHostName() { + return hostName; + } + + public String getNodeState() { + return nodeState; + } + + public String getOperationalState() { + return operationalState; + } + + public String getNetworkLocation() { + return networkLocation; + } + } + + private static class NodeTopologyDefault extends NodeTopologyOrder { + private List ports; + + NodeTopologyDefault(DatanodeDetails node, String state) { + super(node, state, node.getPersistedOpState().toString()); + ports = node.getPorts(); + } + + @JsonSerialize(using = ListJsonSerializer.class) + public List getPorts() { + return ports; + } + } + + private static class NodeTopologyFull extends NodeTopologyDefault { + private String uuid; + + NodeTopologyFull(DatanodeDetails node, String state) { + super(node, state); + uuid = node.getUuid().toString(); + } + + public String getUuid() { + return uuid; + } + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 4210876f987..bc8dcdd0e5c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -58,6 +58,7 @@ import java.util.Map; import java.util.Stack; import java.util.NoSuchElementException; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -556,9 +557,25 @@ public Iterator listKeys(String keyPrefix) */ public Iterator listKeys(String keyPrefix, String prevKey) throws IOException { + return listKeys(keyPrefix, prevKey, false); + } + /** + * Returns Iterator to iterate over all keys after prevKey in the bucket. + * If shallow is true, iterator will only contain immediate children. + * This applies to the aws s3 list with delimiter '/' scenario. + * Note: When shallow is true, whether keyPrefix ends with slash or not + * will affect the results, see {@code getNextShallowListOfKeys}. + * + * @param keyPrefix Bucket prefix to match + * @param prevKey Keys will be listed after this key name + * @param shallow If true, only list immediate children ozoneKeys + * @return {@code Iterator} + */ + public Iterator listKeys(String keyPrefix, String prevKey, + boolean shallow) throws IOException { return new KeyIteratorFactory() - .getKeyIterator(keyPrefix, prevKey, bucketLayout); + .getKeyIterator(keyPrefix, prevKey, bucketLayout, shallow); } /** @@ -1049,6 +1066,9 @@ private class KeyIterator implements Iterator { private String keyPrefix = null; private Iterator currentIterator; private OzoneKey currentValue; + private final boolean shallow; + private boolean addedKeyPrefix; + private String delimiterKeyPrefix; String getKeyPrefix() { return keyPrefix; @@ -1058,15 +1078,27 @@ void setKeyPrefix(String keyPrefixPath) { keyPrefix = keyPrefixPath; } + boolean addedKeyPrefix() { + return addedKeyPrefix; + } + + void setAddedKeyPrefix(boolean addedKeyPrefix) { + this.addedKeyPrefix = addedKeyPrefix; + } + /** * Creates an Iterator to iterate over all keys after prevKey in the bucket. * If prevKey is null it iterates from the first key in the bucket. * The returned keys match key prefix. 
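+     * A brief usage sketch of the shallow mode (values are illustrative):
+     * {@code bucket.listKeys("a/b2/", "", true)} iterates only over the
+     * immediate children under "a/b2/", similar to an aws s3 list with
+     * delimiter '/', whereas {@code shallow=false} walks the whole subtree.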
* @param keyPrefix + * @param prevKey + * @param shallow */ - KeyIterator(String keyPrefix, String prevKey) throws IOException { + KeyIterator(String keyPrefix, String prevKey, boolean shallow) + throws IOException { setKeyPrefix(keyPrefix); this.currentValue = null; + this.shallow = shallow; this.currentIterator = getNextListOfKeys(prevKey).iterator(); } @@ -1099,9 +1131,139 @@ public OzoneKey next() { */ List getNextListOfKeys(String prevKey) throws IOException { + // If shallow is true, only list immediate children + if (shallow) { + return getNextShallowListOfKeys(prevKey); + } return proxy.listKeys(volumeName, name, keyPrefix, prevKey, listCacheSize); } + + /** + * Using listStatus instead of listKeys avoiding listing all children keys. + * Giving the structure of keys delimited by "/": + * + * buck-1 + * | + * a + * | + * ----------------------------------- + * | | | + * b1 b2 b3 + * ----- -------- ---------- + * | | | | | | | | + * c1 c2 d1 d2 d3 e1 e2 e3 + * | | + * -------- | + * | | | + * d21.txt d22.txt e11.txt + * + * For the above structure, the keys listed delimited "/" in order are + * as follows: + * a/ + * a/b1/ + * a/b1/c1/ + * a/b1/c2/ + * a/b2/ + * a/b2/d1/ + * a/b2/d2/ + * a/b2/d2/d21.txt + * a/b2/d2/d22.txt + * a/b2/d3/ + * a/b3/ + * a/b3/e1/ + * a/b3/e1/e11.txt + * a/b3/e2/ + * a/b3/e3/ + * + * When keyPrefix ends without slash (/), the result as Example 1: + * Example 1: keyPrefix="a/b2", prevKey="" + * result: [a/b2/] + * Example 2: keyPrefix="a/b2/", prevKey="" + * result: [a/b2/d1/, a/b2/d2/, a/b2/d3/] + * Example 3: keyPrefix="a/b2/", prevKey="a/b2/d2/d21.txt" + * result: [a/b2/d2/, a/b2/d3/] + * Example 4: keyPrefix="a/b2/", prevKey="a/b2/d2/d22.txt" + * result: [a/b2/d3/] + * Say, keyPrefix="a/b" and prevKey="", the results will be + * [a/b1/, a/b2/, a/b3/] + * In implementation, the keyPrefix "a/b" can be identified in listKeys, + * but cannot be identified in listStatus. Therefore, keyPrefix "a/b" + * needs to be split into keyPrefix "a" and call listKeys method to get + * the next one key as the startKey in listStatus. + */ + protected List getNextShallowListOfKeys(String prevKey) + throws IOException { + List resultList = new ArrayList<>(); + String startKey = prevKey; + + // handle for first round + if (!addedKeyPrefix) { + // prepare startKey + List nextOneKeys = + proxy.listKeys(volumeName, name, getKeyPrefix(), prevKey, 1); + if (nextOneKeys.isEmpty()) { + return nextOneKeys; + } + // Special case: ListKey expects keyPrefix element should present in + // the resultList if startKey is blank or equals to keyPrefix. + // The nextOneKey needs be added to the result because it will not be + // present when using the 'listStatus' method. + // Consider the case, keyPrefix="test/", prevKey="" or 'test1/', + // then 'test/' will be added to the list result. + startKey = nextOneKeys.get(0).getName(); + if (getKeyPrefix().endsWith(OZONE_URI_DELIMITER) && + startKey.equals(getKeyPrefix())) { + resultList.add(nextOneKeys.get(0)); + } + + // prepare delimiterKeyPrefix + delimiterKeyPrefix = getKeyPrefix(); + if (!getKeyPrefix().endsWith(OZONE_URI_DELIMITER)) { + delimiterKeyPrefix = OzoneFSUtils.getParentDir(getKeyPrefix()); + } + } + + // Elements in statuses must be sorted after startKey, + // which means they come after the keyPrefix. 
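+      // listStatus is queried with delimiterKeyPrefix (the parent directory)
+      // rather than the raw keyPrefix, since a partial name such as "a/b"
+      // cannot be matched by listStatus; entries outside the real keyPrefix
+      // are dropped by the startsWith filter below.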
+ List statuses = proxy.listStatus(volumeName, name, + delimiterKeyPrefix, false, startKey, listCacheSize); + + if (addedKeyPrefix) { + // previous round already include the startKey, so remove it + statuses.remove(0); + } else { + setAddedKeyPrefix(true); + } + + List ozoneKeys = buildOzoneKeysFromFileStatus(statuses) + .stream() + .filter(key -> StringUtils.startsWith(key.getName(), getKeyPrefix())) + .collect(Collectors.toList()); + + resultList.addAll(ozoneKeys); + return resultList; + } + + private List buildOzoneKeysFromFileStatus( + List statuses) { + return statuses.stream() + .map(status -> { + OmKeyInfo keyInfo = status.getKeyInfo(); + String keyName = keyInfo.getKeyName(); + if (status.isDirectory()) { + // add trailing slash to represent directory + keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + } + return new OzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), keyName, + keyInfo.getDataSize(), keyInfo.getCreationTime(), + keyInfo.getModificationTime(), + keyInfo.getReplicationConfig(), keyInfo.isFile()); + }) + .collect(Collectors.toList()); + } + } @@ -1136,7 +1298,6 @@ List getNextListOfKeys(String prevKey) throws private class KeyIteratorWithFSO extends KeyIterator { private Stack> stack; - private boolean addedKeyPrefix; private String removeStartKey = ""; /** @@ -1146,9 +1307,11 @@ private class KeyIteratorWithFSO extends KeyIterator { * * @param keyPrefix * @param prevKey + * @param shallow */ - KeyIteratorWithFSO(String keyPrefix, String prevKey) throws IOException { - super(keyPrefix, prevKey); + KeyIteratorWithFSO(String keyPrefix, String prevKey, boolean shallow) + throws IOException { + super(keyPrefix, prevKey, shallow); } /** @@ -1189,7 +1352,7 @@ List getNextListOfKeys(String prevKey) throws IOException { } // normalize paths - if (!addedKeyPrefix) { + if (!addedKeyPrefix()) { prevKey = OmUtils.normalizeKey(prevKey, true); String keyPrefixName = ""; if (StringUtils.isNotBlank(getKeyPrefix())) { @@ -1421,12 +1584,12 @@ private void removeStartKeyIfExistsInStatusList(String startKey, private void addKeyPrefixInfoToResultList(String keyPrefix, String startKey, List keysResultList) throws IOException { - if (addedKeyPrefix) { + if (addedKeyPrefix()) { return; } // setting flag to true. - addedKeyPrefix = true; + setAddedKeyPrefix(true); // not required to addKeyPrefix // case-1) if keyPrefix is null/empty/just contains snapshot indicator @@ -1450,14 +1613,16 @@ private void addKeyPrefixInfoToResultList(String keyPrefix, } if (status != null) { - OmKeyInfo keyInfo = status.getKeyInfo(); - String keyName = keyInfo.getKeyName(); - - if (status.isDirectory()) { - // add trailing slash to represent directory - keyName = - OzoneFSUtils.addTrailingSlashIfNeeded(keyInfo.getKeyName()); + // not required to addKeyPrefix + // case-3) if the keyPrefix corresponds to a file and not a dir, + // prefix should not be added to avoid duplicate entry + if (!status.isDirectory()) { + return; } + OmKeyInfo keyInfo = status.getKeyInfo(); + // add trailing slash to represent directory + String keyName = + OzoneFSUtils.addTrailingSlashIfNeeded(keyInfo.getKeyName()); // removeStartKey - as the startKey is a placeholder, which is // managed internally to traverse leaf node's sub-paths. 
@@ -1479,11 +1644,11 @@ private void addKeyPrefixInfoToResultList(String keyPrefix, private class KeyIteratorFactory { KeyIterator getKeyIterator(String keyPrefix, String prevKey, - BucketLayout bType) throws IOException { + BucketLayout bType, boolean shallow) throws IOException { if (bType.isFileSystemOptimized()) { - return new KeyIteratorWithFSO(keyPrefix, prevKey); + return new KeyIteratorWithFSO(keyPrefix, prevKey, shallow); } else { - return new KeyIterator(keyPrefix, prevKey); + return new KeyIterator(keyPrefix, prevKey, shallow); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 0624e84dae4..15ff53c200f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -167,6 +167,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY; +import static org.apache.hadoop.ozone.OzoneConsts.MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD; import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; @@ -1738,8 +1739,11 @@ private OpenKeySession newMultipartOpenKey( HddsClientUtils.verifyKeyName(keyName); } HddsClientUtils.checkNotNull(keyName, uploadID); - Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part " + - "number should be greater than zero and less than or equal to 10000"); + if (partNumber <= 0 || partNumber > MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD) { + throw new OMException("Part number must be an integer between 1 and " + + MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD + ", inclusive", + OMException.ResultCodes.INVALID_PART); + } Preconditions.checkArgument(size >= 0, "size should be greater than or " + "equal to zero"); OmKeyArgs keyArgs = new OmKeyArgs.Builder() diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index e48cf98e906..57bcd39f6ff 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -425,6 +425,25 @@ public FileChecksum getFileChecksum() { return fileChecksum; } + @Override + public String toString() { + return "OmKeyInfo{" + + "volumeName='" + volumeName + '\'' + + ", bucketName='" + bucketName + '\'' + + ", keyName='" + keyName + '\'' + + ", dataSize=" + dataSize + + ", keyLocationVersions=" + keyLocationVersions + + ", creationTime=" + creationTime + + ", modificationTime=" + modificationTime + + ", replicationConfig=" + replicationConfig + + ", encInfo=" + (encInfo == null ? "null" : "") + + ", fileChecksum=" + fileChecksum + + ", isFile=" + isFile + + ", fileName='" + fileName + '\'' + + ", acls=" + acls + + '}'; + } + /** * Builder of OmKeyInfo. 
*/ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index 931657e8e7f..e934ef1b220 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -80,6 +80,13 @@ public boolean isMultipartKey() { return isMultipartKey; } + /** + * @return Raw internal locationVersionMap. + */ + public Map> getLocationVersionMap() { + return locationVersionMap; + } + /** * Return only the blocks that are created in the most recent version. * @@ -182,7 +189,12 @@ public String toString() { sb.append("isMultipartKey:").append(isMultipartKey).append(" "); for (List kliList : locationVersionMap.values()) { for (OmKeyLocationInfo kli: kliList) { - sb.append(kli.getLocalID()).append(" || "); + sb.append("conID ").append(kli.getContainerID()); + sb.append(" "); + sb.append("locID ").append(kli.getLocalID()); + sb.append(" "); + sb.append("bcsID ").append(kli.getBlockCommitSequenceId()); + sb.append(" || "); } } return sb.toString(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 48c8a6b5623..134675cdce8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -180,6 +180,15 @@ public static boolean inheritDefaultAcls(List acls, return false; } + /** + * Helper function to convert the scope of ACLs to DEFAULT. + * This method is called in ACL inheritance scenarios. + * @param acls + */ + public static void toDefaultScope(List acls) { + acls.forEach(a -> a.setAclScope(DEFAULT)); + } + /** * Convert a list of OzoneAclInfo(protoc) to list of OzoneAcl(java). * @param protoAcls diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index 48c00cef35b..2ee5420a4cd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -125,6 +125,13 @@ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { return builder.build(); } + @Override + public String toString() { + return "RepeatedOmKeyInfo{" + + "omKeyInfoList=" + omKeyInfoList + + '}'; + } + /** * Builder of RepeatedOmKeyInfo. */ diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/printTopology.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/printTopology.robot new file mode 100644 index 00000000000..50cce5652bf --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/printTopology.robot @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone admin printTopology command +Library OperatingSystem +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + + +*** Test Cases *** +Run printTopology + ${output} = Execute ozone admin printTopology + Should Match Regexp ${output} State = + +Run printTopology as JSON + ${output} = Execute ozone admin printTopology --json + ${keys} = Execute echo '${output}' | jq -r '.[0] | keys' + Should Contain ${output} ipAddress diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index 07a5cc3a48e..b313aa80fb5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -44,6 +44,8 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -54,10 +56,17 @@ import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; +import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestWithFSO; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; @@ -65,7 +74,9 @@ import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -122,6 +133,11 @@ public static void init() throws Exception { // create a volume and a bucket to be used by OzoneFileSystem bucket = TestDataUtil.createVolumeAndBucket(client, layout); + + // Enable DEBUG level logging for relevant classes + GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); } @AfterAll @@ -132,6 +148,52 @@ public static void teardown() { } } + @Test + public 
void testKeyHSyncThenClose() throws Exception { + // Check that deletedTable should not have keys with the same block as in + // keyTable's when a key is hsync()'ed then close()'d. + + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + + String data = "random data"; + final Path file = new Path(dir, "file-hsync-then-close"); + try (FileSystem fs = FileSystem.get(CONF)) { + try (FSDataOutputStream outputStream = fs.create(file, true)) { + outputStream.write(data.getBytes(UTF_8), 0, data.length()); + outputStream.hsync(); + } + } + + OzoneManager ozoneManager = cluster.getOzoneManager(); + // Wait for double buffer to trigger all pending addToDBBatch(), + // including OMKeyCommitResponse(WithFSO)'s that writes to deletedTable. + ozoneManager.awaitDoubleBufferFlush(); + + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + // deletedTable should not have an entry for file at all in this case + try (TableIterator> + tableIter = metadataManager.getDeletedTable().iterator()) { + while (tableIter.hasNext()) { + Table.KeyValue kv = tableIter.next(); + String key = kv.getKey(); + if (key.startsWith(file.toString())) { + RepeatedOmKeyInfo val = kv.getValue(); + LOG.error("Unexpected deletedTable entry: key = {}, val = {}", + key, val); + Assertions.fail("deletedTable should not have such entry. key = " + + key); + } + } + } + } + @Test public void testO3fsHSync() throws Exception { // Set the fs.defaultFS diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 5ce2f8a1ed0..f89aa85d15d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -406,6 +406,26 @@ public void testMultipartUploadWithMissingParts() throws Exception { () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); } + @Test + public void testMultipartPartNumberExceedingAllowedRange() throws Exception { + String uploadID = initiateMultipartUpload(bucket, keyName, + RATIS, ONE); + byte[] data = "data".getBytes(UTF_8); + + // Multipart part number must be an integer between 1 and 10000. So the + // part number 1, 5000, 10000 will succeed, + // the part number 0, 10001 will fail. 
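+    // The upper bound comes from OzoneConsts.MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD
+    // (10000, mirroring the AWS S3 per-upload part limit); out-of-range part
+    // numbers are rejected with OMException.ResultCodes.INVALID_PART.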
+ bucket.createMultipartKey(keyName, data.length, 1, uploadID); + bucket.createMultipartKey(keyName, data.length, 5000, uploadID); + bucket.createMultipartKey(keyName, data.length, 10000, uploadID); + OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART, + () -> bucket.createMultipartKey( + keyName, data.length, 0, uploadID)); + OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART, + () -> bucket.createMultipartKey( + keyName, data.length, 10001, uploadID)); + } + @Test public void testCommitPartAfterCompleteUpload() throws Exception { String parentDir = "a/b/c/d/"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 8cf2f9cea9b..4bd07fbcad5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -2972,6 +2972,35 @@ public void testMultipartUploadWithMissingParts() throws Exception { () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); } + @Test + public void testMultipartPartNumberExceedingAllowedRange() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + String sampleData = "sample Value"; + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName); + assertNotNull(multipartInfo); + String uploadID = multipartInfo.getUploadID(); + + // Multipart part number must be an integer between 1 and 10000. So the + // part number 1, 5000, 10000 will succeed, + // the part number 0, 10001 will fail. + bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); + bucket.createMultipartKey(keyName, sampleData.length(), 5000, uploadID); + bucket.createMultipartKey(keyName, sampleData.length(), 10000, uploadID); + OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, () -> + bucket.createMultipartKey( + keyName, sampleData.length(), 0, uploadID)); + OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, () -> + bucket.createMultipartKey( + keyName, sampleData.length(), 10001, uploadID)); + } + @Test public void testAbortUploadFail() throws Exception { String volumeName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java new file mode 100644 index 00000000000..344623b7603 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -0,0 +1,341 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.rules.Timeout; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.ArrayList; +import java.util.Optional; +import java.util.UUID; +import java.util.stream.Stream; + +import static com.google.common.collect.Lists.newLinkedList; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.params.provider.Arguments.of; + +/** + * Test covers listKeys(keyPrefix, startKey, shallow) combinations + * in a legacy/OBS bucket layout type. + */ +public class TestListKeys { + + private static MiniOzoneCluster cluster = null; + + private static OzoneConfiguration conf; + private static String clusterId; + private static String scmId; + private static String omId; + + private static OzoneBucket legacyOzoneBucket; + private static OzoneClient client; + + @Rule + public Timeout timeout = new Timeout(1200000); + + /** + * Create a MiniDFSCluster for testing. + *

+ * + * @throws IOException + */ + @BeforeAll + public static void init() throws Exception { + conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + omId = UUID.randomUUID().toString(); + // Set the number of keys to be processed during batch operate. + conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); + conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); + cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) + .setScmId(scmId).setOmId(omId).build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + + // create a volume and a LEGACY bucket + legacyOzoneBucket = TestDataUtil + .createVolumeAndBucket(client, BucketLayout.LEGACY); + + initFSNameSpace(); + } + + @AfterAll + public static void teardownClass() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + } + + private static void initFSNameSpace() throws Exception { + buildNameSpaceTree(legacyOzoneBucket); + } + + /** + * Verify listKeys at different levels. + * + * buck-1 + * | + * a1 + * | + * ----------------------------------- + * | | | + * b1 b2 b3 + * ------- --------- ----------- + * | | | | | | | | + * c1 c2 d1 d2 d3 e1 e2 e3 + * | | | | | | | | + * c1.tx c2.tx d11.tx | d31.tx | | e31.tx + * --------- | e21.tx + * | | | + * d21.tx d22.tx e11.tx + * + * Above is the key namespace tree structure. + */ + private static void buildNameSpaceTree(OzoneBucket ozoneBucket) + throws Exception { + LinkedList keys = new LinkedList<>(); + keys.add("/a1/b1/c1111.tx"); + keys.add("/a1/b1/c1222.tx"); + keys.add("/a1/b1/c1333.tx"); + keys.add("/a1/b1/c1444.tx"); + keys.add("/a1/b1/c1555.tx"); + keys.add("/a1/b1/c1/c1.tx"); + keys.add("/a1/b1/c12/c2.tx"); + keys.add("/a1/b1/c12/c3.tx"); + + keys.add("/a1/b2/d1/d11.tx"); + keys.add("/a1/b2/d2/d21.tx"); + keys.add("/a1/b2/d2/d22.tx"); + keys.add("/a1/b2/d3/d31.tx"); + + keys.add("/a1/b3/e1/e11.tx"); + keys.add("/a1/b3/e2/e21.tx"); + keys.add("/a1/b3/e3/e31.tx"); + + createKeys(ozoneBucket, keys); + } + + private static Stream shallowListDataWithTrailingSlash() { + return Stream.of( + + // Case-1: StartKey is less than prefixKey, return emptyList. + of("a1/b2/", "a1", newLinkedList(Collections.emptyList())), + + // Case-2: StartKey is empty, return all immediate node. + of("a1/b2/", "", newLinkedList(Arrays.asList( + "a1/b2/", + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-3: StartKey is same as prefixKey, return all immediate nodes. + of("a1/b2/", "a1/b2", newLinkedList(Arrays.asList( + "a1/b2/", + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-4: StartKey is greater than prefixKey + of("a1/b2/", "a1/b2/d2/d21.tx", newLinkedList(Arrays.asList( + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-5: StartKey reaches last element, return emptyList + of("a1/b2/", "a1/b2/d3/d31.tx", newLinkedList( + Collections.emptyList() + )), + + // Case-6: Mix result + of("a1/b1/", "a1/b1/c12", newLinkedList(Arrays.asList( + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))) + ); + } + + private static Stream shallowListDataWithoutTrailingSlash() { + return Stream.of( + + // Case-1: StartKey is less than prefixKey, return emptyList. + of("a1/b2", "a1", newLinkedList(Collections.emptyList())), + + // Case-2: StartKey is empty, return all immediate node. 
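+        // (Without a trailing slash, the prefix "a1/b2" is matched against
+        // the entries under its parent "a1/", so only "a1/b2/" itself
+        // qualifies here.)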
+ of("a1/b2", "", newLinkedList(Arrays.asList( + "a1/b2/" + ))), + + // Case-3: StartKey is same as prefixKey. + of("a1/b2", "a1/b2", newLinkedList(Arrays.asList( + "a1/b2/" + ))), + + // Case-4: StartKey is greater than prefixKey, return immediate + // nodes which after startKey. + of("a1/b2", "a1/b2/d2/d21.tx", newLinkedList(Arrays.asList( + "a1/b2/" + ))), + + // Case-5: StartKey reaches last element, return emptyList + of("a1/b2", "a1/b2/d3/d31.tx", newLinkedList( + Collections.emptyList() + )), + + // Case-6: StartKey is invalid (less than last element) + of("a1/b1/c1", "a1/b1/c1/c0invalid", newLinkedList(Arrays.asList( + "a1/b1/c1/", + "a1/b1/c1111.tx", + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey reaches last element + of("a1/b1/c1", "a1/b1/c1/c2.tx", newLinkedList(Arrays.asList( + "a1/b1/c1111.tx", + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))), + + // Case-8: StartKey is invalid (greater than last element) + of("a1/b1/c1", "a1/b1/c1/c2invalid", newLinkedList(Arrays.asList( + "a1/b1/c1111.tx", + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))), + + // Case-9: + of("a1/b1/c12", "", newLinkedList(Arrays.asList( + "a1/b1/c12/", + "a1/b1/c1222.tx" + ))) + + ); + } + + @ParameterizedTest + @MethodSource("shallowListDataWithTrailingSlash") + public void testShallowListKeysWithPrefixTrailingSlash(String keyPrefix, + String startKey, List expectedKeys) throws Exception { + checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); + } + + @ParameterizedTest + @MethodSource("shallowListDataWithoutTrailingSlash") + public void testShallowListKeysWithoutPrefixTrailingSlash(String keyPrefix, + String startKey, List expectedKeys) throws Exception { + checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); + } + + private void checkKeyShallowList(String keyPrefix, String startKey, + List keys, OzoneBucket bucket) + throws Exception { + + Iterator ozoneKeyIterator = + bucket.listKeys(keyPrefix, startKey, true); + ReplicationConfig expectedReplication = + Optional.ofNullable(bucket.getReplicationConfig()) + .orElse(cluster.getOzoneManager().getDefaultReplicationConfig()); + + List keyLists = new ArrayList<>(); + while (ozoneKeyIterator.hasNext()) { + OzoneKey ozoneKey = ozoneKeyIterator.next(); + Assert.assertEquals(expectedReplication, ozoneKey.getReplicationConfig()); + keyLists.add(ozoneKey.getName()); + } + LinkedList outputKeysList = new LinkedList(keyLists); + System.out.println("BEGIN:::keyPrefix---> " + keyPrefix + ":::---> " + + startKey); + for (String key : keys) { + System.out.println(" " + key); + } + System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " + + startKey); + Assert.assertEquals(keys, outputKeysList); + } + + private static void createKeys(OzoneBucket ozoneBucket, List keys) + throws Exception { + int length = 10; + byte[] input = new byte[length]; + Arrays.fill(input, (byte) 96); + for (String key : keys) { + createKey(ozoneBucket, key, 10, input); + } + } + + private static void createKey(OzoneBucket ozoneBucket, String key, int length, + byte[] input) throws Exception { + + OzoneOutputStream ozoneOutputStream = + ozoneBucket.createKey(key, length); + + ozoneOutputStream.write(input); + ozoneOutputStream.write(input, 0, 10); + ozoneOutputStream.close(); + + // Read the key with given key name. 
+ OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key); + byte[] read = new byte[length]; + ozoneInputStream.read(read, 0, length); + ozoneInputStream.close(); + + Assert.assertEquals(new String(input, StandardCharsets.UTF_8), + new String(read, StandardCharsets.UTF_8)); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index 5b6edace4df..3902e0b6ac0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -263,6 +263,11 @@ public void testListKeysWithValidStartKey() throws Exception { expectedKeys = getExpectedKeyList("a1", "a1/b3/e3/e31.tx", legacyOzoneBucket); checkKeyList("a1", "a1/b3/e3/e31.tx", expectedKeys, fsoOzoneBucket); + + // case-10: keyPrefix corresponds an exist file + expectedKeys = + getExpectedKeyList("a1/b3/e3/e31.tx", "", legacyOzoneBucket); + checkKeyList("a1/b3/e3/e31.tx", "", expectedKeys, fsoOzoneBucket); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index 2fe3365093d..7f375cb67e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -31,6 +31,9 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.BucketArgs; @@ -42,13 +45,20 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; +import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; @@ -56,6 +66,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import 
org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; @@ -65,6 +76,7 @@ import org.slf4j.Logger; import org.slf4j.event.Level; +import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; @@ -84,16 +96,26 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithData.createKey; +import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -143,6 +165,20 @@ public void init(TestInfo testInfo) throws Exception { StorageUnit.KB); conf.setStorageSize(OMConfigKeys. OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, 16, StorageUnit.KB); + if ("testSnapshotBackgroundServices".equals(testInfo.getDisplayName())) { + conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, + 5, TimeUnit.SECONDS); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 5, + TimeUnit.SECONDS); + conf.setTimeDuration(OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED, + 1, TimeUnit.MILLISECONDS); + conf.setTimeDuration( + OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL, + 60, TimeUnit.SECONDS); + conf.setTimeDuration( + OZONE_BLOCK_DELETING_SERVICE_INTERVAL, + 30, TimeUnit.SECONDS); + } long snapshotThreshold = SNAPSHOT_THRESHOLD; // TODO: refactor tests to run under a new class with different configs. if (testInfo.getTestMethod().isPresent() && @@ -1036,6 +1072,370 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { assertLogCapture(logCapture, msg); } + /** + * Goal of this test is to check whether background services work after + * leadership transfer. + * Services tested: + * -- SST filtering + * -- key deletion + * -- snapshot deletion + * -- compaction backup pruning + * On top of that there are some simple tests to confirm system integrity. 
+ */ + @Test + @DisplayName("testSnapshotBackgroundServices") + @SuppressWarnings("methodlength") + public void testSnapshotBackgroundServices() + throws Exception { + // Get the leader OM + String leaderOMNodeId = OmFailoverProxyUtil + .getFailoverProxyProvider(objectStore.getClientProxy()) + .getCurrentProxyOMNodeId(); + OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); + + // Find the inactive OM + String followerNodeId = leaderOM.getPeerNodes().get(0).getNodeId(); + if (cluster.isOMActive(followerNodeId)) { + followerNodeId = leaderOM.getPeerNodes().get(1).getNodeId(); + } + OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); + + // Create some snapshots, each with new keys + int keyIncrement = 10; + String snapshotNamePrefix = "snapshot"; + for (int snapshotCount = 0; snapshotCount < 10; + snapshotCount++) { + String snapshotName = snapshotNamePrefix + snapshotCount; + writeKeys(keyIncrement); + createOzoneSnapshot(leaderOM, snapshotName); + } + + // Get the latest db checkpoint from the leader OM. + TransactionInfo transactionInfo = + TransactionInfo.readTransactionInfo(leaderOM.getMetadataManager()); + TermIndex leaderOMTermIndex = + TermIndex.valueOf(transactionInfo.getTerm(), + transactionInfo.getTransactionIndex()); + long leaderOMSnapshotIndex = leaderOMTermIndex.getIndex(); + + // Start the inactive OM. Checkpoint installation will happen spontaneously. + cluster.startInactiveOM(followerNodeId); + + // The recently started OM should be lagging behind the leader OM. + // Wait & for follower to update transactions to leader snapshot index. + // Timeout error if follower does not load update within 10s + GenericTestUtils.waitFor(() -> + followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex() + >= leaderOMSnapshotIndex - 1, 100, 10000); + + // Verify RPC server is running + GenericTestUtils.waitFor(followerOM::isOmRpcServerRunning, 100, 5000); + + // Read & Write after snapshot installed. + List newKeys = writeKeys(1); + readKeys(newKeys); + + OzoneManager newLeaderOM = + getNewLeader(leaderOM, followerNodeId, followerOM); + OzoneManager newFollowerOM = + cluster.getOzoneManager(leaderOM.getOMNodeId()); + Assertions.assertEquals(leaderOM, newFollowerOM); + readKeys(newKeys); + + // Prepare baseline data for compaction logs + String currentCompactionLogPath = newLeaderOM + .getMetadataManager() + .getStore() + .getRocksDBCheckpointDiffer() + .getCurrentCompactionLogPath(); + Assertions.assertNotNull(currentCompactionLogPath); + int lastIndex = currentCompactionLogPath.lastIndexOf(OM_KEY_PREFIX); + String compactionLogsPath = currentCompactionLogPath + .substring(0, lastIndex); + File compactionLogsDir = new File(compactionLogsPath); + Assertions.assertNotNull(compactionLogsDir); + int numberOfLogFiles = compactionLogsDir.listFiles().length; + long contentLength; + Path currentCompactionLog = Paths.get(currentCompactionLogPath); + try (BufferedReader bufferedReader = + Files.newBufferedReader(currentCompactionLog)) { + contentLength = bufferedReader.lines() + .mapToLong(String::length) + .reduce(0L, Long::sum); + } + + SnapshotInfo newSnapshot = + getSnapshotProcessedBySfs(snapshotNamePrefix, newLeaderOM); + + /* + Check whether newly created key data is reclaimed + create key a + create snapshot b + delete key a + create snapshot c + assert that a is in c's deleted table + create snapshot d + delete snapshot c + wait until key a appears in deleted table of d. 
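The last two steps amount to the wait-and-scan pattern applied a few lines below to both snapshot c and snapshot d; a minimal sketch, assuming snap is the OmSnapshot handle of the snapshot being checked and keyA is the full /volume/bucket/key path:

    GenericTestUtils.waitFor(() -> {
      try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>> it =
               snap.getMetadataManager().getDeletedTable().iterator()) {
        while (it.hasNext()) {
          if (it.next().getKey().contains(keyA)) {
            return true;     // the deleted key has reached this snapshot's deletedTable
          }
        }
        return false;        // not there yet, keep polling until the timeout
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }, 1000, 120000);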
+ */ + // create key a + String keyNameA = writeKeys(1).get(0); + String keyA = OM_KEY_PREFIX + ozoneBucket.getVolumeName() + + OM_KEY_PREFIX + ozoneBucket.getName() + + OM_KEY_PREFIX + keyNameA; + Table omKeyInfoTable = newLeaderOM + .getMetadataManager() + .getKeyTable(ozoneBucket.getBucketLayout()); + OmKeyInfo keyInfoA = omKeyInfoTable.get(keyA); + Assertions.assertNotNull(keyInfoA); + + // create snapshot b + SnapshotInfo snapshotInfoB = createOzoneSnapshot(newLeaderOM, + snapshotNamePrefix + RandomStringUtils.randomNumeric(5)); + Assertions.assertNotNull(snapshotInfoB); + + // delete key a + ozoneBucket.deleteKey(keyNameA); + + GenericTestUtils.waitFor(() -> { + try { + return Objects.isNull(omKeyInfoTable.get(keyA)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 10000); + + // create snapshot c + SnapshotInfo snapshotInfoC = createOzoneSnapshot(newLeaderOM, + snapshotNamePrefix + RandomStringUtils.randomNumeric(5)); + + // get snapshot c + OmSnapshot snapC; + try (ReferenceCounted rcC = newLeaderOM + .getOmSnapshotManager() + .checkForSnapshot(volumeName, bucketName, + getSnapshotPrefix(snapshotInfoC.getName()), true)) { + Assertions.assertNotNull(rcC); + snapC = (OmSnapshot) rcC.get(); + } + + // assert that key a is in snapshot c's deleted table + GenericTestUtils.waitFor(() -> { + try (TableIterator> iterator = + snapC.getMetadataManager().getDeletedTable().iterator()) { + while (iterator.hasNext()) { + if (iterator.next().getKey().contains(keyA)) { + return true; + } + } + + return false; + } catch (IOException e) { + Assertions.fail(); + return false; + } + }, 1000, 10000); + + // create snapshot d + SnapshotInfo snapshotInfoD = createOzoneSnapshot(newLeaderOM, + snapshotNamePrefix + RandomStringUtils.randomNumeric(5)); + + File sstBackupDir = getSstBackupDir(newLeaderOM); + int numberOfSstFiles = sstBackupDir.listFiles().length; + + // delete snapshot c + client.getObjectStore() + .deleteSnapshot(volumeName, bucketName, snapshotInfoC.getName()); + + GenericTestUtils.waitFor(() -> { + Table snapshotInfoTable = + newLeaderOM.getMetadataManager().getSnapshotInfoTable(); + try { + return null == snapshotInfoTable.get(snapshotInfoC.getTableKey()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 60000); + + // get snapshot d + OmSnapshot snapD; + try (ReferenceCounted rcD = newLeaderOM + .getOmSnapshotManager() + .checkForSnapshot(volumeName, bucketName, + getSnapshotPrefix(snapshotInfoD.getName()), true)) { + Assertions.assertNotNull(rcD); + snapD = (OmSnapshot) rcD.get(); + } + + // wait until key a appears in deleted table of snapshot d + GenericTestUtils.waitFor(() -> { + try (TableIterator> iterator = + snapD.getMetadataManager().getDeletedTable().iterator()) { + while (iterator.hasNext()) { + Table.KeyValue next = iterator.next(); + if (next.getKey().contains(keyA)) { + return true; + } + } + + return false; + } catch (IOException e) { + Assertions.fail(); + return false; + } + }, 1000, 120000); + + // Confirm entry for deleted snapshot removed from info table + client.getObjectStore() + .deleteSnapshot(volumeName, bucketName, newSnapshot.getName()); + GenericTestUtils.waitFor(() -> { + Table snapshotInfoTable = + newLeaderOM.getMetadataManager().getSnapshotInfoTable(); + try { + return null == snapshotInfoTable.get(newSnapshot.getTableKey()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 10000); + + checkIfCompactionLogsGetAppendedByForcingCompaction(newLeaderOM, + compactionLogsDir, 
numberOfLogFiles, contentLength, + currentCompactionLog); + + checkIfCompactionBackupFilesWerePruned(sstBackupDir, numberOfSstFiles); + + confirmSnapDiffForTwoSnapshotsDifferingBySingleKey(snapshotNamePrefix, + newLeaderOM); + } + + private void confirmSnapDiffForTwoSnapshotsDifferingBySingleKey( + String snapshotNamePrefix, OzoneManager ozoneManager) + throws IOException, InterruptedException, TimeoutException { + String firstSnapshot = createOzoneSnapshot(ozoneManager, + snapshotNamePrefix + RandomStringUtils.randomNumeric(10)).getName(); + String diffKey = writeKeys(1).get(0); + String secondSnapshot = createOzoneSnapshot(ozoneManager, + snapshotNamePrefix + RandomStringUtils.randomNumeric(10)).getName(); + SnapshotDiffReportOzone diff = getSnapDiffReport(volumeName, bucketName, + firstSnapshot, secondSnapshot); + Assertions.assertEquals(Collections.singletonList( + SnapshotDiffReportOzone.getDiffReportEntry( + SnapshotDiffReport.DiffType.CREATE, diffKey, null)), + diff.getDiffList()); + } + + private static void checkIfCompactionBackupFilesWerePruned(File sstBackupDir, + int numberOfSstFiles) throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(() -> { + int newNumberOfSstFiles = sstBackupDir.listFiles().length; + return numberOfSstFiles > newNumberOfSstFiles; + }, 1000, 120000); + } + + private static void checkIfCompactionLogsGetAppendedByForcingCompaction( + OzoneManager ozoneManager, + File compactionLogsDir, int numberOfLogFiles, + long contentLength, Path currentCompactionLog) + throws IOException { + ozoneManager.getMetadataManager() + .getStore() + .compactDB(); + int newNumberOfLogFiles = compactionLogsDir.listFiles().length; + long newContentLength; + try (BufferedReader bufferedReader = + Files.newBufferedReader(currentCompactionLog)) { + newContentLength = bufferedReader.lines() + .mapToLong(String::length) + .reduce(0L, Long::sum); + } + Assertions.assertTrue(numberOfLogFiles < newNumberOfLogFiles + || contentLength < newContentLength); + } + + private static File getSstBackupDir(OzoneManager ozoneManager) { + String sstBackupDirPath = ozoneManager + .getMetadataManager() + .getStore() + .getRocksDBCheckpointDiffer() + .getSSTBackupDir(); + Assertions.assertNotNull(sstBackupDirPath); + File sstBackupDir = new File(sstBackupDirPath); + Assertions.assertNotNull(sstBackupDir); + return sstBackupDir; + } + + private SnapshotInfo getSnapshotProcessedBySfs(String snapshotNamePrefix, + OzoneManager ozoneManager) + throws IOException, TimeoutException, InterruptedException { + writeKeys(1); + SnapshotInfo newSnapshot = createOzoneSnapshot(ozoneManager, + snapshotNamePrefix + RandomStringUtils.randomNumeric(5)); + Assertions.assertNotNull(newSnapshot); + Table snapshotInfoTable = + ozoneManager.getMetadataManager().getSnapshotInfoTable(); + GenericTestUtils.waitFor(() -> { + SnapshotInfo snapshotInfo = null; + try { + snapshotInfo = snapshotInfoTable.get(newSnapshot.getTableKey()); + } catch (IOException e) { + Assertions.fail(); + } + return snapshotInfo.isSstFiltered(); + }, 1000, 30000); + return newSnapshot; + } + + private OzoneManager getNewLeader(OzoneManager leaderOM, + String followerNodeId, + OzoneManager followerOM) + throws IOException, TimeoutException, InterruptedException { + verifyLeadershipTransfer(leaderOM, followerNodeId, followerOM); + OzoneManager newLeaderOM = cluster.getOMLeader(); + Assertions.assertEquals(followerOM, newLeaderOM); + return newLeaderOM; + } + + private static void verifyLeadershipTransfer(OzoneManager leaderOM, + String 
followerNodeId, + OzoneManager followerOM) + throws IOException, TimeoutException, InterruptedException { + leaderOM.transferLeadership(followerNodeId); + + GenericTestUtils.waitFor(() -> { + try { + followerOM.checkLeaderStatus(); + return true; + } catch (OMNotLeaderException | OMLeaderNotReadyException e) { + return false; + } + }, 100, 10000); + } + + private SnapshotDiffReportOzone getSnapDiffReport(String volume, + String bucket, + String fromSnapshot, + String toSnapshot) + throws InterruptedException, TimeoutException { + AtomicReference response = new AtomicReference<>(); + AtomicLong responseInMillis = new AtomicLong(100L); + GenericTestUtils.waitFor(() -> { + try { + response.set(client.getObjectStore() + .snapshotDiff( + volume, bucket, fromSnapshot, toSnapshot, null, 0, false, + false)); + responseInMillis.set(response.get().getWaitTimeInMs()); + return response.get().getJobStatus() == DONE; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, responseInMillis.intValue(), 10000); + + return response.get().getSnapshotDiffReport(); + } + private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) throws IOException { objectStore.createSnapshot(volumeName, bucketName, name); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index 2da98434bbc..55ffca602a1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -375,7 +375,7 @@ public void testLookupKey() throws Exception { * | * a * | - * ----------------------------------- + * -------------------------------------- * | | | * b1 b2 b3 * ----- -------- ---------- @@ -454,12 +454,17 @@ public void testListKeysAtDifferentLevels() throws Exception { checkKeyList(ozoneKeyIterator, expectedKeys); // Intermediate level keyPrefix - 3rd level + // Without trailing slash ozoneKeyIterator = ozoneBucket.listKeys("a/b2/d1", null); expectedKeys = new LinkedList<>(); expectedKeys.add("a/b2/d1/"); expectedKeys.add("a/b2/d1/d11.tx"); checkKeyList(ozoneKeyIterator, expectedKeys); + // With trailing slash + ozoneKeyIterator = + ozoneBucket.listKeys("a/b2/d1/", null); + checkKeyList(ozoneKeyIterator, expectedKeys); // Boundary of a level ozoneKeyIterator = @@ -473,6 +478,21 @@ public void testListKeysAtDifferentLevels() throws Exception { ozoneBucket.listKeys("a/b3/e3", "a/b3/e3/e31.tx"); expectedKeys = new LinkedList<>(); checkKeyList(ozoneKeyIterator, expectedKeys); + + // Key level, prefix=key case + ozoneKeyIterator = + ozoneBucket.listKeys("a/b1/c1/c1.tx"); + expectedKeys = new LinkedList<>(); + expectedKeys.add("a/b1/c1/c1.tx"); + checkKeyList(ozoneKeyIterator, expectedKeys); + + // Key directly under bucket + createTestKey(ozoneBucket, "key1.tx", "key1"); + ozoneKeyIterator = + ozoneBucket.listKeys("key1.tx"); + expectedKeys = new LinkedList<>(); + expectedKeys.add("key1.tx"); + checkKeyList(ozoneKeyIterator, expectedKeys); } private void verifyFullTreeStructure(Iterator keyItr) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index 36f5eb9c040..0d3549b342c 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -42,6 +42,7 @@ import org.junit.jupiter.api.Timeout; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -86,13 +87,10 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { /* r = READ, w = WRITE, c = CREATE, d = DELETE l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */ String aclWorldAll = "world::a"; - List keys = new ArrayList<>(); // Create volumes with user1 - try (OzoneClient client = cluster.newClient()) { ObjectStore objectStore = client.getObjectStore(); - createVolumeWithOwnerAndAcl(objectStore, "volume1", "user1", aclWorldAll); } @@ -100,9 +98,7 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { UserGroupInformation.setLoginUser(user1); try (OzoneClient client = cluster.newClient()) { ObjectStore objectStore = client.getObjectStore(); - OzoneVolume volume = objectStore.getVolume("volume1"); - BucketArgs omBucketArgs = BucketArgs.newBuilder().setStorageType(StorageType.DISK).build(); @@ -134,7 +130,7 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { * Try deleting b2 * * Test case 2: - * Remove delete acl fro dir c2 + * Remove delete acl from dir c2 * Try deleting b1 * * Test case 3 @@ -147,7 +143,6 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { String keyf4 = "a/b2/d2/d21/f4"; String keyf5 = "/a/b3/e1/f5"; String keyf6 = "/a/b3/e2/f6"; - String file1 = "a/" + "file" + RandomStringUtils.randomNumeric(5); String file2 = "a/b2/d2/" + "file" + RandomStringUtils.randomNumeric(5); @@ -159,7 +154,6 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { keys.add(keyf6); keys.add(file1); keys.add(file2); - createKeys(objectStore, ozoneBucket, keys); // Test case 1 @@ -184,7 +178,6 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { assertEquals(OMException.ResultCodes.PERMISSION_DENIED, ome.getResult(), "Permission check failed"); } - // perform rename try { ozoneBucket.renameKey("a/b2", "a/b2_renamed"); @@ -198,9 +191,14 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { // Test case 2 // Remove acl from directory c2, delete/rename a/b1 should throw // permission denied since c2 is a subdirectory - - UserGroupInformation.setLoginUser(user1); - removeAclsFromKey(objectStore, ozoneBucket, "a/b1/c2"); + user1.doAs((PrivilegedExceptionAction) () -> { + try (OzoneClient c = cluster.newClient()) { + ObjectStore o = c.getObjectStore(); + OzoneBucket b = o.getVolume("volume1").getBucket("bucket1"); + removeAclsFromKey(o, b, "a/b1/c2"); + } + return null; + }); UserGroupInformation.setLoginUser(user2); // perform delete @@ -224,9 +222,15 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { } // Test case 3 - // delete b3 and this shouldn't throw exception because acls have not - // been removed from subpaths. 
- ozoneBucket.deleteDirectory("a/b3", true); + // delete b3 and this should throw exception because user2 has no acls + try { + ozoneBucket.deleteDirectory("a/b3", true); + fail("Should throw permission denied !"); + } catch (OMException ome) { + // expect permission error + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, + ome.getResult(), "Permission check failed"); + } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index df202cb5076..ffb67ba38f5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -85,6 +85,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import org.junit.Assert; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -1660,4 +1661,64 @@ public void testRecursiveVolumeDelete() () -> client.getObjectStore().getVolume(volume1)); assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); } + + @Test + public void testLinkedAndNonLinkedBucketMetaData() + throws Exception { + String volumeName = "volume1"; + // Create volume volume1 + String[] args = new String[] { + "volume", "create", "o3://" + omServiceId + + OZONE_URI_DELIMITER + volumeName}; + execute(ozoneShell, args); + out.reset(); + + // Create bucket bucket1 + args = new String[] {"bucket", "create", "o3://" + omServiceId + + OZONE_URI_DELIMITER + volumeName + "/bucket1"}; + execute(ozoneShell, args); + out.reset(); + + // ozone sh bucket list + out.reset(); + execute(ozoneShell, new String[] {"bucket", "list", "/volume1"}); + + // Expect valid JSON array + final ArrayList> bucketListOut = + parseOutputIntoArrayList(); + + Assert.assertTrue(bucketListOut.size() == 1); + boolean link = + String.valueOf(bucketListOut.get(0).get("link")).equals("false"); + assertTrue(link); + + // Create linked bucket under volume1 + out.reset(); + execute(ozoneShell, new String[]{"bucket", "link", "/volume1/bucket1", + "/volume1/link-to-bucket1"}); + + // ozone sh bucket list under volume1 and this should give both linked + // and non-linked buckets + out.reset(); + execute(ozoneShell, new String[] {"bucket", "list", "/volume1"}); + + // Expect valid JSON array + final ArrayList> bucketListLinked = + parseOutputIntoArrayList(); + + Assert.assertTrue(bucketListLinked.size() == 2); + link = String.valueOf(bucketListLinked.get(1).get("link")).equals("true"); + assertTrue(link); + + // Clean up + out.reset(); + execute(ozoneShell, new String[] {"bucket", "delete", "/volume1/bucket1"}); + out.reset(); + execute(ozoneShell, + new String[]{"bucket", "delete", "/volume1/link-to-bucket1"}); + out.reset(); + execute(ozoneShell, + new String[]{"volume", "delete", "/volume1"}); + out.reset(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index f5bd2ad602b..8c0950d6df8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -4709,4 +4709,17 @@ public ReplicationConfigValidator getReplicationConfigValidator() { 
public ReconfigurationHandler getReconfigurationHandler() { return reconfigurationHandler; } + + /** + * Wait until both buffers are flushed. This is used in cases like + * "follower bootstrap tarball creation" where the rocksDb for the active + * fs needs to synchronized with the rocksdb's for the snapshots. + */ + public void awaitDoubleBufferFlush() throws InterruptedException { + if (isRatisEnabled()) { + getOmRatisServer().getOmStateMachine().awaitDoubleBufferFlush(); + } else { + getOmServerProtocol().awaitDoubleBufferFlush(); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index ab4e99470ed..2a1cca4e1df 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -678,7 +678,7 @@ void resume() { isRunning.set(true); } - void awaitFlush() throws InterruptedException { + public void awaitFlush() throws InterruptedException { flushNotifier.await(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 73f30c80aa3..44f45bbdb7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -30,9 +30,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -200,18 +198,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omDirectoryResult == NONE) { List missingParents = omPathInfo.getMissingParents(); long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - List inheritAcls = omPathInfo.getAcls(); + OmBucketInfo omBucketInfo = + getBucketInfo(omMetadataManager, volumeName, bucketName); dirKeyInfo = createDirectoryKeyInfoWithACL(keyName, keyArgs, baseObjId, - OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()), trxnLogIndex, + omBucketInfo, omPathInfo, trxnLogIndex, ozoneManager.getDefaultReplicationConfig()); missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, - missingParents, inheritAcls, trxnLogIndex); + missingParents, omBucketInfo, omPathInfo, trxnLogIndex); numMissingParents = missingParentInfos.size(); - OmBucketInfo omBucketInfo = - getBucketInfo(omMetadataManager, volumeName, bucketName); checkBucketQuotaInNamespace(omBucketInfo, numMissingParents + 1L); omBucketInfo.incrUsedNamespace(numMissingParents + 1L); @@ -257,14 +254,16 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(), * @param ozoneManager * @param keyArgs * @param missingParents list of parent directories to be created - * @param inheritAcls ACLs to be assigned to each new parent dir + * @param bucketInfo + 
* @param omPathInfo * @param trxnLogIndex * @return * @throws IOException */ public static List getAllParentInfo(OzoneManager ozoneManager, - KeyArgs keyArgs, List missingParents, List inheritAcls, - long trxnLogIndex) throws IOException { + KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) + throws IOException { OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); List missingParentInfos = new ArrayList<>(); @@ -290,10 +289,10 @@ public static List getAllParentInfo(OzoneManager ozoneManager, } LOG.debug("missing parent {} getting added to KeyTable", missingKey); - // what about keyArgs for parent directories? TODO + OmKeyInfo parentKeyInfo = createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, - inheritAcls, trxnLogIndex, + bucketInfo, omPathInfo, trxnLogIndex, ozoneManager.getDefaultReplicationConfig()); objectCount++; @@ -349,15 +348,19 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, * @param keyName * @param keyArgs * @param objectId + * @param bucketInfo + * @param omPathInfo * @param transactionIndex * @param serverDefaultReplConfig * @return the OmKeyInfo structure */ public static OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, - KeyArgs keyArgs, long objectId, List inheritAcls, - long transactionIndex, ReplicationConfig serverDefaultReplConfig) { + KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, + ReplicationConfig serverDefaultReplConfig) { return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, - serverDefaultReplConfig).setAcls(inheritAcls) + serverDefaultReplConfig) + .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) .setUpdateID(transactionIndex).build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 081efd42e78..61ada892d62 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.request.file; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -28,7 +27,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -151,10 +149,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH || omDirectoryResult == NONE) { + OmBucketInfo omBucketInfo = + getBucketInfo(omMetadataManager, volumeName, bucketName); // prepare all missing parents missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, omPathInfo, trxnLogIndex); + 
OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( + ozoneManager, keyArgs, omBucketInfo, omPathInfo, trxnLogIndex); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omMetadataManager @@ -162,8 +162,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // total number of keys created. numKeysCreated = missingParentInfos.size() + 1; - OmBucketInfo omBucketInfo = - getBucketInfo(omMetadataManager, volumeName, bucketName); checkBucketQuotaInNamespace(omBucketInfo, numKeysCreated); omBucketInfo.incrUsedNamespace(numKeysCreated); @@ -172,7 +170,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omPathInfo.getLeafNodeName(), keyArgs, omPathInfo.getLeafNodeObjectId(), omPathInfo.getLastKnownParentId(), trxnLogIndex, - OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); + omBucketInfo, omPathInfo); OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, volumeId, bucketId, trxnLogIndex, missingParentInfos, dirInfo); @@ -254,9 +252,9 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, * @throws IOException DB failure */ public static List getAllMissingParentDirInfo( - OzoneManager ozoneManager, KeyArgs keyArgs, - OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) - throws IOException { + OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) + throws IOException { List missingParentInfos = new ArrayList<>(); // The base id is left shifted by 8 bits for creating space to @@ -273,7 +271,6 @@ public static List getAllMissingParentDirInfo( long lastKnownParentId = pathInfo.getLastKnownParentId(); List missingParents = pathInfo.getMissingParents(); - List inheritAcls = pathInfo.getAcls(); for (String missingKey : missingParents) { long nextObjId = baseObjId + objectCount; if (nextObjId > maxObjId) { @@ -286,7 +283,8 @@ public static List getAllMissingParentDirInfo( LOG.debug("missing parent {} getting added to DirectoryTable", missingKey); OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, - keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, inheritAcls); + keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, + bucketInfo, pathInfo); objectCount++; missingParentInfos.add(dirInfo); @@ -301,28 +299,27 @@ public static List getAllMissingParentDirInfo( /** * Fill in a DirectoryInfo for a new directory entry in OM database. - * without initializing ACLs from the KeyArgs - used for intermediate - * directories which get created internally/recursively during file - * and directory create. 
* @param dirName * @param keyArgs * @param objectId * @param parentObjectId - * @param inheritAcls + * @param bucketInfo + * @param omPathInfo * @return the OmDirectoryInfo structure */ private static OmDirectoryInfo createDirectoryInfoWithACL( - String dirName, KeyArgs keyArgs, long objectId, - long parentObjectId, long transactionIndex, - List inheritAcls) { + String dirName, KeyArgs keyArgs, long objectId, + long parentObjectId, long transactionIndex, + OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { return OmDirectoryInfo.newBuilder() - .setName(dirName) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setObjectID(objectId) - .setUpdateID(transactionIndex) - .setParentObjectID(parentObjectId) - .setAcls(inheritAcls).build(); + .setName(dirName) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setObjectID(objectId) + .setUpdateID(transactionIndex) + .setParentObjectID(parentObjectId) + .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) + .build(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index d004dc22b70..f0bc1f5639c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -29,7 +29,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneConfigUtil; @@ -233,7 +232,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, bucketName, keyName, Paths.get(keyName)); OMFileRequest.OMDirectoryResult omDirectoryResult = pathInfo.getDirectoryResult(); - List inheritAcls = pathInfo.getAcls(); // Check if a file or directory exists with same key name. 
checkDirectoryResult(keyName, isOverWrite, omDirectoryResult); @@ -253,7 +251,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, dbKeyInfo, keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), - ozoneManager.getPrefixManager(), omBucketInfo, trxnLogIndex, + ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), repConfig); @@ -264,7 +262,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, missingParentInfos = OMDirectoryCreateRequest .getAllParentInfo(ozoneManager, keyArgs, - pathInfo.getMissingParents(), inheritAcls, trxnLogIndex); + pathInfo.getMissingParents(), omBucketInfo, + pathInfo, trxnLogIndex); // Append new blocks List newLocationList = keyArgs.getKeyLocationsList() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 279d564b94a..792be15f27b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -156,17 +156,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, checkAllParentsExist(keyArgs, pathInfoFSO); } + // do open key + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table + missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, pathInfoFSO, trxnLogIndex); + OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. 
numKeysCreated = missingParentInfos.size(); - // do open key - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName)); final ReplicationConfig repConfig = OzoneConfigUtil .resolveReplicationConfigPreference(keyArgs.getType(), keyArgs.getFactor(), keyArgs.getEcReplicationConfig(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 17565fec5d0..2e547b51eb6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -96,7 +96,9 @@ public static OMPathInfo verifyFilesInPath( String dirNameFromDetails = omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName); List missing = new ArrayList<>(); - List inheritAcls = new ArrayList<>(); + // Get parent all acls including ACCESS and DEFAULT acls + // The logic of specific inherited acl should be when creating dir/file + List acls = new ArrayList<>(); OMDirectoryResult result = OMDirectoryResult.NONE; while (keyPath != null) { @@ -126,11 +128,10 @@ public static OMPathInfo verifyFilesInPath( result = OMDirectoryResult.DIRECTORY_EXISTS; } else { result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; - inheritAcls = omMetadataManager.getKeyTable( + acls = omMetadataManager.getKeyTable( getBucketLayout(omMetadataManager, volumeName, bucketName)) .get(dbDirKeyName).getAcls(); - LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : " - + inheritAcls); + LOG.trace("Acls from parent {} are : {}", dbDirKeyName, acls); } } else { if (!dbDirKeyName.equals(dirNameFromDetails)) { @@ -140,26 +141,25 @@ public static OMPathInfo verifyFilesInPath( if (result != OMDirectoryResult.NONE) { - LOG.trace("verifyFiles in Path : " + "/" + volumeName - + "/" + bucketName + "/" + keyName + ":" + result); - return new OMPathInfo(missing, result, inheritAcls); + LOG.trace("verifyFiles in Path : /{}/{}/{} : {}", + volumeName, bucketName, keyName, result); + return new OMPathInfo(missing, result, acls); } keyPath = keyPath.getParent(); } - if (inheritAcls.isEmpty()) { + if (acls.isEmpty()) { String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - inheritAcls = omMetadataManager.getBucketTable().get(bucketKey) + acls = omMetadataManager.getBucketTable().get(bucketKey) .getAcls(); - LOG.trace("Acls inherited from bucket " + bucketName + " are : " - + inheritAcls); + LOG.trace("Acls from bucket {} are : {}", bucketName, acls); } - LOG.trace("verifyFiles in Path : " + volumeName + "/" + bucketName + "/" - + keyName + ":" + result); + LOG.trace("verifyFiles in Path : /{}/{}/{} : {}", + volumeName, bucketName, keyName, result); // Found no files/ directories in the given path. 
- return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls); + return new OMPathInfo(missing, OMDirectoryResult.NONE, acls); } /** @@ -195,8 +195,9 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); - // by default, inherit bucket ACLs - List inheritAcls = omBucketInfo.getAcls(); + // Get parent all acls including ACCESS and DEFAULT acls + // The logic of specific inherited acl should be when creating dir/file + List acls = omBucketInfo.getAcls(); long lastKnownParentId = omBucketInfo.getObjectID(); String dbDirName = ""; // absolute path for trace logs @@ -230,7 +231,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( if (elements.hasNext()) { result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; lastKnownParentId = omDirInfo.getObjectID(); - inheritAcls = omDirInfo.getAcls(); + acls = omDirInfo.getAcls(); continue; } else { // Checked all the sub-dirs till the leaf node. @@ -261,22 +262,21 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( } } - LOG.trace("verifyFiles/Directories in Path : " + "/" + volumeName - + "/" + bucketName + "/" + keyName + ":" + result); + LOG.trace("verifyFiles/Directories in Path : /{}/{}/{} : {}", + volumeName, bucketName, keyName, result); if (result == OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH || result == OMDirectoryResult.FILE_EXISTS) { return new OMPathInfoWithFSO(leafNodeName, lastKnownParentId, missing, - result, inheritAcls, fullKeyPath.toString()); + result, acls, fullKeyPath.toString()); } String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName, bucketName, dbDirName); - LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : " - + inheritAcls); + LOG.trace("Acls from parent {} are : {}", dbDirKeyName, acls); return new OMPathInfoWithFSO(leafNodeName, lastKnownParentId, missing, - result, inheritAcls); + result, acls); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 30536559048..33bf839167e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OmUtils; @@ -74,7 +75,8 @@ */ public class OMKeyCommitRequest extends OMKeyRequest { - private static final Logger LOG = + @VisibleForTesting + public static final Logger LOG = LoggerFactory.getLogger(OMKeyCommitRequest.class); public OMKeyCommitRequest(OMRequest omRequest, BucketLayout bucketLayout) { @@ -134,7 +136,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, String keyName = commitKeyArgs.getKeyName(); OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumKeyCommits(); AuditLogger auditLogger = ozoneManager.getAuditLogger(); @@ -154,6 +155,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, boolean isHSync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); + + if (isHSync) { + omMetrics.incNumKeyHSyncs(); + } else { + omMetrics.incNumKeyCommits(); + } + + LOG.debug("isHSync = {}, volumeName 
= {}, bucketName = {}, keyName = {}", + isHSync, volumeName, bucketName, keyName); + try { commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); volumeName = commitKeyArgs.getVolumeName(); @@ -253,7 +264,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, oldVerKeyInfo); + + // Remove any block from oldVerKeyInfo that share the same container ID + // and local ID with omKeyInfo blocks'. + // Otherwise, it causes data loss once those shared blocks are added + // to deletedTable and processed by KeyDeletingService for deletion. + filterOutBlocksStillInUse(omKeyInfo, oldVerKeyInfo); + + if (!oldVerKeyInfo.getOmKeyInfoList().isEmpty()) { + oldKeyVersionsToDeleteMap.put(delKeyName, oldVerKeyInfo); + } } else { checkBucketQuotaInNamespace(omBucketInfo, 1L); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, @@ -278,6 +298,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Add to cache of open key table and key table. if (!isHSync) { + // If isHSync = false, put a tombstone in OpenKeyTable cache, + // indicating the key is removed from OpenKeyTable. + // So that this key can't be committed again. omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry( dbOpenKey, trxnLogIndex); } @@ -307,6 +330,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } + // Debug logging for any key commit operation, successful or not + LOG.debug("Key commit {} with isHSync = {}, omKeyInfo = {}", + result == Result.SUCCESS ? "succeeded" : "failed", isHSync, omKeyInfo); + if (!isHSync) { auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, exception, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index fa7f92e9a8c..9b4094a3814 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.om.request.key; import java.util.HashMap; + +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -60,7 +62,8 @@ */ public class OMKeyCommitRequestWithFSO extends OMKeyCommitRequest { - private static final Logger LOG = + @VisibleForTesting + public static final Logger LOG = LoggerFactory.getLogger(OMKeyCommitRequestWithFSO.class); public OMKeyCommitRequestWithFSO(OMRequest omRequest, @@ -98,12 +101,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Result result; boolean isHSync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); + if (isHSync) { omMetrics.incNumKeyHSyncs(); } else { omMetrics.incNumKeyCommits(); } + LOG.debug("isHSync = {}, volumeName = {}, bucketName = {}, keyName = {}", + isHSync, volumeName, bucketName, keyName); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); try { @@ -199,7 +206,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (null == 
oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, oldVerKeyInfo); + + // Remove any block from oldVerKeyInfo that share the same container ID + // and local ID with omKeyInfo blocks'. + // Otherwise, it causes data loss once those shared blocks are added + // to deletedTable and processed by KeyDeletingService for deletion. + filterOutBlocksStillInUse(omKeyInfo, oldVerKeyInfo); + + if (!oldVerKeyInfo.getOmKeyInfoList().isEmpty()) { + oldKeyVersionsToDeleteMap.put(delKeyName, oldVerKeyInfo); + } } else { checkBucketQuotaInNamespace(omBucketInfo, 1L); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, @@ -226,6 +242,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Add to cache of open key table and key table. if (!isHSync) { + // If isHSync = false, put a tombstone in OpenKeyTable cache, + // indicating the key is removed from OpenKeyTable. + // So that this key can't be committed again. OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbOpenFileKey, null, fileName, trxnLogIndex); } @@ -255,6 +274,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } + // Debug logging for any key commit operation, successful or not + LOG.debug("Key commit {} with isHSync = {}, omKeyInfo = {}", + result == Result.SUCCESS ? "succeeded" : "failed", isHSync, omKeyInfo); + if (!isHSync) { auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, exception, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index c947891c7e7..b40db65696f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -28,7 +28,6 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -235,19 +234,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getBucketInfo(omMetadataManager, volumeName, bucketName); // If FILE_EXISTS we just override like how we used to do for Key Create. - List< OzoneAcl > inheritAcls; if (LOG.isDebugEnabled()) { LOG.debug("BucketName: {}, BucketLayout: {}", bucketInfo.getBucketName(), bucketInfo.getBucketLayout()); } + + OMFileRequest.OMPathInfo pathInfo = null; + if (bucketInfo.getBucketLayout() .shouldNormalizePaths(ozoneManager.getEnableFileSystemPaths())) { - OMFileRequest.OMPathInfo pathInfo = - OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName, - bucketName, keyName, Paths.get(keyName)); + pathInfo = OMFileRequest.verifyFilesInPath(omMetadataManager, + volumeName, bucketName, keyName, Paths.get(keyName)); OMFileRequest.OMDirectoryResult omDirectoryResult = pathInfo.getDirectoryResult(); - inheritAcls = pathInfo.getAcls(); // Check if a file or directory exists with same key name. 
if (omDirectoryResult == DIRECTORY_EXISTS) { @@ -262,7 +261,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, missingParentInfos = OMDirectoryCreateRequest .getAllParentInfo(ozoneManager, keyArgs, - pathInfo.getMissingParents(), inheritAcls, trxnLogIndex); + pathInfo.getMissingParents(), bucketInfo, + pathInfo, trxnLogIndex); numMissingParents = missingParentInfos.size(); } @@ -275,7 +275,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, dbKeyInfo, keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), - ozoneManager.getPrefixManager(), bucketInfo, trxnLogIndex, + ozoneManager.getPrefixManager(), bucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), replicationConfig); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 89f42bf85c6..de6ed6b5459 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -142,17 +142,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + " as there is already file in the given path", NOT_A_FILE); } + // do open key + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)); + // add all missing parents to dir table missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, pathInfoFSO, trxnLogIndex); + OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. 
numKeysCreated = missingParentInfos.size(); - // do open key - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName)); final ReplicationConfig repConfig = OzoneConfigUtil .resolveReplicationConfigPreference(keyArgs.getType(), keyArgs.getFactor(), keyArgs.getEcReplicationConfig(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 6d079359eae..749b0f3fe62 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -26,12 +26,17 @@ import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -100,7 +105,8 @@ */ public abstract class OMKeyRequest extends OMClientRequest { - private static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class); + @VisibleForTesting + public static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class); private BucketLayout bucketLayout = BucketLayout.DEFAULT; @@ -270,9 +276,10 @@ public EncryptedKeyVersion run() throws IOException { } protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, - OmBucketInfo bucketInfo, PrefixManager prefixManager) { - List acls = new ArrayList<>(); + OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, + PrefixManager prefixManager) { + List acls = new ArrayList<>(); if (keyArgs.getAclsList() != null) { acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } @@ -296,8 +303,16 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, } } + // Inherit DEFAULT acls from parent-dir only if DEFAULT acls for + // prefix are not set + if (omPathInfo != null) { + if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls())) { + return acls; + } + } + // Inherit DEFAULT acls from bucket only if DEFAULT acls for - // prefix are not set. + // parent-dir are not set. if (bucketInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { return acls; @@ -307,6 +322,39 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, return acls; } + /** + * Inherit parent DEFAULT acls and generate its own ACCESS acls. + * @param keyArgs + * @param bucketInfo + * @param omPathInfo + * @return Acls which inherited parent DEFAULT and keyArgs ACCESS acls. 
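Put differently, the precedence encoded below is: the immediate parent directory's DEFAULT acls win, the bucket's DEFAULT acls are only a fallback, and the directory's own acls from the request are appended last. A minimal sketch using the same utility calls (parentDirAcls and bucketAcls are illustrative placeholders for omPathInfo.getAcls() and bucketInfo.getAcls()):

    List<OzoneAcl> acls = new ArrayList<>();
    if (OzoneAclUtil.inheritDefaultAcls(acls, parentDirAcls)) {
      OzoneAclUtil.toDefaultScope(acls);   // 1) parent dir DEFAULT acls, kept in DEFAULT scope
    } else if (OzoneAclUtil.inheritDefaultAcls(acls, bucketAcls)) {
      OzoneAclUtil.toDefaultScope(acls);   // 2) fallback: the bucket's DEFAULT acls
    }
    acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));  // 3) the dir's own acls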
+ */ + protected static List getAclsForDir(KeyArgs keyArgs, + OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { + // Acls inherited from parent or bucket will convert to DEFAULT scope + List acls = new ArrayList<>(); + + // Inherit DEFAULT acls from parent-dir + if (omPathInfo != null) { + if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls())) { + OzoneAclUtil.toDefaultScope(acls); + } + } + + // Inherit DEFAULT acls from bucket only if DEFAULT acls for + // parent-dir are not set. + if (acls.isEmpty() && bucketInfo != null) { + if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { + OzoneAclUtil.toDefaultScope(acls); + } + } + + // add itself acls + acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); + + return acls; + } + /** * Check Acls for the ozone bucket. * @param ozoneManager @@ -623,12 +671,13 @@ protected OmKeyInfo prepareKeyInfo( @Nullable FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, boolean isRatisEnabled, ReplicationConfig replicationConfig) throws IOException { return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size, - locations, encInfo, prefixManager, omBucketInfo, null, + locations, encInfo, prefixManager, omBucketInfo, omPathInfo, transactionLogIndex, objectID, isRatisEnabled, replicationConfig); } @@ -645,7 +694,7 @@ protected OmKeyInfo prepareFileInfo( @Nullable FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, - OMFileRequest.OMPathInfoWithFSO omPathInfo, + OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, boolean isRatisEnabled, ReplicationConfig replicationConfig) throws IOException { @@ -694,30 +743,32 @@ protected OmKeyInfo createFileInfo( @Nullable FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, - OMFileRequest.OMPathInfoWithFSO omPathInfo, - long transactionLogIndex, long objectID - ) { + OMFileRequest.OMPathInfo omPathInfo, + long transactionLogIndex, long objectID) { OmKeyInfo.Builder builder = new OmKeyInfo.Builder(); builder.setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(size) - .setReplicationConfig(replicationConfig) - .setFileEncryptionInfo(encInfo) - .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager)) - .addAllMetadata(KeyValueUtil.getFromProtobuf( - keyArgs.getMetadataList())) - .setUpdateID(transactionLogIndex) - .setFile(true); - if (omPathInfo != null) { + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, locations))) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setDataSize(size) + .setReplicationConfig(replicationConfig) + .setFileEncryptionInfo(encInfo) + .setAcls(getAclsForKey( + keyArgs, omBucketInfo, omPathInfo, prefixManager)) + .addAllMetadata(KeyValueUtil.getFromProtobuf( + keyArgs.getMetadataList())) + .setUpdateID(transactionLogIndex) + .setFile(true); + if (omPathInfo instanceof OMFileRequest.OMPathInfoWithFSO) { // FileTable metadata format - objectID = 
omPathInfo.getLeafNodeObjectId(); - builder.setParentObjectID(omPathInfo.getLastKnownParentId()); - builder.setFileName(omPathInfo.getLeafNodeName()); + OMFileRequest.OMPathInfoWithFSO omPathInfoFSO + = (OMFileRequest.OMPathInfoWithFSO) omPathInfo; + objectID = omPathInfoFSO.getLeafNodeObjectId(); + builder.setParentObjectID(omPathInfoFSO.getLastKnownParentId()); + builder.setFileName(omPathInfoFSO.getLeafNodeName()); } builder.setObjectID(objectID); return builder.build(); @@ -736,7 +787,7 @@ private OmKeyInfo prepareMultipartFileInfo( @Nonnull List locations, FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, - OMFileRequest.OMPathInfoWithFSO omPathInfo, + OMFileRequest.OMPathInfo omPathInfo, @Nonnull long transactionLogIndex, long objectID) throws IOException { @@ -749,15 +800,17 @@ private OmKeyInfo prepareMultipartFileInfo( String uploadID = args.getMultipartUploadID(); Preconditions.checkNotNull(uploadID); String multipartKey = ""; - if (omPathInfo != null) { + if (omPathInfo instanceof OMFileRequest.OMPathInfoWithFSO) { + OMFileRequest.OMPathInfoWithFSO omPathInfoFSO + = (OMFileRequest.OMPathInfoWithFSO) omPathInfo; final long volumeId = omMetadataManager.getVolumeId( args.getVolumeName()); final long bucketId = omMetadataManager.getBucketId( args.getVolumeName(), args.getBucketName()); // FileTable metadata format multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId, - omPathInfo.getLastKnownParentId(), - omPathInfo.getLeafNodeName(), uploadID); + omPathInfoFSO.getLastKnownParentId(), + omPathInfoFSO.getLeafNodeName(), uploadID); } else { multipartKey = omMetadataManager .getMultipartKey(args.getVolumeName(), args.getBucketName(), @@ -844,4 +897,109 @@ protected OmKeyInfo wrapUncommittedBlocksAsPseudoKey( pseudoKeyInfo.setKeyLocationVersions(uncommittedGroups); return pseudoKeyInfo; } + + /** + * Remove blocks in-place from keysToBeFiltered that exist in referenceKey. + *

+ * keysToBeFiltered.getOmKeyInfoList() becomes an empty list when all blocks + * are filtered out. + * + * @param referenceKey OmKeyInfo + * @param keysToBeFiltered RepeatedOmKeyInfo + */ + protected void filterOutBlocksStillInUse(OmKeyInfo referenceKey, + RepeatedOmKeyInfo keysToBeFiltered) { + + LOG.debug("Before block filtering, keysToBeFiltered = {}", + keysToBeFiltered); + + // A HashSet for fast lookup. Gathers all ContainerBlockID entries inside + // the referenceKey. + HashSet cbIdSet = referenceKey.getKeyLocationVersions() + .stream() + .flatMap(e -> e.getLocationList().stream()) + .map(omKeyLocationInfo -> + omKeyLocationInfo.getBlockID().getContainerBlockID()) + .collect(Collectors.toCollection(HashSet::new)); + + // Pardon the nested loops. ContainerBlockID is 9-layer deep from: + // keysToBeFiltered // Layer 0. RepeatedOmKeyInfo + // .getOmKeyInfoList() // 1. List + // .get(0) // 2. OmKeyInfo + // .getKeyLocationVersions() // 3. List + // .get(0) // 4. OmKeyLocationInfoGroup + // .getLocationVersionMap() // 5. Map> + // .get(version) // 6. List + // .get(0) // 7. OmKeyLocationInfo + // .getBlockID() // 8. BlockID + // .getContainerBlockID(); // 9. ContainerBlockID + + // Using iterator instead of `for` or `forEach` for in-place entry removal + + // Layer 1: List + Iterator iterOmKeyInfo = keysToBeFiltered + .getOmKeyInfoList().iterator(); + + while (iterOmKeyInfo.hasNext()) { + // Note with HDDS-8462, each RepeatedOmKeyInfo should have only one entry, + // so this outer most loop should never be entered twice in each call. + + // But for completeness sake I shall put it here. + // Remove only when RepeatedOmKeyInfo is no longer used. + + // Layer 2: OmKeyInfo + OmKeyInfo oldOmKeyInfo = iterOmKeyInfo.next(); + // Layer 3: List + Iterator iterKeyLocInfoGroup = oldOmKeyInfo + .getKeyLocationVersions().iterator(); + while (iterKeyLocInfoGroup.hasNext()) { + // Layer 4: OmKeyLocationInfoGroup + OmKeyLocationInfoGroup keyLocInfoGroup = iterKeyLocInfoGroup.next(); + // Layer 5: Map> + Iterator>> iterVerMap = + keyLocInfoGroup.getLocationVersionMap().entrySet().iterator(); + + while (iterVerMap.hasNext()) { + Map.Entry> mapEntry = iterVerMap.next(); + // Layer 6: List + List omKeyLocationInfoList = mapEntry.getValue(); + + Iterator iterKeyLocInfo = + omKeyLocationInfoList.iterator(); + while (iterKeyLocInfo.hasNext()) { + // Layer 7: OmKeyLocationInfo + OmKeyLocationInfo keyLocationInfo = iterKeyLocInfo.next(); + // Layer 8: BlockID. Then Layer 9: ContainerBlockID + ContainerBlockID cbId = keyLocationInfo + .getBlockID().getContainerBlockID(); + + if (cbIdSet.contains(cbId)) { + // Remove this block from oldVerKeyInfo because it is referenced. 
+ iterKeyLocInfo.remove(); + LOG.debug("Filtered out block: {}", cbId); + } + } + + // Cleanup when Layer 6 is an empty list + if (omKeyLocationInfoList.isEmpty()) { + iterVerMap.remove(); + } + } + + // Cleanup when Layer 5 is an empty map + if (keyLocInfoGroup.getLocationVersionMap().isEmpty()) { + iterKeyLocInfoGroup.remove(); + } + } + + // Cleanup when Layer 3 is an empty list + if (oldOmKeyInfo.getKeyLocationVersions().isEmpty()) { + iterOmKeyInfo.remove(); + } + } + + // Intentional extra space for alignment + LOG.debug("After block filtering, keysToBeFiltered = {}", + keysToBeFiltered); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index c753fb4c620..90d35e86c3f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -57,6 +58,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; import java.util.Map; @@ -177,6 +179,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // initiate MPU. 
final OmBucketInfo bucketInfo = omMetadataManager.getBucketTable() .get(omMetadataManager.getBucketKey(volumeName, bucketName)); + + OMFileRequest.OMPathInfo pathInfo = null; + if (bucketInfo != null && bucketInfo.getBucketLayout() + .shouldNormalizePaths(ozoneManager.getEnableFileSystemPaths())) { + pathInfo = OMFileRequest.verifyFilesInPath(omMetadataManager, + volumeName, bucketName, keyName, Paths.get(keyName)); + } final ReplicationConfig replicationConfig = OzoneConfigUtil .resolveReplicationConfigPreference(keyArgs.getType(), keyArgs.getFactor(), keyArgs.getEcReplicationConfig(), @@ -202,7 +211,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setReplicationConfig(replicationConfig) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setAcls(getAclsForKey(keyArgs, bucketInfo, + .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfo, ozoneManager.getPrefixManager())) .setObjectID(objectID) .setUpdateID(transactionLogIndex) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index bcfaa8e6a08..2a0b891e160 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; @@ -118,10 +117,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // check if the directory already existed in OM checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult()); + final OmBucketInfo bucketInfo = getBucketInfo(omMetadataManager, + volumeName, bucketName); + // add all missing parents to dir table missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO, - transactionLogIndex); + .getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, + pathInfoFSO, transactionLogIndex); // We are adding uploadId to key, because if multiple users try to // perform multipart upload on the same key, each will try to upload, who @@ -156,8 +158,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // care of in the final complete multipart upload. AWS S3 behavior is // also like this, even when key exists in a bucket, user can still // initiate MPU. 
- final OmBucketInfo bucketInfo = getBucketInfo(omMetadataManager, - volumeName, bucketName); final ReplicationConfig replicationConfig = OzoneConfigUtil .resolveReplicationConfigPreference(keyArgs.getType(), keyArgs.getFactor(), keyArgs.getEcReplicationConfig(), @@ -183,7 +183,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setReplicationConfig(replicationConfig) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) + .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfoFSO, + ozoneManager.getPrefixManager())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) .setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 17adbd13d07..c8e9b679cf4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -331,4 +331,13 @@ public void stop() { public static Logger getLog() { return LOG; } + + /** + * Wait until both buffers are flushed. This is used in cases like + * "follower bootstrap tarball creation" where the rocksDb for the active + * fs needs to synchronized with the rocksdb's for the snapshots. + */ + public void awaitDoubleBufferFlush() throws InterruptedException { + ozoneManagerDoubleBuffer.awaitFlush(); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index f3debcc484c..4faf60decb8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -18,13 +18,20 @@ package org.apache.hadoop.ozone.om.request.file; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -56,6 +63,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; @@ -420,6 +428,79 @@ public void testCreateDirectoryOMMetric() Assert.assertEquals(4L, 
omMetrics.getNumKeys()); } + @Test + public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String keyName = genRandomKeyName(); + + List acls = new ArrayList<>(); + acls.add(OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]")); + acls.add(OzoneAcl.parseAcl("user:noInherit:rw")); + acls.add(OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]")); + + // create bucket with DEFAULT acls + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setAcls(acls)); + + // Verify bucket has DEFAULT acls. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + List bucketAcls = omMetadataManager.getBucketTable() + .get(bucketKey).getAcls(); + Assert.assertEquals(acls, bucketAcls); + + // Create sub dirs + OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, + keyName); + OMDirectoryCreateRequest omDirectoryCreateRequest = + new OMDirectoryCreateRequest(omRequest, getBucketLayout()); + + OMRequest modifiedOmRequest = + omDirectoryCreateRequest.preExecute(ozoneManager); + + omDirectoryCreateRequest = + new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + + OMClientResponse omClientResponse = + omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + + Assert.assertSame(omClientResponse.getOMResponse().getStatus(), + OzoneManagerProtocolProtos.Status.OK); + + // Verify sub dirs inherit parent DEFAULT acls. + verifyDirectoriesInheritAcls(volumeName, bucketName, keyName, bucketAcls); + + } + + private void verifyDirectoriesInheritAcls(String volumeName, + String bucketName, String keyName, List bucketAcls) + throws IOException { + List nodes = Arrays.asList(keyName.split(OZONE_URI_DELIMITER)); + + List expectedInheritAcls = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + String prefix = ""; + + for (int indx = 0; indx < nodes.size(); indx++) { + String dirName = prefix + nodes.get(indx); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()) + .get(omMetadataManager + .getOzoneDirKey(volumeName, bucketName, dirName)); + + List omKeyAcls = omKeyInfo.getAcls(); + + Assert.assertEquals("Failed to inherit parent acls!,", + expectedInheritAcls, omKeyAcls); + + prefix = dirName + OZONE_URI_DELIMITER; + expectedInheritAcls = omKeyAcls; + } + } /** * Create OMRequest which encapsulates CreateDirectory request. 
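For reference, the inheritance rule these directory tests exercise can be summarized in a small standalone sketch: only DEFAULT-scoped ACLs of the immediate parent (or, when the parent has none, the bucket) are inherited; directories keep the inherited entries at DEFAULT scope, while leaf keys/files store them at ACCESS scope. This is an illustration of the behaviour the tests verify, not the OM implementation itself; the class and helper names below are invented, and only the OzoneAcl calls already used in this patch are relied on.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.ozone.OzoneAcl;

/**
 * Illustrative sketch (not part of this patch) of the DEFAULT-ACL
 * inheritance rule verified by the directory/key tests above.
 */
public final class AclInheritanceSketch {

  private AclInheritanceSketch() {
  }

  /** ACLs a child directory would receive: DEFAULT entries, DEFAULT scope. */
  static List<OzoneAcl> inheritedForDirectory(List<OzoneAcl> parentAcls) {
    // Only DEFAULT-scoped parent entries are inheritable.
    return parentAcls.stream()
        .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT)
        .collect(Collectors.toList());
  }

  /** ACLs a leaf key/file would receive: same entries, re-scoped to ACCESS. */
  static List<OzoneAcl> inheritedForKey(List<OzoneAcl> parentAcls) {
    return inheritedForDirectory(parentAcls).stream()
        .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) throws Exception {
    List<OzoneAcl> bucketAcls = Arrays.asList(
        OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]"),
        OzoneAcl.parseAcl("user:noInherit:rw"),
        OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]"));

    // Expected to contain only the two DEFAULT entries, still DEFAULT scope.
    System.out.println(inheritedForDirectory(bucketAcls));
    // Expected to contain the same two entries, re-scoped to ACCESS.
    System.out.println(inheritedForKey(bucketAcls));
  }
}

The FSO and Legacy-with-filesystem-paths tests that follow assert exactly this shape: each created sub-directory carries the parent's DEFAULT entries, and the leaf object carries them at ACCESS scope.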
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 0a8b4b44b59..9b79344741e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -59,6 +60,7 @@ import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.mockito.ArgumentMatchers.any; @@ -656,6 +658,88 @@ public void testCreateDirectoryOMMetric() throws Exception { Assert.assertEquals(dirs.size(), omMetrics.getNumKeys()); } + @Test + public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { + String volumeName = "vol1"; + String bucketName = "bucket1"; + List dirs = new ArrayList<>(); + String keyName = createDirKey(dirs, 3); + + List acls = new ArrayList<>(); + acls.add(OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]")); + acls.add(OzoneAcl.parseAcl("user:noInherit:rw")); + acls.add(OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]")); + + // Create bucket with DEFAULT acls + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setAcls(acls)); + + // Verify bucket has DEFAULT acls. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + List bucketAcls = omMetadataManager.getBucketTable() + .get(bucketKey).getAcls(); + Assert.assertEquals(acls, bucketAcls); + + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, + bucketName); + + // Create dir with acls inherited from parent DEFAULT acls + OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, + keyName); + OMDirectoryCreateRequestWithFSO omDirCreateReqFSO = + new OMDirectoryCreateRequestWithFSO(omRequest, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager); + + omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + OMClientResponse omClientResponse = + omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + Assert.assertSame(omClientResponse.getOMResponse().getStatus(), + OzoneManagerProtocolProtos.Status.OK); + + // Verify sub dirs inherit parent DEFAULT acls. 
+ verifyDirectoriesInheritAcls(dirs, volumeId, bucketId, bucketAcls); + + } + + private void verifyDirectoriesInheritAcls(List dirs, + long volumeId, long bucketId, List bucketAcls) + throws IOException { + // bucketID is the parent + long parentID = bucketId; + List expectedInheritAcls = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + System.out.println("expectedInheritAcls: " + expectedInheritAcls); + + // dir should inherit parent DEFAULT acls and self has DEFAULT scope + // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] + for (int indx = 0; indx < dirs.size(); indx++) { + String dirName = dirs.get(indx); + String dbKey = ""; + // for index=0, parentID is bucketID + dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + parentID, dirName); + OmDirectoryInfo omDirInfo = + omMetadataManager.getDirectoryTable().get(dbKey); + List omDirAcls = omDirInfo.getAcls(); + System.out.println( + " subdir acls : " + omDirInfo + " ==> " + omDirAcls); + + Assert.assertEquals("Failed to inherit parent DEFAULT acls!", + expectedInheritAcls, omDirAcls); + + parentID = omDirInfo.getObjectID(); + expectedInheritAcls = omDirAcls; + } + } @NotNull private String createDirKey(List dirs, int depth) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 367e5d5cce1..f79b9f9b478 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -18,11 +18,16 @@ package org.apache.hadoop.ozone.om.request.file; +import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.Assert; @@ -337,6 +342,148 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() testNonRecursivePath(key, false, false, true); } + @Test + public void testCreateFileInheritParentDefaultAcls() + throws Exception { + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + String prefix = "a/b/c/"; + List dirs = new ArrayList<>(); + dirs.add("a"); + dirs.add("b"); + dirs.add("c"); + String keyName = prefix + UUID.randomUUID(); + List bucketAclResults = new ArrayList<>(); + + OmKeyInfo omKeyInfo = createFileWithInheritAcls(keyName, bucketAclResults); + + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + + verifyInheritAcls(dirs, omKeyInfo, volumeId, bucketId, bucketAclResults); + } + + protected OmKeyInfo createFileWithInheritAcls(String keyName, + List bucketAclResults) throws Exception { + List acls = new ArrayList<>(); + acls.add(OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]")); + acls.add(OzoneAcl.parseAcl("user:noInherit:rw")); + acls.add(OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]")); + + // Create bucket with DEFAULT acls + 
OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setAcls(acls)); + + // Verify bucket has DEFAULT acls. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + bucketAclResults.addAll(omMetadataManager.getBucketTable() + .get(bucketKey).getAcls()); + Assert.assertEquals(acls, bucketAclResults); + + // Recursive create file with acls inherited from bucket DEFAULT acls + OMRequest omRequest = createFileRequest(volumeName, bucketName, + keyName, HddsProtos.ReplicationFactor.ONE, + HddsProtos.ReplicationType.RATIS, false, true); + + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); + OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); + + omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest); + OMClientResponse omFileCreateResponse = + omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omFileCreateResponse.getOMResponse().getStatus()); + + long id = modifiedOmRequest.getCreateFileRequest().getClientID(); + return verifyPathInOpenKeyTable(keyName, id, true); + } + + /** + * The following layout should inherit the parent DEFAULT acls: + * (1) FSO + * (2) Legacy when EnableFileSystemPaths + * + * The following layout should inherit the bucket DEFAULT acls: + * (1) OBS + * (2) Legacy when DisableFileSystemPaths + * + * Note: Acl which dir inherited itself has DEFAULT scope, + * and acl which leaf file inherited itself has ACCESS scope. + */ + protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, + long volumeId, long bucketId, List bucketAcls) + throws IOException { + + if (getBucketLayout().shouldNormalizePaths( + ozoneManager.getEnableFileSystemPaths())) { + + // bucketID is the parent + long parentID = bucketId; + List expectedInheritAcls = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + System.out.println("expectedInheritAcls: " + expectedInheritAcls); + + // dir should inherit parent DEFAULT acls and itself has DEFAULT scope + // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] + for (int indx = 0; indx < dirs.size(); indx++) { + String dirName = dirs.get(indx); + String dbKey = ""; + // for index=0, parentID is bucketID + dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + parentID, dirName); + OmDirectoryInfo omDirInfo = + omMetadataManager.getDirectoryTable().get(dbKey); + List omDirAcls = omDirInfo.getAcls(); + + System.out.println( + " subdir acls : " + omDirInfo + " ==> " + omDirAcls); + Assert.assertEquals("Failed to inherit parent DEFAULT acls!", + expectedInheritAcls, omDirAcls); + + parentID = omDirInfo.getObjectID(); + expectedInheritAcls = omDirAcls; + + // file should inherit parent DEFAULT acls and itself has ACCESS scope + // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] + if (indx == dirs.size() - 1) { + // verify file acls + Assert.assertEquals(omDirInfo.getObjectID(), + omKeyInfo.getParentObjectID()); + List fileAcls = omDirInfo.getAcls(); + System.out.println(" file acls : " + omKeyInfo + " ==> " + fileAcls); + Assert.assertEquals("Failed to inherit parent DEFAULT acls!", + expectedInheritAcls.stream() + .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .collect(Collectors.toList()), fileAcls); + } + } + } else { + List 
keyAcls = omKeyInfo.getAcls(); + + List parentDefaultAcl = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + + OzoneAcl parentAccessAcl = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.ACCESS) + .findAny().orElse(null); + + // Should inherit parent DEFAULT acls + // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] + Assert.assertEquals("Failed to inherit bucket DEFAULT acls!", + parentDefaultAcl.stream() + .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .collect(Collectors.toList()), keyAcls); + // Should not inherit parent ACCESS acls + Assert.assertFalse(keyAcls.contains(parentAccessAcl)); + } + } + @Test public void testPreExecuteWithInvalidKeyPrefix() throws Exception { String[] invalidKeyNames = { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index 2bd4db25816..4ce9f48d60c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -157,6 +157,12 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() testNonRecursivePath(key, false, false, true); } + @Test + public void testCreateFileInheritParentDefaultAcls() + throws Exception { + super.testCreateFileInheritParentDefaultAcls(); + } + @Test public void testValidateAndUpdateCacheWithSnapshotReservedWord() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 92a484c9198..4c610141b65 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -313,8 +313,13 @@ public void testRejectHsyncIfNotEnabled() throws Exception { omMetadataManager, bucketLayout); List allocatedKeyLocationList = getKeyLocation(10); + // hsync should throw OMException assertThrows(OMException.class, () -> - performHsyncCommit(allocatedKeyLocationList.subList(0, 5))); + doKeyCommit(true, allocatedKeyLocationList.subList(0, 5))); + + // Regular key commit should still work + doKeyCommit(false, allocatedKeyLocationList.subList(0, 5)); + conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); } @@ -330,26 +335,38 @@ public void testCommitWithHsyncIncrementalUsages() throws Exception { .get(bucketKey); long usedBytes = bucketInfo.getUsedBytes(); - performHsyncCommit(allocatedKeyLocationList.subList(0, 5)); - bucketInfo = omMetadataManager.getBucketTable() - .get(bucketKey); + // 1st commit of 3 blocks, HSync = true + Map keyToDeleteMap = + doKeyCommit(true, allocatedKeyLocationList.subList(0, 3)); + Assert.assertNull(keyToDeleteMap); + bucketInfo = omMetadataManager.getBucketTable().get(bucketKey); long firstCommitUsedBytes = bucketInfo.getUsedBytes(); - Assert.assertEquals(500, firstCommitUsedBytes - usedBytes); - - performHsyncCommit(allocatedKeyLocationList); - bucketInfo = omMetadataManager.getBucketTable() - .get(bucketKey); - long nextCommitUsedBytes = 
bucketInfo.getUsedBytes(); - - Assert.assertEquals(1000, nextCommitUsedBytes - usedBytes); + Assert.assertEquals(300, firstCommitUsedBytes - usedBytes); + + // 2nd commit of 6 blocks, HSync = true + keyToDeleteMap = doKeyCommit(true, allocatedKeyLocationList.subList(0, 6)); + Assert.assertNull(keyToDeleteMap); + bucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + long secondCommitUsedBytes = bucketInfo.getUsedBytes(); + Assert.assertEquals(600, secondCommitUsedBytes - usedBytes); + + // 3rd and final commit of all 10 blocks, HSync = false + keyToDeleteMap = doKeyCommit(false, allocatedKeyLocationList); + // keyToDeleteMap should be empty because none of the previous blocks + // should be deleted. + Assert.assertNotNull(keyToDeleteMap); + Assert.assertTrue(keyToDeleteMap.isEmpty()); + bucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + long thirdCommitUsedBytes = bucketInfo.getUsedBytes(); + Assert.assertEquals(1000, thirdCommitUsedBytes - usedBytes); } - - private List performHsyncCommit( + + private Map doKeyCommit(boolean isHSync, List keyLocations) throws Exception { // allocated block list dataSize = keyLocations.size() * 100; OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest( - keyLocations, true)); + keyLocations, isHSync)); OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); @@ -365,16 +382,22 @@ private List performHsyncCommit( Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); - // key must be prsent in both open key table and key table for hsync + // Key should be present in both OpenKeyTable and KeyTable with HSync commit OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable( omKeyCommitRequest.getBucketLayout()).get(openKey); - Assert.assertNotNull(omKeyInfo); + if (isHSync) { + Assert.assertNotNull(omKeyInfo); + } else { + // Key should not exist in OpenKeyTable anymore with non-HSync commit + Assert.assertNull(omKeyInfo); + } omKeyInfo = omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()) .get(ozoneKey); Assert.assertNotNull(omKeyInfo); - return keyLocations; + + return ((OMKeyCommitResponse) omClientResponse).getKeysToDelete(); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 23337f534b7..29cb6c86c60 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -21,14 +21,17 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -658,6 +661,78 @@ public void testPreExecuteWithInvalidKeyPrefix() throws Exception { + OM_SNAPSHOT_INDICATOR + OM_KEY_PREFIX)); } + @Test + public void 
testKeyCreateInheritParentDefaultAcls() + throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(keyPathLockEnabled, enableFileSystemPaths)); + + List acls = new ArrayList<>(); + acls.add(OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]")); + acls.add(OzoneAcl.parseAcl("user:noInherit:rw")); + acls.add(OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]")); + + // create bucket with DEFAULT acls + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setAcls(acls)); + + // Verify bucket has DEFAULT acls. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + List bucketAcls = omMetadataManager.getBucketTable() + .get(bucketKey).getAcls(); + Assert.assertEquals(acls, bucketAcls); + + // create file inherit bucket DEFAULT acls + OMRequest modifiedOmRequest = + doPreExecute(createKeyRequest(false, 0)); + + OMKeyCreateRequest omKeyCreateRequest = + getOMKeyCreateRequest(modifiedOmRequest); + + long id = modifiedOmRequest.getCreateKeyRequest().getClientID(); + String openKey = getOpenKey(id); + + OMClientResponse omKeyCreateResponse = + omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + checkResponse(modifiedOmRequest, omKeyCreateResponse, id, false, + omKeyCreateRequest.getBucketLayout()); + + OmKeyInfo omKeyInfo = + omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKey); + + verifyKeyInheritAcls(omKeyInfo.getAcls(), bucketAcls); + + } + + /** + * Leaf file has ACCESS scope acls which inherited + * from parent DEFAULT acls. + */ + private void verifyKeyInheritAcls(List keyAcls, + List bucketAcls) { + + List parentDefaultAcl = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + + OzoneAcl parentAccessAcl = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.ACCESS) + .findAny().orElse(null); + + // Should inherit parent DEFAULT Acls + Assert.assertEquals("Failed to inherit parent DEFAULT acls!,", + parentDefaultAcl.stream() + .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .collect(Collectors.toList()), keyAcls); + + // Should not inherit parent ACCESS Acls + Assert.assertFalse(keyAcls.contains(parentAccessAcl)); + } + protected void addToKeyTable(String keyName) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index fc766bcbc2c..8f92c1ab223 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -85,6 +85,12 @@ public void testValidateAndUpdateCacheWithKeyContainsSnapshotReservedWord() } } + @Test + public void testKeyCreateInheritParentDefaultAcls() + throws Exception { + super.testKeyCreateInheritParentDefaultAcls(); + } + @Override protected OzoneConfiguration getOzoneConfiguration() { OzoneConfiguration config = super.getOzoneConfiguration(); diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 37ad06f5d7c..0954382b78b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ozone.test.GenericTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.After; import org.junit.Assert; @@ -73,6 +74,7 @@ import org.apache.hadoop.ozone.om.ScmClient; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.util.Time; +import org.slf4j.event.Level; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.setupReplicationConfigValidation; import static org.mockito.ArgumentMatchers.any; @@ -231,6 +233,11 @@ public void setup() throws Exception { OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); when(ozoneManager.getOmSnapshotManager()) .thenReturn(omSnapshotManager); + + // Enable DEBUG level logging for relevant classes + GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); } @NotNull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index d473bf9621d..499da5370c8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -19,8 +19,14 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import java.util.ArrayList; +import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; @@ -161,4 +167,81 @@ protected String getMultipartKey(String volumeName, String bucketName, return omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID); } + + @Test + public void testMultipartUploadInheritParentDefaultAcls() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + List acls = new ArrayList<>(); + acls.add(OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]")); + acls.add(OzoneAcl.parseAcl("user:noInherit:rw")); + acls.add(OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]")); + + // create bucket with DEFAULT acls + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + 
.setBucketLayout(getBucketLayout()) + .setAcls(acls)); + + // Verify bucket has DEFAULT acls. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + List bucketAcls = omMetadataManager.getBucketTable() + .get(bucketKey).getAcls(); + Assert.assertEquals(acls, bucketAcls); + + // create file with acls inherited from parent DEFAULT acls + OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, + bucketName, keyName); + + S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = + getS3InitiateMultipartUploadReq(modifiedRequest); + + OMClientResponse omClientResponse = + s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, + 100L, ozoneManagerDoubleBufferHelper); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + String multipartKey = getMultipartKey(volumeName, bucketName, keyName, + modifiedRequest.getInitiateMultiPartUploadRequest() + .getKeyArgs().getMultipartUploadID()); + + OmKeyInfo omKeyInfo = omMetadataManager + .getOpenKeyTable(s3InitiateMultipartUploadRequest.getBucketLayout()) + .get(multipartKey); + + verifyKeyInheritAcls(omKeyInfo.getAcls(), bucketAcls); + + } + + /** + * Leaf key has ACCESS scope acls which inherited + * from parent DEFAULT acls. + */ + private void verifyKeyInheritAcls(List keyAcls, + List bucketAcls) { + + List parentDefaultAcl = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + + OzoneAcl parentAccessAcl = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.ACCESS) + .findAny().orElse(null); + + // Should inherit parent DEFAULT Acls + // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] + Assert.assertEquals("Failed to inherit parent DEFAULT acls!", + parentDefaultAcl.stream() + .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .collect(Collectors.toList()), keyAcls); + + // Should not inherit parent ACCESS Acls + Assert.assertFalse(keyAcls.contains(parentAccessAcl)); + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index 6fb26486ad3..cd96054bf12 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; @@ -34,6 +36,7 @@ import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; /** * Tests S3 Initiate Multipart Upload request. 
@@ -146,6 +149,114 @@ protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( BucketLayout.FILE_SYSTEM_OPTIMIZED); } + @Test + public void testMultipartUploadInheritParentDefaultAcls() + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String prefix = "a/b/c/"; + List dirs = new ArrayList<>(); + dirs.add("a"); + dirs.add("b"); + dirs.add("c"); + String fileName = UUID.randomUUID().toString(); + String keyName = prefix + fileName; + + List acls = new ArrayList<>(); + acls.add(OzoneAcl.parseAcl("user:newUser:rw[DEFAULT]")); + acls.add(OzoneAcl.parseAcl("user:noInherit:rw")); + acls.add(OzoneAcl.parseAcl("group:newGroup:rwl[DEFAULT]")); + + // create bucket with DEFAULT acls + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setAcls(acls)); + + // Verify bucket has DEFAULT acls. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + List bucketAcls = omMetadataManager.getBucketTable() + .get(bucketKey).getAcls(); + Assert.assertEquals(acls, bucketAcls); + + // create dir with acls inherited from parent DEFAULT acls + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, + bucketName); + OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName, + bucketName, keyName); + + S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO = + getS3InitiateMultipartUploadReq(modifiedRequest); + + OMClientResponse omClientResponse = + s3InitiateMultipartUploadReqFSO.validateAndUpdateCache( + ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + + // create file with acls inherited from parent DEFAULT acls + long parentID = verifyDirectoriesInDB(dirs, volumeId, bucketId); + String multipartOpenFileKey = omMetadataManager.getMultipartKey(volumeId, + bucketId, parentID, fileName, + modifiedRequest.getInitiateMultiPartUploadRequest() + .getKeyArgs().getMultipartUploadID()); + OmKeyInfo omKeyInfo = omMetadataManager + .getOpenKeyTable(s3InitiateMultipartUploadReqFSO.getBucketLayout()) + .get(multipartOpenFileKey); + + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + verifyKeyInheritAcls(dirs, omKeyInfo, volumeId, bucketId, bucketAcls); + + } + + private void verifyKeyInheritAcls(List dirs, OmKeyInfo fileInfo, + long volumeId, long bucketId, List bucketAcls) + throws IOException { + // bucketID is the parent + long parentID = bucketId; + List expectedInheritAcls = bucketAcls.stream() + .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .collect(Collectors.toList()); + System.out.println("expectedInheritAcls: " + expectedInheritAcls); + + // dir should inherit parent DEFAULT acls and self has DEFAULT scope + // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] + for (int indx = 0; indx < dirs.size(); indx++) { + String dirName = dirs.get(indx); + String dbKey = ""; + // for index=0, parentID is bucketID + dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + parentID, dirName); + OmDirectoryInfo omDirInfo = + omMetadataManager.getDirectoryTable().get(dbKey); + List omDirAcls = omDirInfo.getAcls(); + + System.out.println(" subdir acls : " + omDirInfo + " ==> " + omDirAcls); + Assert.assertEquals("Failed to inherit parent DEFAULT acls!", + expectedInheritAcls, 
omDirAcls); + + parentID = omDirInfo.getObjectID(); + expectedInheritAcls = omDirAcls; + + // file should inherit parent DEFAULT acls and self has ACCESS scope + // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] + if (indx == dirs.size() - 1) { + // verify file acls + Assert.assertEquals(fileInfo.getParentObjectID(), + omDirInfo.getObjectID()); + List fileAcls = fileInfo.getAcls(); + System.out.println(" file acls : " + fileInfo + " ==> " + fileAcls); + Assert.assertEquals("Failed to inherit parent DEFAULT acls!", + expectedInheritAcls.stream() + .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .collect(Collectors.toList()), fileAcls); + } + } + } + @Override public BucketLayout getBucketLayout() { return BucketLayout.FILE_SYSTEM_OPTIMIZED; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 2c714786623..801dbb0ff2c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -105,11 +105,6 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, * @return the http json response wrapped in below format: * * { - * "keysSummary": { - * "totalUnreplicatedDataSize": 2147483648, - * "totalReplicatedDataSize": 2147483648, - * "totalOpenKeys": 8 - * }, * "lastKey": "/-4611686018427388160/-9223372036854775552/-922777620354", * "replicatedTotal": 2147483648, * "unreplicatedTotal": 2147483648, @@ -174,8 +169,6 @@ public Response getOpenKeyInfo( List nonFSOKeyInfoList = openKeyInsightInfo.getNonFSOKeyInfoList(); - // Create a HashMap for the keysSummary - Map keysSummary = new HashMap<>(); boolean skipPrevKeyDone = false; boolean isLegacyBucketLayout = true; boolean recordsFetchedLimitReached = false; @@ -255,15 +248,39 @@ public Response getOpenKeyInfo( break; } } - // Populate the keysSummary map - createKeysSummaryForOpenKey(keysSummary); - - openKeyInsightInfo.setKeysSummary(keysSummary); openKeyInsightInfo.setLastKey(lastKey); return Response.ok(openKeyInsightInfo).build(); } + /** + * Retrieves the summary of open keys. + * + * This method calculates and returns a summary of open keys. + * + * @return The HTTP response body includes a map with the following entries: + * - "totalOpenKeys": the total number of open keys + * - "totalReplicatedDataSize": the total replicated size for open keys + * - "totalUnreplicatedDataSize": the total unreplicated size for open keys + * + * + * Example response: + * { + * "totalOpenKeys": 8, + * "totalReplicatedDataSize": 90000, + * "totalUnreplicatedDataSize": 30000 + * } + */ + @GET + @Path("/open/summary") + public Response getOpenKeySummary() { + // Create a HashMap for the keysSummary + Map keysSummary = new HashMap<>(); + // Create a keys summary for open keys + createKeysSummaryForOpenKey(keysSummary); + return Response.ok(keysSummary).build(); + } + /** * Creates a keys summary for open keys and updates the provided * keysSummary map. 
Calculates the total number of open keys, replicated @@ -310,8 +327,6 @@ private void getPendingForDeletionKeyInfo( deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); Table deletedTable = omMetadataManager.getDeletedTable(); - // Create a HashMap for the keysSummary - Map keysSummary = new HashMap<>(); try ( TableIterator> @@ -348,10 +363,6 @@ private void getPendingForDeletionKeyInfo( break; } } - // Create the keysSummary for deleted keys - createKeysSummaryForDeletedKey(keysSummary); - // Set the keysSummary and lastKey in the response - deletedKeyAndDirInsightInfo.setKeysSummary(keysSummary); deletedKeyAndDirInsightInfo.setLastKey(lastKey); } catch (IOException ex) { throw new WebApplicationException(ex, @@ -364,6 +375,33 @@ private void getPendingForDeletionKeyInfo( } } + /** Retrieves the summary of deleted keys. + * + * This method calculates and returns a summary of deleted keys. + * + * @return The HTTP response body includes a map with the following entries: + * - "totalDeletedKeys": the total number of deleted keys + * - "totalReplicatedDataSize": the total replicated size for deleted keys + * - "totalUnreplicatedDataSize": the total unreplicated size for deleted keys + * + * + * Example response: + * { + * "totalDeletedKeys": 8, + * "totalReplicatedDataSize": 90000, + * "totalUnreplicatedDataSize": 30000 + * } + */ + @GET + @Path("/deletePending/summary") + public Response getDeletedKeySummary() { + // Create a HashMap for the keysSummary + Map keysSummary = new HashMap<>(); + // Create a keys summary for deleted keys + createKeysSummaryForDeletedKey(keysSummary); + return Response.ok(keysSummary).build(); + } + /** * This method retrieves set of keys/files pending for deletion. *
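The two summary endpoints introduced above return a flat JSON map instead of the nested keysSummary field that was previously embedded in the paginated key listings. As a rough usage sketch only (the Recon address below is an assumption; 9888 is the usual default HTTP port, but the real address depends on cluster configuration), they could be queried like this:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

/**
 * Minimal sketch (not part of this patch) of querying the new Recon
 * key-summary endpoints.
 */
public final class ReconKeySummaryClient {

  // Assumed Recon address for illustration only.
  private static final String RECON = "http://localhost:9888";

  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();

    for (String path : new String[] {
        "/api/v1/keys/open/summary",
        "/api/v1/keys/deletePending/summary"}) {
      HttpRequest request = HttpRequest.newBuilder()
          .uri(URI.create(RECON + path))
          .GET()
          .build();
      // Expected body is a flat JSON map, e.g.
      // {"totalOpenKeys":8,"totalReplicatedDataSize":90000,
      //  "totalUnreplicatedDataSize":30000}
      HttpResponse<String> response =
          client.send(request, HttpResponse.BodyHandlers.ofString());
      System.out.println(path + " -> " + response.body());
    }
  }
}

The overview.tsx change below switches the Recon web UI to these same endpoints and reads the fields directly from the response body rather than from a keysSummary wrapper.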

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java index 425454ffcc6..2777cf53b49 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java @@ -23,19 +23,13 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; /** * HTTP Response wrapped for keys insights. */ public class KeyInsightInfoResponse { - /** Keys summary. Includes aggregated information about the keys. */ - @JsonProperty("keysSummary") - private Map keysSummary; - /** last key sent. */ @JsonProperty("lastKey") private String lastKey; @@ -81,15 +75,6 @@ public KeyInsightInfoResponse() { fsoKeyInfoList = new ArrayList<>(); repeatedOmKeyInfoList = new ArrayList<>(); deletedDirInfoList = new ArrayList<>(); - keysSummary = new HashMap<>(); - } - - public Map getKeysSummary() { - return keysSummary; - } - - public void setKeysSummary(Map keysSummary) { - this.keysSummary = keysSummary; } public String getLastKey() { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json index d7efeea053c..5dc3c0d310f 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json @@ -29,8 +29,8 @@ "/heatmap/readaccess?startDate=*&path=*&entityType=key": "/keyHeatmap", "/heatmap/readaccess?startDate=*&path=*&entityType=volume": "/heatmap", "/features/disabledFeatures": "/disabledFeatures", - "/keys/open?limit=0": "/keysOpenSummary", - "/keys/deletePending?limit=1": "/keysdeletePendingSummary", + "/keys/open/summary": "/keysOpenSummary", + "/keys/deletePending/summary": "/keysdeletePendingSummary", "/containers/mismatch?limit=*&prevKey=11&missingIn=OM" : "/omMismatch1", "/containers/mismatch?limit=*&prevKey=21&missingIn=OM" : "/omMismatch2", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx index 09720fbc223..2908564bb14 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx @@ -114,8 +114,8 @@ export class Overview extends React.Component, IOverviewS axios.all([ axios.get('/api/v1/clusterState'), axios.get('/api/v1/task/status'), - axios.get('/api/v1/keys/open?limit=0'), - axios.get('/api/v1/keys/deletePending?limit=1'), + axios.get('/api/v1/keys/open/summary'), + axios.get('/api/v1/keys/deletePending/summary'), ]).then(axios.spread((clusterStateResponse, taskstatusResponse, openResponse, deletePendingResponse) => { const clusterState: IClusterStateResponse = clusterStateResponse.data; @@ -140,12 +140,12 @@ export class Overview extends React.Component, IOverviewS lastRefreshed: Number(moment()), lastUpdatedOMDBDelta: omDBDeltaObject && omDBDeltaObject.lastUpdatedTimestamp, lastUpdatedOMDBFull: omDBFullObject && omDBFullObject.lastUpdatedTimestamp, - 
openSummarytotalUnrepSize: openResponse.data && openResponse.data.keysSummary && openResponse.data.keysSummary.totalUnreplicatedDataSize, - openSummarytotalRepSize: openResponse.data && openResponse.data.keysSummary && openResponse.data.keysSummary.totalReplicatedDataSize, - openSummarytotalOpenKeys: openResponse.data && openResponse.data.keysSummary && openResponse.data.keysSummary.totalOpenKeys, - deletePendingSummarytotalUnrepSize: deletePendingResponse.data && deletePendingResponse.data.keysSummary && deletePendingResponse.data.keysSummary.totalUnreplicatedDataSize, - deletePendingSummarytotalRepSize: deletePendingResponse.data && deletePendingResponse.data.keysSummary && deletePendingResponse.data.keysSummary.totalReplicatedDataSize, - deletePendingSummarytotalDeletedKeys: deletePendingResponse.data && deletePendingResponse.data.keysSummary && deletePendingResponse.data.keysSummary.totalDeletedKeys + openSummarytotalUnrepSize: openResponse.data && openResponse.data.totalUnreplicatedDataSize, + openSummarytotalRepSize: openResponse.data && openResponse.data.totalReplicatedDataSize, + openSummarytotalOpenKeys: openResponse.data && openResponse.data.totalOpenKeys, + deletePendingSummarytotalUnrepSize: deletePendingResponse.data && deletePendingResponse.data.totalUnreplicatedDataSize, + deletePendingSummarytotalRepSize: deletePendingResponse.data && deletePendingResponse.data.totalReplicatedDataSize, + deletePendingSummarytotalDeletedKeys: deletePendingResponse.data && deletePendingResponse.data.totalDeletedKeys }); })).catch(error => { this.setState({ diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 83015379801..5695c752c6e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -231,15 +231,18 @@ public void testKeyCountsForValidAndInvalidKeyPrefix() { "openFileTable" + "UnReplicatedDataSize", 50L); Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(-1, "", true, true); - KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) - openKeyInfoResp.getEntity(); - Assertions.assertNotNull(keyInsightInfoResp); - Map summary = keyInsightInfoResp.getKeysSummary(); + omdbInsightEndpoint.getOpenKeySummary(); + Assertions.assertNotNull(openKeyInfoResp); + + Map openKeysSummary = + (Map) openKeyInfoResp.getEntity(); // Assert that the key prefix format is accepted in the global stats - Assertions.assertEquals(6L, summary.get("totalOpenKeys")); - Assertions.assertEquals(300L, summary.get("totalReplicatedDataSize")); - Assertions.assertEquals(100L, summary.get("totalUnreplicatedDataSize")); + Assertions.assertEquals(6L, + openKeysSummary.get("totalOpenKeys")); + Assertions.assertEquals(300L, + openKeysSummary.get("totalReplicatedDataSize")); + Assertions.assertEquals(100L, + openKeysSummary.get("totalUnreplicatedDataSize")); // Delete the previous records and Update the new value for valid key prefix statsDao.deleteById("openKeyTable" + "Count", @@ -256,15 +259,18 @@ public void testKeyCountsForValidAndInvalidKeyPrefix() { 3L); openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(-1, "", true, true); - keyInsightInfoResp = (KeyInsightInfoResponse) - openKeyInfoResp.getEntity(); - Assertions.assertNotNull(keyInsightInfoResp); - summary = 
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
index 3f80087ebb1..179c5eeee79 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java
@@ -72,6 +72,15 @@ public final class S3GatewayConfigKeys {
   public static final boolean OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED_DEFAULT
       = true;
 
+  /**
+   * Configuration key that enables shallow listing of keys when the list
+   * request uses the '/' delimiter.
+   */
+  public static final String OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED =
+      "ozone.s3g.list-keys.shallow.enabled";
+  public static final boolean OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT =
+      true;
+
   /**
    * Never constructed.
    */
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 9842ccaee0d..4c0f055ecbb 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -20,6 +20,7 @@
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.S3GAction;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -45,6 +46,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.inject.Inject;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
@@ -69,6 +71,9 @@
 import java.util.Set;
 
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED;
+import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
@@ -82,6 +87,11 @@ public class BucketEndpoint extends EndpointBase {
   private static final Logger LOG =
       LoggerFactory.getLogger(BucketEndpoint.class);
 
+  private boolean listKeysShallowEnabled;
+
+  @Inject
+  private OzoneConfiguration ozoneConfiguration;
+
   /**
    * Rest endpoint to list objects in a specific bucket.
    *
@@ -133,18 +143,19 @@ public Response get(
         startAfter = marker;
       }
 
+      // If both the continuation token and startAfter are provided,
+      // startAfter is ignored.
+      String prevKey = continueToken != null ? decodedToken.getLastKey()
+          : startAfter;
+
+      // If shallow is true, list only the immediate children
+      // delimited by OZONE_URI_DELIMITER.
+      boolean shallow = listKeysShallowEnabled
+          && OZONE_URI_DELIMITER.equals(delimiter);
+
       OzoneBucket bucket = getBucket(bucketName);
-      if (startAfter != null && continueToken != null) {
-        // If continuation token and start after both are provided, then we
-        // ignore start After
-        ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
-      } else if (startAfter != null && continueToken == null) {
-        ozoneKeyIterator = bucket.listKeys(prefix, startAfter);
-      } else if (startAfter == null && continueToken != null) {
-        ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
-      } else {
-        ozoneKeyIterator = bucket.listKeys(prefix);
-      }
+      ozoneKeyIterator = bucket.listKeys(prefix, prevKey, shallow);
+
     } catch (OMException ex) {
       AUDIT.logReadFailure(
           buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
@@ -701,6 +712,8 @@ private void addKey(ListObjectResponse response, OzoneKey next) {
 
   @Override
   public void init() {
-
+    listKeysShallowEnabled = ozoneConfiguration.getBoolean(
+        OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED,
+        OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT);
   }
 }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index ad2211cd37e..62149a73d10 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -919,6 +919,11 @@ private Response createMultipartKey(OzoneVolume volume, String bucket,
         throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (isAccessDenied(ex)) {
         throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
+      } else if (ex.getResult() == ResultCodes.INVALID_PART) {
+        OS3Exception os3Exception = newError(
+            S3ErrorTable.INVALID_ARGUMENT, String.valueOf(partNumber), ex);
+        os3Exception.setErrorMessage(ex.getMessage());
+        throw os3Exception;
       }
       throw ex;
     }
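For reference, the shallow-list switch introduced above is read once in `init()` and only takes effect when the S3 request uses the `/` delimiter. A small sketch of toggling it through `OzoneConfiguration` (standard Hadoop `Configuration` accessors; the literal key mirrors the constant defined in this patch):

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/** Sketch only: toggling the shallow-list optimization for the S3 gateway. */
public final class ShallowListConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The optimization defaults to true; disable it explicitly if needed.
    conf.setBoolean("ozone.s3g.list-keys.shallow.enabled", false);

    boolean enabled = conf.getBoolean(
        "ozone.s3g.list-keys.shallow.enabled", true);
    // When enabled and delimiter == "/", BucketEndpoint.get() ends up calling
    // bucket.listKeys(prefix, prevKey, /* shallow */ true).
    System.out.println("shallow list-keys enabled: " + enabled);
  }
}
```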
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index d613297fc05..e7ed9face40 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -52,6 +52,8 @@
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.util.Time;
 
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+
 /**
  * In-memory ozone bucket for testing.
  */
@@ -339,6 +341,45 @@ public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
         .iterator();
   }
 
+  public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
+      String prevKey, boolean shallow) throws IOException {
+    if (!shallow) {
+      return prevKey == null ? listKeys(keyPrefix)
+          : listKeys(keyPrefix, prevKey);
+    }
+
+    Map<String, OzoneKeyDetails> sortedKey = new TreeMap<>(keyDetails);
+    List<OzoneKey> ozoneKeys = sortedKey.values()
+        .stream()
+        .filter(key -> key.getName().startsWith(keyPrefix))
+        .map(key -> {
+          String[] res = key.getName().split(OZONE_URI_DELIMITER);
+          String newKeyName;
+          if (res.length < 2) {
+            newKeyName = key.getName();
+          } else if (res.length == 2) {
+            newKeyName = res[0] + OZONE_URI_DELIMITER + res[1];
+          } else {
+            newKeyName =
+                res[0] + OZONE_URI_DELIMITER + res[1] + OZONE_URI_DELIMITER;
+          }
+          return new OzoneKey(key.getVolumeName(),
+              key.getBucketName(), newKeyName,
+              key.getDataSize(),
+              key.getCreationTime().getEpochSecond() * 1000,
+              key.getModificationTime().getEpochSecond() * 1000,
+              key.getReplicationConfig(), key.isFile());
+        }).collect(Collectors.toList());
+
+    if (prevKey != null) {
+      return ozoneKeys.stream()
+          .filter(key -> key.getName().compareTo(prevKey) > 0)
+          .collect(Collectors.toList())
+          .iterator();
+    }
+    return ozoneKeys.iterator();
+  }
+
   @Override
   public void deleteKey(String key) throws IOException {
     keyDetails.remove(key);
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java
index f77c970f5fc..e011c1d5975 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java
@@ -36,6 +36,8 @@
 
 /**
  * Testing basic object list browsing.
+ * Note: a '/' delimiter triggers the shallow-list logic, which lists only
+ * the immediate children of the prefix.
  */
 public class TestBucketList {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
index edf1488f168..a04af4be6be 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
@@ -45,6 +45,7 @@
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.isNull;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.when;
 
@@ -168,7 +169,8 @@ public void testListMultiUpload() throws IOException {
   public void testListKey() throws IOException {
     Mockito.when(objectStore.getVolume(anyString())).thenReturn(volume);
     Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket);
-    doThrow(exception).when(bucket).listKeys(anyString());
+    doThrow(exception).when(bucket).listKeys(anyString(), isNull(),
+        anyBoolean());
 
     BucketEndpoint bucketEndpoint = new BucketEndpoint();
     bucketEndpoint.setClient(client);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java
index ac70ddb75f0..39044db797a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java
@@ -59,6 +59,7 @@ public static class LinkBucket {
     private Instant creationTime;
     private Instant modificationTime;
     private String owner;
+    private Boolean link;
 
     LinkBucket(OzoneBucket ozoneBucket) {
       this.volumeName = ozoneBucket.getVolumeName();
@@ -68,6 +69,7 @@ public static class LinkBucket {
       this.creationTime = ozoneBucket.getCreationTime();
       this.modificationTime = ozoneBucket.getModificationTime();
       this.owner = ozoneBucket.getOwner();
+      this.link = ozoneBucket.isLink();
     }
 
     public String getVolumeName() {
@@ -97,6 +99,10 @@ public Instant getModificationTime() {
 
     public String getOwner() {
       return owner;
    }
+
+    public Boolean getLink() {
+      return link;
+    }
   }
 }
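For reference, the test stub above collapses every matching key name to at most two path segments, appending a trailing delimiter when deeper levels exist, so that only immediate children show up in shallow results. A self-contained illustration of that collapsing rule (plain Java, no Ozone dependencies; the delimiter value mirrors `OZONE_URI_DELIMITER`):

```java
/** Sketch only: the key-name collapsing rule used by the shallow-list stub. */
public final class ShallowNameDemo {
  private static final String DELIM = "/"; // mirrors OZONE_URI_DELIMITER

  static String collapse(String keyName) {
    String[] res = keyName.split(DELIM);
    if (res.length < 2) {
      return keyName;                        // "file1"          -> "file1"
    } else if (res.length == 2) {
      return res[0] + DELIM + res[1];        // "dir1/file1"     -> "dir1/file1"
    }
    return res[0] + DELIM + res[1] + DELIM;  // "a/b/c/file1"    -> "a/b/"
  }

  public static void main(String[] args) {
    System.out.println(collapse("file1"));
    System.out.println(collapse("dir1/file1"));
    System.out.println(collapse("dir1/dir2/file1"));
  }
}
```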