
Commit

Merge remote-tracking branch 'asf/master' into HDDS-9159-snap-du-ref
Change-Id: I6ee9a61d4d373cb6aec0d5c2a1531de590ef80be
smengcl committed Aug 17, 2023
2 parents 8aa3936 + 5cac346 commit 23e7d02
Showing 64 changed files with 2,649 additions and 310 deletions.
@@ -164,9 +164,8 @@ public BlockLocationInfo build() {
}

@Override
public String toString() {
return "{blockID={containerID=" + blockID.getContainerID() +
", localID=" + blockID.getLocalID() + "}" +
public String toString() {
return "{blockID={" + blockID + "}" +
", length=" + length +
", offset=" + offset +
", token=" + token +
@@ -377,6 +377,10 @@ private OzoneConsts() {
// For Multipart upload
public static final int OM_MULTIPART_MIN_SIZE = 5 * 1024 * 1024;

// Refer to:
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
public static final int MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD = 10000;

// GRPC block token metadata header and context key
public static final String OZONE_BLOCK_TOKEN = "blocktoken";
public static final Context.Key<UserGroupInformation> UGI_CTX_KEY =
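For context only: taken together with OM_MULTIPART_MIN_SIZE above, the new constant bounds what a single multipart upload can address when every part is at the minimum size. A quick back-of-the-envelope check (illustrative arithmetic, not part of the change):

```java
// Illustrative arithmetic only; the constant names mirror those in the diff above.
public final class MultipartLimitsSketch {
  public static void main(String[] args) {
    long minPartSize = 5L * 1024 * 1024;   // OM_MULTIPART_MIN_SIZE (5 MiB)
    int maxParts = 10_000;                 // MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD
    // Largest object addressable if every part is exactly at the minimum size:
    System.out.println(minPartSize * maxParts); // 52428800000 bytes (~48.8 GiB)
  }
}
```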
10 changes: 10 additions & 0 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1787,6 +1787,16 @@
service principal. </description>
</property>

<property>
<name>ozone.s3g.list-keys.shallow.enabled</name>
<value>true</value>
<tag>OZONE, S3GATEWAY</tag>
<description>If true, the s3g list interface performs an optimized shallow
listing when called with the delimiter '/' parameter, which is considerably
more efficient when a bucket contains a large number of keys.
</description>
</property>

<property>
<name>ozone.om.save.metrics.interval</name>
<value>5m</value>
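A minimal sketch of setting the new property programmatically, assuming the standard OzoneConfiguration class from the Ozone codebase (only the property key itself comes from the diff above):

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

// Sketch only: shows the new property key being toggled in code rather than XML.
public final class ShallowListKeysConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Same default as ozone-default.xml; set to false to fall back to the
    // non-shallow listing behaviour.
    conf.setBoolean("ozone.s3g.list-keys.shallow.enabled", true);
    System.out.println(conf.getBoolean("ozone.s3g.list-keys.shallow.enabled", true));
  }
}
```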
5 changes: 4 additions & 1 deletion hadoop-hdds/docs/content/security/SecurityAcls.md
@@ -81,7 +81,10 @@ allows the user to overwrite an existing ozone key.
Where an _scope_ can be:

1. **ACCESS** – Access ACL is applied only to the specific object and not inheritable. It controls the access to the object itself.
2. **DEFAULT** - Default ACL is applied to the specific object and will be inherited by object's descendants. Default ACLs cannot be set on keys (as there can be no objects under a key).
2. **DEFAULT** - Default ACL is applied to the specific object and will be inherited by object's descendants. Default ACLs cannot be set on keys (as there can be no objects under a key). <br>
_Note_: ACLs inherited from a parent's DEFAULT ACLs follow these rules, depending on the bucket layout (see the sketch below):
- **Legacy with EnableFileSystem or FSO**: inherit the immediate parent's DEFAULT ACLs. If the parent has none, inherit the bucket's DEFAULT ACLs.
- **Legacy with DisableFileSystem or OBS**: inherit the bucket's DEFAULT ACLs.

## Ozone Native ACL APIs

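A self-contained sketch of the inheritance rules in the note above; the class, method, and string-based ACL lists are hypothetical and purely illustrative, not the OzoneManager implementation:

```java
import java.util.List;

/**
 * Illustrative sketch only: expresses the DEFAULT-ACL inheritance rules from the
 * note above. "fileSystemSemantics" stands for FSO, or Legacy with filesystem
 * paths enabled; ACLs are plain strings here for brevity.
 */
final class DefaultAclInheritanceSketch {
  static List<String> inheritedDefaultAcls(boolean fileSystemSemantics,
                                           List<String> parentDefaultAcls,
                                           List<String> bucketDefaultAcls) {
    if (fileSystemSemantics
        && parentDefaultAcls != null && !parentDefaultAcls.isEmpty()) {
      // Legacy (EnableFileSystem) or FSO: the immediate parent's DEFAULT ACLs win.
      return parentDefaultAcls;
    }
    // Parent has no DEFAULT ACLs, or Legacy (DisableFileSystem) / OBS:
    // fall back to the bucket's DEFAULT ACLs.
    return bucketDefaultAcls;
  }
}
```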
6 changes: 4 additions & 2 deletions hadoop-hdds/docs/content/security/SecurityAcls.zh.md
@@ -67,8 +67,10 @@ _权限_ 可选的值包括::
_范围_ 可选的值包括::

1. **ACCESS** – 这类 ACL 仅作用于对象本身,不能被继承。它控制对对象本身的访问。
2. **DEFAULT** - 这类 ACL 不仅作用于对象本身,还会被对象的后代继承。不能在叶子对象上设置该类 ACL(因为叶子对象下不能再有其他对象)。

2. **DEFAULT** - 这类 ACL 不仅作用于对象本身,还会被对象的后代继承。不能在叶子对象上设置该类 ACL(因为叶子对象下不能再有其他对象)。 <br>
_注意_:从父级默认 ACL 继承的 ACL, 将根据不同的桶布局遵循以下规则:
- **启用文件系统的 Legacy 或 FSO**:继承直接父目录的默认ACL。如果直接父目录没有默认ACL,则继承存储桶的默认ACL。
- **禁用文件系统的 Legacy 或 OBS**:继承桶的默认ACL。

## Ozone 原生 ACL API

@@ -1522,6 +1522,11 @@ public static Logger getLog() {
return LOG;
}

@VisibleForTesting
public String getCurrentCompactionLogPath() {
return currentCompactionLogPath;
}

@VisibleForTesting
public ConcurrentHashMap<String, CompactionNode> getCompactionNodeMap() {
return compactionNodeMap;
@@ -318,6 +318,8 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
}

if (!enoughForData) {
LOG.debug("Datanode {} has no volumes with enough space to allocate {} " +
"bytes for data.", datanodeDetails, dataSizeRequired);
return false;
}

@@ -332,8 +334,11 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
} else {
enoughForMeta = true;
}

return enoughForData && enoughForMeta;
if (!enoughForMeta) {
LOG.debug("Datanode {} has no volumes with enough space to allocate {} " +
"bytes for metadata.", datanodeDetails, metadataSizeRequired);
}
return enoughForMeta;
}

/**
@@ -481,16 +486,20 @@ public boolean isValidNode(DatanodeDetails datanodeDetails,
if (datanodeInfo == null) {
LOG.error("Failed to find the DatanodeInfo for datanode {}",
datanodeDetails);
} else {
if (datanodeInfo.getNodeStatus().isNodeWritable() &&
(hasEnoughSpace(datanodeInfo, metadataSizeRequired,
dataSizeRequired))) {
LOG.debug("Datanode {} is chosen. Required metadata size is {} and " +
"required data size is {}",
datanodeDetails, metadataSizeRequired, dataSizeRequired);
return true;
}
return false;
}
NodeStatus nodeStatus = datanodeInfo.getNodeStatus();
if (nodeStatus.isNodeWritable() &&
(hasEnoughSpace(datanodeInfo, metadataSizeRequired,
dataSizeRequired))) {
LOG.debug("Datanode {} is chosen. Required metadata size is {} and " +
"required data size is {} and NodeStatus is {}",
datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus);
return true;
}
LOG.debug("Datanode {} is not chosen. Required metadata size is {} and " +
"required data size is {} and NodeStatus is {}",
datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus);
return false;
}

@@ -78,7 +78,8 @@ public ContainerSafeModeRule(String ruleName, EventQueue eventQueue,

Optional.ofNullable(container.getState())
.filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED ||
state == HddsProtos.LifeCycleState.CLOSED))
state == HddsProtos.LifeCycleState.CLOSED)
&& container.getNumberOfKeys() > 0)
.ifPresent(s -> containerMap.put(container.getContainerID(),
container));
});
@@ -166,7 +167,8 @@ private void reInitializeRule() {

Optional.ofNullable(container.getState())
.filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED ||
state == HddsProtos.LifeCycleState.CLOSED))
state == HddsProtos.LifeCycleState.CLOSED)
&& container.getNumberOfKeys() > 0)
.ifPresent(s -> containerMap.put(container.getContainerID(),
container));
});
@@ -83,7 +83,6 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

import java.util.Map;
@@ -669,7 +668,6 @@ public void testSetNodeOpStateAndCommandFired()
* @throws TimeoutException
*/
@Test
@Disabled("HDDS-5098")
public void testScmDetectStaleAndDeadNode()
throws IOException, InterruptedException, AuthenticationException {
final int interval = 100;
@@ -721,13 +719,13 @@ public void testScmDetectStaleAndDeadNode()
"Expected to find 1 stale node");
assertEquals(staleNode.getUuid(), staleNodeList.get(0).getUuid(),
"Stale node is not the expected ID");
Thread.sleep(1000);

Map<String, Map<String, Integer>> nodeCounts = nodeManager.getNodeCount();
assertEquals(1,
nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
.get(HddsProtos.NodeState.STALE.name()).intValue());

Thread.sleep(1000);
// heartbeat good nodes again.
for (DatanodeDetails dn : nodeList) {
nodeManager.processHeartbeat(dn, layoutInfo);
@@ -127,6 +127,7 @@ private void testSafeMode(int numContainers) throws Exception {
// Currently, only considered containers which are not in open state.
for (ContainerInfo container : containers) {
container.setState(HddsProtos.LifeCycleState.CLOSED);
container.setNumberOfKeys(10);
}
scmSafeModeManager = new SCMSafeModeManager(
config, containers, null, null, queue,
@@ -160,6 +161,7 @@ public void testSafeModeExitRule() throws Exception {
// container list
for (ContainerInfo container : containers) {
container.setState(HddsProtos.LifeCycleState.CLOSED);
container.setNumberOfKeys(10);
}
scmSafeModeManager = new SCMSafeModeManager(
config, containers, null, null, queue,
@@ -501,12 +503,19 @@ public void testContainerSafeModeRule() throws Exception {
// Add 100 containers to the list of containers in SCM
containers.addAll(HddsTestUtils.getContainerInfo(25 * 4));
// Assign CLOSED state to first 25 containers and OPEN state to rest
// of the containers
// of the containers. Set container key count = 10 in each container.
for (ContainerInfo container : containers.subList(0, 25)) {
container.setState(HddsProtos.LifeCycleState.CLOSED);
container.setNumberOfKeys(10);
}
for (ContainerInfo container : containers.subList(25, 100)) {
container.setState(HddsProtos.LifeCycleState.OPEN);
container.setNumberOfKeys(10);
}

// Set the last 5 closed containers to be empty
for (ContainerInfo container : containers.subList(20, 25)) {
container.setNumberOfKeys(0);
}

scmSafeModeManager = new SCMSafeModeManager(
@@ -515,14 +524,15 @@
assertTrue(scmSafeModeManager.getInSafeMode());

// When 10 CLOSED containers are reported by DNs, the computed container
// threshold should be 10/25 as there are only 25 CLOSED containers.
// threshold should be 10/20 as there are only 20 CLOSED NON-EMPTY
// containers.
// Containers in OPEN state should not contribute towards list of
// containers while calculating container threshold in SCMSafeNodeManager
testContainerThreshold(containers.subList(0, 10), 0.4);
testContainerThreshold(containers.subList(0, 10), 0.5);
assertTrue(scmSafeModeManager.getInSafeMode());

// When remaining 15 OPEN containers are reported by DNs, the container
// threshold should be (10+15)/25.
// When remaining 10 CLOSED NON-EMPTY containers are reported by DNs,
// the container threshold should be (10+10)/20.
testContainerThreshold(containers.subList(10, 25), 1.0);

GenericTestUtils.waitFor(() -> !scmSafeModeManager.getInSafeMode(),
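To make the threshold arithmetic in the updated test comments concrete, a small stand-alone sketch (illustrative only, not the SCM implementation) of how the rule now counts only non-empty CLOSED/QUASI_CLOSED containers:

```java
// Illustrative arithmetic only: mirrors the numbers in the test comments above.
public final class ContainerSafeModeThresholdSketch {
  static double reportedFraction(int reportedNonEmptyClosed, int totalNonEmptyClosed) {
    return totalNonEmptyClosed == 0
        ? 1.0
        : (double) reportedNonEmptyClosed / totalNonEmptyClosed;
  }

  public static void main(String[] args) {
    // 25 CLOSED containers, 5 of them empty -> only 20 count toward the rule.
    System.out.println(reportedFraction(10, 20)); // 0.5, still inside safe mode
    System.out.println(reportedFraction(20, 20)); // 1.0, rule satisfied
  }
}
```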