fix(hadoop): Fix patches for 3.3.4 #564

Merged 2 commits on Feb 15, 2024
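For context: the underlying 006-HDFS-17378 patches replace calls like `FSPermissionChecker.setOperationType(null)` in FSNamesystem with a named operation (for example "create", "recoverLease", "fsync"), so that an external authorizer receives an operationType for these RPCs; this PR mainly re-aligns the hunk offsets and one context line so the same patches apply cleanly to the Hadoop 3.3.4 sources. Below is a minimal, self-contained sketch of that pattern — the `PermissionChecker` class and the `authorize` helper are simplified stand-ins for illustration only, not the real HDFS types.

```java
// Sketch of the pattern applied by the 006-HDFS-17378 patches.
// PermissionChecker is a stand-in for org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker,
// not the real HDFS class.
public class OperationTypeSketch {

  /** Stand-in: stores the current operation type in a thread-local, like FSPermissionChecker. */
  static final class PermissionChecker {
    private static final ThreadLocal<String> OPERATION_TYPE = new ThreadLocal<>();

    static void setOperationType(String operationType) {
      OPERATION_TYPE.set(operationType);
    }

    static String getOperationType() {
      return OPERATION_TYPE.get();
    }
  }

  /** Before the patch: the authorizer cannot tell which RPC triggered the permission check. */
  static void recoverLeaseUnpatched(String src) {
    PermissionChecker.setOperationType(null);
    authorize(src);
  }

  /** After the patch: each operation passes its name, e.g. "create", "recoverLease", "fsync". */
  static void recoverLeasePatched(String src) {
    final String operationName = "recoverLease";
    PermissionChecker.setOperationType(operationName);
    authorize(src);
  }

  /** Stand-in for the permission check an external authorizer plugin would perform. */
  static void authorize(String src) {
    System.out.println("authorize " + src + " with operationType="
        + PermissionChecker.getOperationType());
  }

  public static void main(String[] args) {
    recoverLeaseUnpatched("/demo/file"); // prints operationType=null
    recoverLeasePatched("/demo/file");   // prints operationType=recoverLease
  }
}
```

The real patches apply this same substitution across a dozen FSNamesystem operations (create, recoverLease, getAdditionalDatanode, abandonBlock, completeFile, fsync, and others), which is why the hunk offsets had to be re-aligned for 3.3.4.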
CHANGELOG.md (3 changes: 2 additions & 1 deletion)
@@ -45,7 +45,7 @@ All notable changes to this project will be documented in this file.
- hadoop: Build from source ([#526]).
- superset: Add patch that fixes saved queries export ([#539]).
- inotify-tools: Download from Nexus instead of using the EPEL 8 repository ([#549]).
- hadoop: Add patches to fix missing operationType for some operations in authorizer ([#555]).
- hadoop: Add patches to fix missing operationType for some operations in authorizer ([#555], [#564]).
- airflow: bump git-sync to `4.2.1` ([#562]).

### Removed
@@ -82,6 +82,7 @@ All notable changes to this project will be documented in this file.
[#559]: https://github.com/stackabletech/docker-images/pull/559
[#560]: https://github.com/stackabletech/docker-images/pull/560
[#562]: https://github.com/stackabletech/docker-images/pull/562
[#564]: https://github.com/stackabletech/docker-images/pull/564

## [23.11.0] - 2023-11-30

hadoop/stackable/patches/3.3.4/006-HDFS-17378-3.3.4.patch (40 changes: 20 additions & 20 deletions)
@@ -1,8 +1,8 @@
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9855b434e9c4..b3781ee1dd26 100644
index 243f62295ca4..ba3caa6b6c04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2530,15 +2530,16 @@ void unsetStoragePolicy(String src) throws IOException {
@@ -2501,15 +2501,16 @@ void unsetStoragePolicy(String src) throws IOException {
* @throws IOException
*/
BlockStoragePolicy getStoragePolicy(String src) throws IOException {
@@ -21,7 +21,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
}
}

@@ -2558,15 +2559,16 @@ BlockStoragePolicy[] getStoragePolicies() throws IOException {
@@ -2529,15 +2530,16 @@ BlockStoragePolicy[] getStoragePolicies() throws IOException {
}

long getPreferredBlockSize(String src) throws IOException {
@@ -40,23 +40,23 @@ index 9855b434e9c4..b3781ee1dd26 100644
}
}

@@ -2619,7 +2621,6 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions,
@@ -2590,7 +2592,6 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
String storagePolicy, boolean logRetryCache) throws IOException {
-
HdfsFileStatus status;
try {
status = startFileInt(src, permissions, holder, clientMachine, flag,
@@ -2639,6 +2640,7 @@ private HdfsFileStatus startFileInt(String src,
@@ -2610,6 +2611,7 @@ private HdfsFileStatus startFileInt(String src,
long blockSize, CryptoProtocolVersion[] supportedVersions,
String ecPolicyName, String storagePolicy, boolean logRetryCache)
throws IOException {
+ final String operationName = "create";
if (NameNode.stateChangeLog.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
builder.append("DIR* NameSystem.startFile: src=").append(src)
@@ -2676,7 +2678,7 @@ private HdfsFileStatus startFileInt(String src,
@@ -2647,7 +2649,7 @@ private HdfsFileStatus startFileInt(String src,

checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker();
@@ -65,7 +65,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -2740,7 +2742,7 @@ private HdfsFileStatus startFileInt(String src,
@@ -2711,7 +2713,7 @@ private HdfsFileStatus startFileInt(String src,
dir.writeUnlock();
}
} finally {
@@ -74,7 +74,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
// There might be transactions logged while trying to recover the lease.
// They need to be sync'ed even when an exception was thrown.
if (!skipSync) {
@@ -2769,10 +2771,11 @@ private HdfsFileStatus startFileInt(String src,
@@ -2740,10 +2742,11 @@ private HdfsFileStatus startFileInt(String src,
*/
boolean recoverLease(String src, String holder, String clientMachine)
throws IOException {
@@ -87,7 +87,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -2793,7 +2796,7 @@ boolean recoverLease(String src, String holder, String clientMachine)
@@ -2764,7 +2767,7 @@ boolean recoverLease(String src, String holder, String clientMachine)
skipSync = true;
throw se;
} finally {
@@ -96,24 +96,24 @@ index 9855b434e9c4..b3781ee1dd26 100644
// There might be transactions logged while trying to recover the lease.
// They need to be sync'ed even when an exception was thrown.
if (!skipSync) {
@@ -3010,6 +3013,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
@@ -2981,6 +2984,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
final Set<Node> excludes,
final int numAdditionalNodes, final String clientName
) throws IOException {
+ final String operationName = "getAdditionalDatanode";
//check if the feature is enabled
dtpReplaceDatanodeOnFailure.checkEnabled();

@@ -3021,7 +3025,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
@@ -2992,7 +2996,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
final BlockType blockType;
checkOperation(OperationCategory.WRITE);
checkOperation(OperationCategory.READ);
final FSPermissionChecker pc = getPermissionChecker();
- FSPermissionChecker.setOperationType(null);
+ FSPermissionChecker.setOperationType(operationName);
readLock();
try {
// Changing this operation category to WRITE instead of making getAdditionalDatanode as a
@@ -3047,7 +3051,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
checkOperation(OperationCategory.READ);
@@ -3015,7 +3019,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
"src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
src, fileId, blk, clientName, clientMachine));
} finally {
@@ -122,7 +122,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
}

if (clientnode == null) {
@@ -3069,11 +3073,12 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
@@ -3037,11 +3041,12 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
*/
void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
throws IOException {
@@ -136,7 +136,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -3082,7 +3087,7 @@ void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
@@ -3050,7 +3055,7 @@ void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} is " +
"removed from pendingCreates", b);
} finally {
@@ -145,7 +145,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
}
getEditLog().logSync();
}
@@ -3136,10 +3141,11 @@ INodeFile checkLease(INodesInPath iip, String holder, long fileId)
@@ -3104,10 +3109,11 @@ INodeFile checkLease(INodesInPath iip, String holder, long fileId)
boolean completeFile(final String src, String holder,
ExtendedBlock last, long fileId)
throws IOException {
@@ -158,7 +158,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -3147,7 +3153,7 @@ boolean completeFile(final String src, String holder,
@@ -3115,7 +3121,7 @@ boolean completeFile(final String src, String holder,
success = FSDirWriteFileOp.completeFile(this, pc, src, holder, last,
fileId);
} finally {
@@ -167,7 +167,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
}
getEditLog().logSync();
if (success) {
@@ -3572,10 +3578,11 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
@@ -3536,10 +3542,11 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
*/
void fsync(String src, long fileId, String clientName, long lastBlockLength)
throws IOException {
@@ -180,7 +180,7 @@ index 9855b434e9c4..b3781ee1dd26 100644
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -3589,7 +3596,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength)
@@ -3553,7 +3560,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength)
}
FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, false);
} finally {