Skip to content

Commit

Permalink
fix leaks in tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Sadanand Shenoy committed Oct 8, 2024
1 parent 7f8fd06 commit 71c69c0
Show file tree
Hide file tree
Showing 12 changed files with 131 additions and 44 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

package org.apache.hadoop.hdds;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ServiceException;

import jakarta.annotation.Nonnull;
Expand Down Expand Up @@ -112,6 +113,8 @@ public final class HddsUtils {

private static final int NO_PORT = -1;

private static boolean ignoreReportingLeak = false;

private HddsUtils() {
}

Expand Down Expand Up @@ -879,8 +882,15 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) {
: null;
}

/**
 * Test-only hook: sets the static {@code ignoreReportingLeak} flag that
 * {@code reportLeak} consults before emitting its "not closed properly"
 * warning. Not intended for production use.
 *
 * @param ignoreReportingLeak true to suppress the normal leak warning and
 *                            log an "ignored" message instead
 */
@VisibleForTesting
public static void setIgnoreReportingLeak(boolean ignoreReportingLeak) {
HddsUtils.ignoreReportingLeak = ignoreReportingLeak;
}

/**
 * Logs a warning to report that the given class is not closed properly.
 * If {@link HddsUtils#ignoreReportingLeak} is set to true, it instead logs
 * that the warning has been ignored. This flag exists only for testing
 * purposes.
 */
public static void reportLeak(Class<?> clazz, String stackTrace, Logger log) {
String warning = String.format("%s is not closed properly", clazz.getSimpleName());
Expand All @@ -889,6 +899,13 @@ public static void reportLeak(Class<?> clazz, String stackTrace, Logger log) {
stackTrace);
warning = warning.concat(debugMessage);
}
log.warn(warning);
if (!ignoreReportingLeak) {
log.warn(warning);
} else {
String ignoreMessage =
String.format("Ignoring warning : %s is not closed correctly",
clazz.getSimpleName());
log.warn(ignoreMessage);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -1831,12 +1831,14 @@ public void testLoopInLinkBuckets() throws Exception {
String rootPath = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume);

try {
FileSystem.get(URI.create(rootPath), cluster.getConf());
fail("Should throw Exception due to loop in Link Buckets");
try (FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
cluster.getConf())) {
fail("Should throw Exception due to loop in Link Buckets" +
" while initialising fs with URI " + fileSystem.getUri());
} catch (OMException oe) {
// Expected exception
assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, oe.getResult());
assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS,
oe.getResult());
} finally {
volume.deleteBucket(linkBucket1Name);
volume.deleteBucket(linkBucket2Name);
Expand All @@ -1854,13 +1856,17 @@ public void testLoopInLinkBuckets() throws Exception {
String rootPath2 = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume);

FileSystem fileSystem = null;
try {
FileSystem.get(URI.create(rootPath2), cluster.getConf());
fileSystem = FileSystem.get(URI.create(rootPath2), cluster.getConf());
} catch (OMException oe) {
// Expected exception
fail("Should not throw Exception and show orphan buckets");
} finally {
volume.deleteBucket(danglingLinkBucketName);
if (fileSystem != null) {
fileSystem.close();
}
}
}

Expand Down Expand Up @@ -2230,7 +2236,17 @@ void testFileSystemWithObjectStoreLayout() throws IOException {
OzoneConfiguration config = new OzoneConfiguration(fs.getConf());
config.set(FS_DEFAULT_NAME_KEY, obsRootPath);

IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config));
IllegalArgumentException e =
assertThrows(IllegalArgumentException.class, () -> {
FileSystem fileSystem = null;
try {
fileSystem = FileSystem.get(config);
} finally {
if (fileSystem != null) {
fileSystem.close();
}
}
});
assertThat(e.getMessage()).contains("OBJECT_STORE, which does not support file system semantics");
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,15 +65,17 @@ public static void listStatusIteratorOnPageSize(OzoneConfiguration conf,
URI uri = FileSystem.getDefaultUri(config);
config.setBoolean(
String.format("fs.%s.impl.disable.cache", uri.getScheme()), true);
FileSystem subject = FileSystem.get(uri, config);
Path dir = new Path(Objects.requireNonNull(rootPath), "listStatusIterator");
try {
Set<String> paths = new TreeSet<>();
for (int dirCount : dirCounts) {
listStatusIterator(subject, dir, paths, dirCount);
try (FileSystem subject = FileSystem.get(uri, config)) {
Path dir = new Path(Objects.requireNonNull(rootPath),
"listStatusIterator");
try {
Set<String> paths = new TreeSet<>();
for (int dirCount : dirCounts) {
listStatusIterator(subject, dir, paths, dirCount);
}
} finally {
subject.delete(dir, true);
}
} finally {
subject.delete(dir, true);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,16 @@ void teardown() throws IOException {
void fileSystemWithUnsupportedDefaultBucketLayout(String layout) {
OzoneConfiguration conf = configWithDefaultBucketLayout(layout);

OMException e = assertThrows(OMException.class,
() -> FileSystem.newInstance(conf));
OMException e = assertThrows(OMException.class, () -> {
FileSystem fileSystem = null;
try {
fileSystem = FileSystem.newInstance(conf);
} finally {
if (fileSystem != null) {
fileSystem.close();
}
}
});
assertThat(e.getMessage())
.contains(ERROR_MAP.get(layout));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.net.StaticMapping;

import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.om.KeyManagerImpl;
import org.apache.hadoop.ozone.om.OmTestManagers;
import org.apache.hadoop.ozone.om.OzoneManager;
Expand Down Expand Up @@ -74,6 +75,8 @@ public class TestOMSortDatanodes {
"edge1", "/rack1"
);

private static OzoneClient ozoneClient;

@BeforeAll
public static void setup() throws Exception {
config = new OzoneConfiguration();
Expand Down Expand Up @@ -109,11 +112,15 @@ public static void setup() throws Exception {
= new OmTestManagers(config, scm.getBlockProtocolServer(),
mockScmContainerClient);
om = omTestManagers.getOzoneManager();
ozoneClient = omTestManagers.getRpcClient();
keyManager = (KeyManagerImpl)omTestManagers.getKeyManager();
}

@AfterAll
public static void cleanup() throws Exception {
if (ozoneClient != null) {
ozoneClient.close();
}
if (scm != null) {
scm.stop();
scm.join();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,14 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
Expand All @@ -51,6 +53,7 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec;
Expand Down Expand Up @@ -221,6 +224,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP,
remoteGroupName, ACCESS, READ);
private static MessageDigest eTagProvider;
private static Set<OzoneClient> ozoneClients = new HashSet<>();

@BeforeAll
public static void initialize() throws NoSuchAlgorithmException {
Expand Down Expand Up @@ -250,6 +254,7 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build
.build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
ozoneClients.add(ozClient);
store = ozClient.getObjectStore();
storageContainerLocationClient =
cluster.getStorageContainerLocationClient();
Expand All @@ -260,9 +265,12 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build
* Close OzoneClient and shutdown MiniOzoneCluster.
*/
static void shutdownCluster() throws IOException {
if (ozClient != null) {
ozClient.close();
for (OzoneClient ozoneClient : ozoneClients) {
if (ozoneClient != null) {
ozoneClient.close();
}
}
ozoneClients.clear();

if (storageContainerLocationClient != null) {
storageContainerLocationClient.close();
Expand All @@ -274,6 +282,7 @@ static void shutdownCluster() throws IOException {
}

private static void setOzClient(OzoneClient ozClient) {
ozoneClients.add(ozClient);
OzoneRpcClientTests.ozClient = ozClient;
}

Expand Down Expand Up @@ -3143,11 +3152,12 @@ void testMultipartUploadOverride(ReplicationConfig replication)

@Test
public void testClientLeakDetector() throws Exception {
HddsUtils.setIgnoreReportingLeak(true);
OzoneClient client = OzoneClientFactory.getRpcClient(cluster.getConf());
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
GenericTestUtils.LogCapturer ozoneClienFactoryLogCapturer =
GenericTestUtils.LogCapturer ozoneClientFactoryLogCapturer =
GenericTestUtils.LogCapturer.captureLogs(
OzoneClientFactory.getLogger());

Expand All @@ -3163,8 +3173,9 @@ public void testClientLeakDetector() throws Exception {
}
client = null;
System.gc();
GenericTestUtils.waitFor(() -> ozoneClienFactoryLogCapturer.getOutput()
.contains("is not closed properly"), 100, 2000);
GenericTestUtils.waitFor(() -> ozoneClientFactoryLogCapturer.getOutput()
.contains("is not closed correctly"), 100, 2000);
HddsUtils.setIgnoreReportingLeak(false);
}
@Test
public void testMultipartUploadOwner() throws Exception {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
Expand Down Expand Up @@ -171,6 +172,7 @@ public class TestKeyManagerImpl {
private static final String VERSIONED_BUCKET_NAME = "versionedbucket1";
private static final String VOLUME_NAME = "vol1";
private static OzoneManagerProtocol writeClient;
private static OzoneClient rpcClient;
private static OzoneManager om;

@BeforeAll
Expand Down Expand Up @@ -219,6 +221,7 @@ public static void setUp() throws Exception {
keyManager = (KeyManagerImpl)omTestManagers.getKeyManager();
prefixManager = omTestManagers.getPrefixManager();
writeClient = omTestManagers.getWriteClient();
rpcClient = omTestManagers.getRpcClient();

mockContainerClient();

Expand All @@ -235,6 +238,8 @@ public static void setUp() throws Exception {

@AfterAll
public static void cleanup() throws Exception {
writeClient.close();
rpcClient.close();
scm.stop();
scm.join();
om.stop();
Expand All @@ -252,10 +257,11 @@ public void init() throws Exception {
public void cleanupTest() throws IOException {
mockContainerClient();
org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME);
FileSystem fs = FileSystem.get(conf);
fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true);
fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true);
fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true);
try (FileSystem fs = FileSystem.get(conf)) {
fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true);
fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true);
fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true);
}
}

private static void mockContainerClient() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
Expand Down Expand Up @@ -162,7 +163,7 @@ public class TestOmContainerLocationCache {
private static final DatanodeDetails DN5 =
MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID());
private static final AtomicLong CONTAINER_ID = new AtomicLong(1);

private static OzoneClient ozoneClient;

@BeforeAll
public static void setUp() throws Exception {
Expand All @@ -184,6 +185,7 @@ public static void setUp() throws Exception {
OmTestManagers omTestManagers = new OmTestManagers(conf,
mockScmBlockLocationProtocol, mockScmContainerClient);
om = omTestManagers.getOzoneManager();
ozoneClient = omTestManagers.getRpcClient();
metadataManager = omTestManagers.getMetadataManager();

rpcClient = new RpcClient(conf, null) {
Expand All @@ -204,6 +206,7 @@ protected XceiverClientFactory createXceiverClientFactory(

@AfterAll
public static void cleanup() throws Exception {
// Close the RPC client before stopping the OM so the client is not
// reported as leaked (commit intent: "fix leaks in tests").
ozoneClient.close();
om.stop();
// Remove the test's temporary working directory.
FileUtils.deleteDirectory(dir);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.util.Map;
import java.util.Arrays;
import java.util.HashSet;
Expand Down Expand Up @@ -1149,8 +1148,6 @@ public void testListBucket() throws Exception {
getClientConfForOFS(hostPrefix, cluster.getConf());
int pageSize = 20;
clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize);
URI uri = FileSystem.getDefaultUri(clientConf);
clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true);
OzoneFsShell shell = new OzoneFsShell(clientConf);

String volName = "testlistbucket";
Expand Down
Loading

0 comments on commit 71c69c0

Please sign in to comment.