diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 5f14451fad32..af30cf03edfc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -28,6 +28,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.COMPACTION_LOG_TABLE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; @@ -102,12 +103,15 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecException; import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import 
org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; @@ -141,6 +145,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -235,6 +240,7 @@ private void init() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); conf.setInt(OMStorage.TESTING_INIT_LAYOUT_VERSION_KEY, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, -1); conf.setTimeDuration(OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL, 100, TimeUnit.MILLISECONDS); if (!disableNativeDiff) { @@ -1082,6 +1088,73 @@ public void testSnapDiffWithDirectoryDelete() throws Exception { assertEquals(diff.getDiffList(), diffEntries); } + /** + * Testing scenario: + * 1) Dir dir1/dir2 is created. + * 2) Snapshot snap1 created. + * 3) Delete dir1. + * 4) Wait for DDS to run and pick the sub-dirs and purge. + * 5) Snapshot snap2 created. + * 6) Snap-diff b/w snapshot snap1 and snap2 should have 1 entry + * in case of native lib (dir1) and 2 entries (dir1, dir1/dir2) + * in case of non-native env. + * This is because native lib impl will read the single entry from + * range delete tombstone. 
+ */ + @Test + public void testSnapDiffWithDirectoryDeleteAfterDDSProcessing() throws Exception { + startKeyManager(); + assumeTrue(bucketLayout.isFileSystemOptimized()); + String testVolumeName = "vol" + counter.incrementAndGet(); + String testBucketName = "bucket1"; + store.createVolume(testVolumeName); + OzoneVolume volume = store.getVolume(testVolumeName); + createBucket(volume, testBucketName); + OzoneBucket bucket = volume.getBucket(testBucketName); + String snap1 = "snap1"; + String dir1 = "dir1"; + String dir2 = "dir1/dir2"; + bucket.createDirectory(dir2); + createSnapshot(testVolumeName, testBucketName, snap1); + bucket.deleteDirectory(dir1, true); + // assert that dir2 exists + assertTrue(dirExists("dir2")); + GenericTestUtils.waitFor(() -> { + try { + return !dirExists("dir2"); + } catch (RocksDatabaseException | CodecException e) { + fail("Exception occurred while waiting for deletion" + e.getMessage()); + return false; + } + }, 100, 20000); + String snap2 = "snap2"; + createSnapshot(testVolumeName, testBucketName, snap2); + SnapshotDiffReport diff = getSnapDiffReport(testVolumeName, testBucketName, snap1, snap2); + List diffEntries = + Lists.newArrayList(SnapshotDiffReportOzone.getDiffReportEntry(SnapshotDiffReport.DiffType.DELETE, dir1)); + if (disableNativeDiff) { + diffEntries.add(SnapshotDiffReportOzone.getDiffReportEntry(SnapshotDiffReport.DiffType.DELETE, dir2)); + } + assertEquals(diff.getDiffList(), diffEntries); + stopKeyManager(); + } + + private boolean dirExists(String dirName) throws RocksDatabaseException, + CodecException { + Table directoryTable = ozoneManager + .getMetadataManager().getDirectoryTable(); + try (Table.KeyValueIterator it = directoryTable + .iterator()) { + while (it.hasNext()) { + String name = it.next().getValue().getName(); + if (name.equals(dirName)) { + return true; + } + } + return false; + } + } + private OzoneObj buildKeyObj(OzoneBucket bucket, String key) { return OzoneObjInfo.Builder.newBuilder() 
.setResType(OzoneObj.ResourceType.KEY) diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index bdb3cc3cee35..e970faf9ea93 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1473,6 +1473,8 @@ message PurgePathRequest { optional string deletedDir = 3; repeated KeyInfo deletedSubFiles = 4; repeated KeyInfo markDeletedSubDirs = 5; + repeated hadoop.hdds.KeyValue deleteRangeSubFiles = 6; + repeated hadoop.hdds.KeyValue deleteRangeSubDirs = 7; } message DeleteOpenKeysRequest { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java index 2b685edf273d..9988afbc8fc8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.om; +import java.util.Collections; import java.util.List; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -27,11 +28,14 @@ public class DeleteKeysResult { private List keysToDelete; - private boolean processedKeys; + private List keyRanges; - public DeleteKeysResult(List keysToDelete, boolean processedKeys) { - this.keysToDelete = keysToDelete; + DeleteKeysResult(List keysToDelete, List keyRanges, boolean processedKeys) { + this.keysToDelete = + Collections.unmodifiableList(java.util.Objects.requireNonNull(keysToDelete, "keysToDelete must not be null")); + this.keyRanges = + Collections.unmodifiableList(java.util.Objects.requireNonNull(keyRanges, "keyRanges must not be null")); this.processedKeys = processedKeys; } @@ -43,4 +47,30 @@ public boolean isProcessedKeys() { return processedKeys; } + public List 
getKeyRanges() { + return keyRanges; + } + + /** + * Represents a half-open key range {@code [startKey, exclusiveEndKey)} used + * for RocksDB deleteRange operations. + */ + public static class ExclusiveRange { + private final String startKey; + private final String exclusiveEndKey; + + public ExclusiveRange(String startKey, String exclusiveEndKey) { + this.startKey = java.util.Objects.requireNonNull(startKey, "startKey must not be null"); + this.exclusiveEndKey = java.util.Objects.requireNonNull(exclusiveEndKey, "exclusiveEndKey must not be null"); + } + + public String getExclusiveEndKey() { + return exclusiveEndKey; + } + + public String getStartKey() { + return startKey; + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index bc5f2ce9b961..c7a7a37a8264 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; +import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser; @@ -2298,15 +2299,30 @@ private DeleteKeysResult gatherSubPathsWithIterat List keyInfos = new ArrayList<>(); String seekFileInDB = metadataManager.getOzonePathKey(volumeId, bucketId, parentInfo.getObjectID(), ""); try (TableIterator> iterator = 
table.iterator(seekFileInDB)) { + String startKey = null; + List keyRanges = new ArrayList<>(); while (iterator.hasNext() && remainingNum > 0) { KeyValue entry = iterator.next(); KeyValue keyInfo = deleteKeyTransformer.apply(entry); if (deleteKeyFilter.apply(keyInfo)) { keyInfos.add(keyInfo.getValue()); remainingNum--; + if (startKey == null) { + startKey = entry.getKey(); + } + } else { + if (startKey != null) { + keyRanges.add(new DeleteKeysResult.ExclusiveRange(startKey, entry.getKey())); + } + startKey = null; } } - return new DeleteKeysResult(keyInfos, !iterator.hasNext()); + boolean processedAllKeys = !iterator.hasNext(); + if (startKey != null) { + keyRanges.add(new DeleteKeysResult.ExclusiveRange(startKey, + processedAllKeys ? getLexicographicallyHigherString(seekFileInDB) : iterator.next().getKey())); + } + return new DeleteKeysResult(keyInfos, keyRanges, processedAllKeys); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index 127ec4ed2cbf..5e0fa7de11e1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -31,6 +31,7 @@ import java.util.Map; import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.ozone.OmUtils; @@ -147,25 +148,27 @@ public void processPaths( deletedSpaceOmMetadataManager.getDeletedDirTable().putWithBatch(deletedSpaceBatchOperation, ozoneDeleteKey, keyInfo); - 
keySpaceOmMetadataManager.getDirectoryTable().deleteWithBatch(keySpaceBatchOperation, - ozoneDbKey); - if (LOG.isDebugEnabled()) { LOG.debug("markDeletedDirList KeyName: {}, DBKey: {}", keyInfo.getKeyName(), ozoneDbKey); } } + for (HddsProtos.KeyValue keyRanges : path.getDeleteRangeSubDirsList()) { + keySpaceOmMetadataManager.getDirectoryTable() + .deleteRangeWithBatch(keySpaceBatchOperation, keyRanges.getKey(), keyRanges.getValue()); + LOG.debug("Sub Directory delete range Start Key(inclusive): {} and End Key(exclusive): {}", keyRanges.getKey(), + keyRanges.getValue()); + } + for (OzoneManagerProtocolProtos.KeyInfo key : deletedSubFilesList) { OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key) .withCommittedKeyDeletedFlag(true); String ozoneDbKey = keySpaceOmMetadataManager.getOzonePathKey(volumeId, bucketId, keyInfo.getParentObjectID(), keyInfo.getFileName()); - keySpaceOmMetadataManager.getKeyTable(getBucketLayout()) - .deleteWithBatch(keySpaceBatchOperation, ozoneDbKey); if (LOG.isDebugEnabled()) { - LOG.info("Move keyName:{} to DeletedTable DBKey: {}", + LOG.debug("Move keyName:{} to DeletedTable DBKey: {}", keyInfo.getKeyName(), ozoneDbKey); } @@ -182,6 +185,13 @@ public void processPaths( deletedKey, repeatedOmKeyInfo); } + for (HddsProtos.KeyValue keyRanges : path.getDeleteRangeSubFilesList()) { + keySpaceOmMetadataManager.getKeyTable(getBucketLayout()) + .deleteRangeWithBatch(keySpaceBatchOperation, keyRanges.getKey(), keyRanges.getValue()); + LOG.debug("Sub File delete range Start Key(inclusive): {} and End Key(exclusive): {}", keyRanges.getKey(), + keyRanges.getValue()); + } + if (!openKeyInfoMap.isEmpty()) { for (Map.Entry entry : openKeyInfoMap.entrySet()) { keySpaceOmMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index e99b36269607..624e27f421ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; import org.apache.hadoop.hdds.utils.IOUtils; @@ -440,8 +441,9 @@ private Optional prepareDeleteDirRequest( if (purgeDeletedDir != null) { remainNum.addAndGet(-1); } - return Optional.of(wrapPurgeRequest(volumeBucketId.getVolumeId(), volumeBucketId.getBucketId(), - purgeDeletedDir, subFiles, subDirs)); + return Optional.of( + wrapPurgeRequest(volumeBucketId.getVolumeId(), volumeBucketId.getBucketId(), purgeDeletedDir, subFiles, subDirs, + subDirDeleteResult.getKeyRanges(), subFileDeleteResult.getKeyRanges())); } private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest( @@ -449,7 +451,9 @@ private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest( final long bucketId, final String purgeDeletedDir, final List purgeDeletedFiles, - final List markDirsAsDeleted) { + final List markDirsAsDeleted, + List dirExclusiveRanges, + List fileExclusiveRanges) { // Put all keys to be purged in a list PurgePathRequest.Builder purgePathsRequest = PurgePathRequest.newBuilder(); purgePathsRequest.setVolumeId(volumeId); @@ -471,6 +475,16 @@ private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest( dir.getProtobuf(ClientVersion.CURRENT_VERSION)); } + for (DeleteKeysResult.ExclusiveRange range : dirExclusiveRanges) { + 
purgePathsRequest.addDeleteRangeSubDirs( + HddsProtos.KeyValue.newBuilder().setKey(range.getStartKey()).setValue(range.getExclusiveEndKey()).build()); + } + + for (DeleteKeysResult.ExclusiveRange range : fileExclusiveRanges) { + purgePathsRequest.addDeleteRangeSubFiles( + HddsProtos.KeyValue.newBuilder().setKey(range.getStartKey()).setValue(range.getExclusiveEndKey()).build()); + } + return purgePathsRequest.build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 058bce1f9979..d7fd0e680c5b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -21,7 +21,10 @@ import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_RENAMED_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; @@ -36,10 +39,14 @@ import java.util.stream.Stream; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MapBackedTableIterator; +import org.apache.hadoop.hdds.utils.db.StringInMemoryTestTable; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import 
org.apache.ratis.util.function.CheckedFunction; +import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -231,4 +238,192 @@ public void testGetDeletedDirEntries(int numberOfVolumes, int numberOfBucketsPer assertEquals(expectedEntries, km.getDeletedDirEntries(volumeName, bucketName, numberOfEntries)); } } + + @Test + public void testGetPendingDeletionSubFilesAllReclaimableNoLimit() throws Exception { + OzoneConfiguration configuration = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = Mockito.mock(OMMetadataManager.class); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + + String prefix = "/vol1/buck1/dir1/"; + java.util.NavigableMap values = new java.util.TreeMap<>(); + // Three reclaimable children under the same parent + OmKeyInfo f1 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file1", null).build(); + OmKeyInfo f2 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file2", null).build(); + OmKeyInfo f3 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file3", null).build(); + values.put(prefix + "file1", f1); + values.put(prefix + "file2", f2); + values.put(prefix + "file3", f3); + + Table fileTable = new StringInMemoryTestTable<>(values, "fileTable"); + Mockito.when(omMetadataManager.getFileTable()).thenReturn(fileTable); + Mockito.when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), eq(""))).thenReturn(prefix); + + OmKeyInfo parent = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "dir1", null).setObjectID(100L).build(); + + CheckedFunction, Boolean, IOException> filter = kv -> true; + + DeleteKeysResult result = km.getPendingDeletionSubFiles(1L, 1L, parent, filter, 10); + + // All 3 files reclaimable + assertEquals(3, result.getKeysToDelete().size()); + assertTrue(result.isProcessedKeys()); + + List ranges 
= result.getKeyRanges(); + assertEquals(1, ranges.size()); + assertEquals(prefix + "file1", ranges.get(0).getStartKey()); + // End key must be lexicographically higher than the parent prefix + assertEquals(org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString(prefix), + ranges.get(0).getExclusiveEndKey()); + } + + @Test + public void testGetPendingDeletionSubFilesMixedReclaimableWithGap() throws Exception { + OzoneConfiguration configuration = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = Mockito.mock(OMMetadataManager.class); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + + String prefix = "/vol1/buck1/dir1/"; + java.util.NavigableMap values = new java.util.TreeMap<>(); + OmKeyInfo f1 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file1", null).build(); + OmKeyInfo f2 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file2", null).build(); + OmKeyInfo f3 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file3", null).build(); + OmKeyInfo f4 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file4", null).build(); + OmKeyInfo f5 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file5", null).build(); + values.put(prefix + "file1", f1); + values.put(prefix + "file2", f2); + values.put(prefix + "file3", f3); + values.put(prefix + "file4", f4); + values.put(prefix + "file5", f5); + + Table fileTable = new StringInMemoryTestTable<>(values, "fileTable"); + Mockito.when(omMetadataManager.getFileTable()).thenReturn(fileTable); + Mockito.when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), eq(""))).thenReturn(prefix); + + OmKeyInfo parent = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "dir1", null).setObjectID(100L).build(); + + // file3 is NOT reclaimable; others are + CheckedFunction, Boolean, IOException> filter = + kv -> !kv.getValue().getKeyName().endsWith("file3"); + + DeleteKeysResult result = 
km.getPendingDeletionSubFiles(1L, 1L, parent, filter, 10); + + assertEquals(4, result.getKeysToDelete().size()); // 1,2,4,5 + assertTrue(result.isProcessedKeys()); + + List ranges = result.getKeyRanges(); + assertEquals(2, ranges.size()); + + DeleteKeysResult.ExclusiveRange r1 = ranges.get(0); + DeleteKeysResult.ExclusiveRange r2 = ranges.get(1); + + assertEquals(prefix + "file1", r1.getStartKey()); + assertEquals(prefix + "file3", r1.getExclusiveEndKey()); + + assertEquals(prefix + "file4", r2.getStartKey()); + assertEquals(org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString(prefix), r2.getExclusiveEndKey()); + } + + @Test + public void testGetPendingDeletionSubFilesLimitHitsInsideRun() throws Exception { + OzoneConfiguration configuration = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = Mockito.mock(OMMetadataManager.class); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + + String prefix = "/vol1/buck1/dir1/"; + java.util.NavigableMap values = new java.util.TreeMap<>(); + OmKeyInfo f1 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file1", null).build(); + OmKeyInfo f2 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file2", null).build(); + OmKeyInfo f3 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file3", null).build(); + values.put(prefix + "file1", f1); + values.put(prefix + "file2", f2); + values.put(prefix + "file3", f3); + + Table fileTable = new StringInMemoryTestTable<>(values, "fileTable"); + Mockito.when(omMetadataManager.getFileTable()).thenReturn(fileTable); + Mockito.when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), eq(""))).thenReturn(prefix); + + OmKeyInfo parent = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "dir1", null).setObjectID(100L).build(); + + CheckedFunction, Boolean, IOException> filter = kv -> true; + + // remainingNum = 2 -> we only pick file1, file2; file3 is still in iterator + 
DeleteKeysResult result = km.getPendingDeletionSubFiles(1L, 1L, parent, filter, 2); + + assertEquals(2, result.getKeysToDelete().size()); + assertFalse(result.isProcessedKeys()); + + List ranges = result.getKeyRanges(); + assertEquals(1, ranges.size()); + assertEquals(prefix + "file1", ranges.get(0).getStartKey()); + assertEquals(prefix + "file3", ranges.get(0).getExclusiveEndKey()); // [file1, file3) + } + + @Test + public void testGetPendingDeletionSubFilesFirstNonReclaimable() throws Exception { + OzoneConfiguration configuration = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = Mockito.mock(OMMetadataManager.class); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + + String prefix = "/vol1/buck1/dir1/"; + java.util.NavigableMap values = new java.util.TreeMap<>(); + OmKeyInfo f1 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file1", null).build(); + OmKeyInfo f2 = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "file2", null).build(); + values.put(prefix + "file1", f1); + values.put(prefix + "file2", f2); + + Table fileTable = new StringInMemoryTestTable<>(values, "fileTable"); + Mockito.when(omMetadataManager.getFileTable()).thenReturn(fileTable); + Mockito.when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), eq(""))).thenReturn(prefix); + + OmKeyInfo parent = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "dir1", null).setObjectID(100L).build(); + + // file1 not reclaimable, file2 reclaimable + CheckedFunction, Boolean, IOException> filter = + kv -> kv.getValue().getKeyName().endsWith("file2"); + + DeleteKeysResult result = km.getPendingDeletionSubFiles(1L, 1L, parent, filter, 10); + + assertEquals(1, result.getKeysToDelete().size()); + assertTrue(result.isProcessedKeys()); + + List ranges = result.getKeyRanges(); + assertEquals(1, ranges.size()); + assertEquals(prefix + "file2", ranges.get(0).getStartKey()); + 
assertEquals(org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString(prefix), + ranges.get(0).getExclusiveEndKey()); + } + + @Test + public void testGetPendingDeletionSubDirsFirstNonReclaimable() throws Exception { + OzoneConfiguration configuration = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = Mockito.mock(OMMetadataManager.class); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + + String prefix = "/vol1/buck1/dir1/"; + OmKeyInfo parent = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", "dir1", null).setObjectID(100L).build(); + java.util.NavigableMap values = new java.util.TreeMap<>(); + OmDirectoryInfo d2 = OMRequestTestUtils.createOmDirectoryInfo("dir2", 101, parent.getParentObjectID()); + OmDirectoryInfo d3 = OMRequestTestUtils.createOmDirectoryInfo("dir3", 102, parent.getParentObjectID()); + values.put(prefix + "dir2", d2); + values.put(prefix + "dir3", d3); + + Table dirTable = new StringInMemoryTestTable<>(values, "directoryTable"); + Mockito.when(omMetadataManager.getDirectoryTable()).thenReturn(dirTable); + Mockito.when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), eq(""))).thenReturn(prefix); + + CheckedFunction, Boolean, IOException> filter = + kv -> kv.getValue().getKeyName().endsWith("dir3"); + + DeleteKeysResult result = km.getPendingDeletionSubDirs(1L, 1L, parent, filter, 10); + + assertEquals(1, result.getKeysToDelete().size()); + assertTrue(result.isProcessedKeys()); + + List ranges = result.getKeyRanges(); + assertEquals(1, ranges.size()); + assertEquals(prefix + "dir3", ranges.get(0).getStartKey()); + assertEquals(org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString(prefix), + ranges.get(0).getExclusiveEndKey()); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index 4692039ef0cc..1c2b261ee739 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -195,9 +195,19 @@ private OMRequest createPurgeKeysRequest(String fromSnapshot, String purgeDelete private PurgePathRequest wrapPurgeRequest( final long volumeId, final long bucketId, final String purgeDeletedDir, final List purgeDeletedFiles, final List markDirsAsDeleted) { + return wrapPurgeRequest( + volumeId, bucketId, purgeDeletedDir, purgeDeletedFiles, markDirsAsDeleted, + null, null); + } + + private PurgePathRequest wrapPurgeRequest( + final long volumeId, final long bucketId, final String purgeDeletedDir, + final List purgeDeletedFiles, final List markDirsAsDeleted, + final List deleteRangeSubDirs, + final List deleteRangeSubFiles) { + // Put all keys to be purged in a list - PurgePathRequest.Builder purgePathsRequest - = PurgePathRequest.newBuilder(); + PurgePathRequest.Builder purgePathsRequest = PurgePathRequest.newBuilder(); purgePathsRequest.setVolumeId(volumeId); purgePathsRequest.setBucketId(bucketId); @@ -217,6 +227,13 @@ private PurgePathRequest wrapPurgeRequest( dir.getProtobuf(ClientVersion.CURRENT_VERSION)); } + if (deleteRangeSubDirs != null) { + purgePathsRequest.addAllDeleteRangeSubDirs(deleteRangeSubDirs); + } + if (deleteRangeSubFiles != null) { + purgePathsRequest.addAllDeleteRangeSubFiles(deleteRangeSubFiles); + } + return purgePathsRequest.build(); } @@ -618,6 +635,197 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() validateDeletedKeys(omMetadataManager, deletedKeyNames); } + @Test + public void testDeleteRangeSubFilesRespectedByPurge() throws Exception { + 
when(ozoneManager.getDefaultReplicationConfig()) + .thenReturn(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + + String bucket = "bucket" + RandomUtils.secure().randomInt(); + OMRequestTestUtils.addVolumeAndBucketToDB( + volumeName, bucket, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucket); + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + + // Create parent directory "dir1" + OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder() + .setName("dir1") + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setObjectID(1) + .setParentObjectID(bucketInfo.getObjectID()) + .setUpdateID(0) + .build(); + String dirKey = OMRequestTestUtils.addDirKeyToDirTable( + false, dir1, volumeName, bucket, 1L, omMetadataManager); + + // Create 5 files under dir1 + List fileDbKeys = new ArrayList<>(); + for (int i = 1; i <= 5; i++) { + OmKeyInfo subFile = OMRequestTestUtils + .createOmKeyInfo(volumeName, bucket, "file" + i, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(10 + i) + .setParentObjectID(dir1.getObjectID()) + .setUpdateID(100L) + .build(); + + String dbKey = OMRequestTestUtils.addFileToKeyTable( + false, true, subFile.getKeyName(), subFile, 1234L, 10 + i, omMetadataManager); + fileDbKeys.add(dbKey); + } + + // Check: all 5 file keys exist before purge + for (String k : fileDbKeys) { + assertTrue(omMetadataManager.getFileTable().isExist(k)); + } + + // Build deleteRangeSubFiles: + // [file1, file3) and [file4, highKey) so that file1, file2, file4, file5 are purged, + // while file3 (the "non-reclaimable" middle entry) stays. 
+ List fileRanges = new ArrayList<>(); + fileRanges.add(HddsProtos.KeyValue.newBuilder() + .setKey(fileDbKeys.get(0)) + .setValue(fileDbKeys.get(2)) + .build()); + + String highKey = org.apache.hadoop.hdds.StringUtils + .getLexicographicallyHigherString(fileDbKeys.get(4)); + fileRanges.add(HddsProtos.KeyValue.newBuilder() + .setKey(fileDbKeys.get(3)) + .setValue(highKey) + .build()); + + Long volumeId = omMetadataManager.getVolumeId(bucketInfo.getVolumeName()); + Long bucketId = bucketInfo.getObjectID(); + + PurgePathRequest purgePathRequest = wrapPurgeRequest( + volumeId, bucketId, dirKey, + Collections.emptyList(), Collections.emptyList(), + Collections.emptyList(), fileRanges); + + List purgePathRequests = Collections.singletonList(purgePathRequest); + List bucketInfoList = Collections.singletonList( + BucketNameInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setBucketId(bucketId) + .setVolumeId(volumeId) + .build()); + + OMRequest omRequest = createPurgeKeysRequest( + null, purgePathRequests, bucketInfoList); + OMRequest preExecutedRequest = preExecute(omRequest); + + OzoneManagerProtocolProtos.PurgeDirectoriesRequest dirReq = + preExecutedRequest.getPurgeDirectoriesRequest(); + assertEquals(1, dirReq.getDeletedPathCount()); + assertEquals(2, dirReq.getDeletedPath(0).getDeleteRangeSubFilesCount()); + + OzoneManagerProtocolProtos.PurgePathRequest path = dirReq.getDeletedPath(0); + + // We expect two ranges: [file1, file3) and [file4, highKey) + assertEquals(2, path.getDeleteRangeSubFilesCount()); + assertEquals(fileDbKeys.get(0), path.getDeleteRangeSubFiles(0).getKey()); + assertEquals(fileDbKeys.get(2), path.getDeleteRangeSubFiles(0).getValue()); + assertEquals(fileDbKeys.get(3), path.getDeleteRangeSubFiles(1).getKey()); + assertEquals(highKey, path.getDeleteRangeSubFiles(1).getValue()); + } + + @Test + public void testDeleteRangeSubDirsRespectedByPurge() throws Exception { + 
when(ozoneManager.getDefaultReplicationConfig()) + .thenReturn(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + + String bucket = "bucket" + RandomUtils.secure().randomInt(); + OMRequestTestUtils.addVolumeAndBucketToDB( + volumeName, bucket, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucket); + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + + // Parent directory "dir1" + OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder() + .setName("dir1") + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setObjectID(1) + .setParentObjectID(bucketInfo.getObjectID()) + .setUpdateID(0) + .build(); + String dirKey = OMRequestTestUtils.addDirKeyToDirTable( + false, dir1, volumeName, bucket, 1L, omMetadataManager); + + // Create 5 subdirectories under dir1 + List<String> subDirDbKeys = new ArrayList<>(); + for (int i = 1; i <= 5; i++) { + OmDirectoryInfo subdir = new OmDirectoryInfo.Builder() + .setName("subdir" + i) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setObjectID(10 + i) + .setParentObjectID(dir1.getObjectID()) + .setUpdateID(0) + .build(); + String subDirPath = OMRequestTestUtils.addDirKeyToDirTable( + false, subdir, volumeName, bucket, 2L + i, omMetadataManager); + subDirDbKeys.add(subDirPath); + } + + // Check: all 5 subdir keys exist before purge + for (String k : subDirDbKeys) { + assertTrue(omMetadataManager.getDirectoryTable().isExist(k)); + } + + // Build deleteRangeSubDirs ranges like [subdir1, subdir3) and [subdir4, highKey) + List<HddsProtos.KeyValue> dirRanges = new ArrayList<>(); + dirRanges.add(HddsProtos.KeyValue.newBuilder() + .setKey(subDirDbKeys.get(0)) + .setValue(subDirDbKeys.get(2)) + .build()); + + String highKey = org.apache.hadoop.hdds.StringUtils + .getLexicographicallyHigherString(subDirDbKeys.get(4)); + dirRanges.add(HddsProtos.KeyValue.newBuilder() + .setKey(subDirDbKeys.get(3)) + .setValue(highKey) 
+ .build()); + + Long volumeId = omMetadataManager.getVolumeId(bucketInfo.getVolumeName()); + Long bucketId = bucketInfo.getObjectID(); + + PurgePathRequest purgePathRequest = wrapPurgeRequest( + volumeId, bucketId, dirKey, + Collections.emptyList(), Collections.emptyList(), + dirRanges, Collections.emptyList()); + + List<PurgePathRequest> purgePathRequests = + Collections.singletonList(purgePathRequest); + List<BucketNameInfo> bucketInfoList = Collections.singletonList( + BucketNameInfo.newBuilder() + .setVolumeName(bucketInfo.getVolumeName()) + .setBucketName(bucketInfo.getBucketName()) + .setBucketId(bucketId) + .setVolumeId(volumeId) + .build()); + + OMRequest omRequest = createPurgeKeysRequest( + null, purgePathRequests, bucketInfoList); + OMRequest preExecutedRequest = preExecute(omRequest); + + OzoneManagerProtocolProtos.PurgeDirectoriesRequest dirReq = + preExecutedRequest.getPurgeDirectoriesRequest(); + assertEquals(1, dirReq.getDeletedPathCount()); + OzoneManagerProtocolProtos.PurgePathRequest path = dirReq.getDeletedPath(0); + + // We expect two directory ranges: [subdir1, subdir3) and [subdir4, highKey) + assertEquals(2, path.getDeleteRangeSubDirsCount()); + assertEquals(subDirDbKeys.get(0), path.getDeleteRangeSubDirs(0).getKey()); + assertEquals(subDirDbKeys.get(2), path.getDeleteRangeSubDirs(0).getValue()); + assertEquals(subDirDbKeys.get(3), path.getDeleteRangeSubDirs(1).getKey()); + assertEquals(highKey, path.getDeleteRangeSubDirs(1).getValue()); + } + private void performBatchOperationCommit(OMDirectoriesPurgeResponseWithFSO omClientResponse) throws ExecutionException, InterruptedException { CompletableFuture future = new CompletableFuture<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index f3c91b6f4d88..986ee7aaf3d1 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.om.KeyManager; @@ -63,7 +64,6 @@ import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.mockito.MockedStatic; @@ -241,7 +241,6 @@ public void testMultithreadedDirectoryDeletion() throws Exception { } @Test - @DisplayName("DirectoryDeletingService batches PurgeDirectories by Ratis byte limit (via submitRequest spy)") void testPurgeDirectoriesBatching() throws Exception { final int ratisLimitBytes = 2304; @@ -312,4 +311,107 @@ void testPurgeDirectoriesBatching() throws Exception { org.apache.commons.io.FileUtils.deleteDirectory(testDir); } + @Test + void testDeleteRangeFieldsPropagatedToRatis() throws Exception { + final int ratisLimitBytes = 4096; + + OzoneConfiguration conf = new OzoneConfiguration(); + File testDir = Files.createTempDirectory("testDeleteRange").toFile(); + ServerUtils.setOzoneMetaDirPath(conf, testDir.toString()); + conf.setTimeDuration(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, ratisLimitBytes, StorageUnit.BYTES); + conf.setQuietMode(false); + + OmTestManagers managers = new OmTestManagers(conf); + om = managers.getOzoneManager(); + 
KeyManager km = managers.getKeyManager(); + + DirectoryDeletingService real = km.getDirDeletingService(); + DirectoryDeletingService dds = Mockito.spy(real); + + List<OzoneManagerProtocolProtos.OMRequest> captured = new ArrayList<>(); + Mockito.doAnswer(inv -> { + OzoneManagerProtocolProtos.OMRequest req = inv.getArgument(0); + captured.add(req); + return OzoneManagerProtocolProtos.OMResponse.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.PurgeDirectories) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .build(); + }).when(dds).submitRequest(Mockito.any(OzoneManagerProtocolProtos.OMRequest.class)); + + final long volumeId = 1L, bucketId = 2L; + String prefix = "/vol1/buck1/dir1/"; + + HddsProtos.KeyValue dirRange1 = HddsProtos.KeyValue.newBuilder() + .setKey(prefix + "subdir1") + .setValue(prefix + "subdir3") + .build(); + HddsProtos.KeyValue dirRange2 = HddsProtos.KeyValue.newBuilder() + .setKey(prefix + "subdir4") + .setValue("zzzz") + .build(); + + HddsProtos.KeyValue fileRange1 = HddsProtos.KeyValue.newBuilder() + .setKey(prefix + "file1") + .setValue(prefix + "file3") + .build(); + + OzoneManagerProtocolProtos.PurgePathRequest purgePath = OzoneManagerProtocolProtos.PurgePathRequest.newBuilder() + .setVolumeId(volumeId) + .setBucketId(bucketId) + .setDeletedDir(prefix + "deletedDir") + .addDeleteRangeSubDirs(dirRange1) + .addDeleteRangeSubDirs(dirRange2) + .addDeleteRangeSubFiles(fileRange1) + .build(); + + List<OzoneManagerProtocolProtos.PurgePathRequest> purgeList = java.util.Collections.singletonList(purgePath); + + org.apache.hadoop.ozone.om.OMMetadataManager.VolumeBucketId vbId = + new org.apache.hadoop.ozone.om.OMMetadataManager.VolumeBucketId(volumeId, bucketId); + OzoneManagerProtocolProtos.BucketNameInfo bni = + OzoneManagerProtocolProtos.BucketNameInfo.newBuilder() + .setVolumeId(volumeId) + .setBucketId(bucketId) + .setVolumeName("v") + .setBucketName("b") + .build(); + Map<org.apache.hadoop.ozone.om.OMMetadataManager.VolumeBucketId, OzoneManagerProtocolProtos.BucketNameInfo> + bucketNameInfoMap = new HashMap<>(); + bucketNameInfoMap.put(vbId, bni); + + dds.optimizeDirDeletesAndSubmitRequest( + 0L, 0L, 0L, + new 
ArrayList<>(), purgeList, + null, Time.monotonicNow(), + km, + kv -> true, kv -> true, + bucketNameInfoMap, + null, 1L, + new AtomicInteger(Integer.MAX_VALUE)); + + // Exactly one PurgeDirectories OMRequest expected + assertThat(captured).hasSize(1); + OzoneManagerProtocolProtos.OMRequest omReq = captured.get(0); + assertThat(omReq.getCmdType()).isEqualTo(OzoneManagerProtocolProtos.Type.PurgeDirectories); + + OzoneManagerProtocolProtos.PurgeDirectoriesRequest purgeReq = omReq.getPurgeDirectoriesRequest(); + assertThat(purgeReq.getDeletedPathCount()).isEqualTo(1); + OzoneManagerProtocolProtos.PurgePathRequest path = purgeReq.getDeletedPath(0); + + // Verify dir ranges + assertThat(path.getDeleteRangeSubDirsCount()).isEqualTo(2); + assertThat(path.getDeleteRangeSubDirs(0).getKey()).isEqualTo(dirRange1.getKey()); + assertThat(path.getDeleteRangeSubDirs(0).getValue()).isEqualTo(dirRange1.getValue()); + assertThat(path.getDeleteRangeSubDirs(1).getKey()).isEqualTo(dirRange2.getKey()); + assertThat(path.getDeleteRangeSubDirs(1).getValue()).isEqualTo(dirRange2.getValue()); + + // Verify file ranges + assertThat(path.getDeleteRangeSubFilesCount()).isEqualTo(1); + assertThat(path.getDeleteRangeSubFiles(0).getKey()).isEqualTo(fileRange1.getKey()); + assertThat(path.getDeleteRangeSubFiles(0).getValue()).isEqualTo(fileRange1.getValue()); + + org.apache.commons.io.FileUtils.deleteDirectory(testDir); + } + }