From df5ef9685537348c10bbbdde77b3ffe9a80584c6 Mon Sep 17 00:00:00 2001 From: Satish Kotha Date: Fri, 8 Jan 2021 17:44:37 -0800 Subject: [PATCH] Use filesystemview and json format from metadata. Add tests --- .../hudi/table/HoodieTimelineArchiveLog.java | 4 +- .../action/clean/BaseCleanActionExecutor.java | 22 +--- .../hudi/table/action/clean/CleanPlanner.java | 98 ++++++--------- .../table/action/rollback/RollbackUtils.java | 1 + .../metadata/TestHoodieBackedMetadata.java | 11 ++ .../org/apache/hudi/table/TestCleaner.java | 112 +++++++++++++++++- .../view/AbstractTableFileSystemView.java | 20 ++++ .../view/PriorityBasedFileSystemView.java | 10 ++ .../view/RemoteHoodieTableFileSystemView.java | 30 +++++ .../table/view/TableFileSystemView.java | 12 +- .../metadata/HoodieTableMetadataUtil.java | 27 +---- .../view/TestHoodieTableFileSystemView.java | 7 ++ .../common/testutils/HoodieTestTable.java | 5 + .../service/FileSystemViewHandler.java | 15 +++ .../service/handlers/FileSliceHandler.java | 10 ++ 15 files changed, 274 insertions(+), 110 deletions(-) diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTimelineArchiveLog.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTimelineArchiveLog.java index 50967b12a34ad..3f4c2716bf394 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTimelineArchiveLog.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTimelineArchiveLog.java @@ -290,10 +290,10 @@ public void archive(HoodieEngineContext context, List instants) t LOG.info("Wrapper schema " + wrapperSchema.toString()); List records = new ArrayList<>(); for (HoodieInstant hoodieInstant : instants) { + // TODO HUDI-1518 Cleaner now takes care of removing replaced file groups. This call to deleteReplacedFileGroups can be removed. boolean deleteSuccess = deleteReplacedFileGroups(context, hoodieInstant); if (!deleteSuccess) { - // throw error and stop archival if deleting replaced file groups failed. 
- throw new HoodieCommitException("Unable to delete file(s) for " + hoodieInstant.getFileName()); + LOG.warn("Unable to delete file(s) for " + hoodieInstant.getFileName() + ", replaced files possibly deleted by cleaner"); } try { deleteAnyLeftOverMarkerFiles(context, hoodieInstant); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/BaseCleanActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/BaseCleanActionExecutor.java index 1011f68d0d7a7..786bf3e188821 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/BaseCleanActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/BaseCleanActionExecutor.java @@ -21,12 +21,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hudi.avro.model.HoodieActionInstant; +import org.apache.hudi.avro.model.HoodieCleanFileInfo; import org.apache.hudi.avro.model.HoodieCleanMetadata; import org.apache.hudi.avro.model.HoodieCleanerPlan; -import org.apache.hudi.avro.model.HoodieCleanFileInfo; import org.apache.hudi.common.HoodieCleanStat; import org.apache.hudi.common.engine.HoodieEngineContext; -import org.apache.hudi.common.model.CleanFileInfo; import org.apache.hudi.common.model.HoodieCleaningPolicy; import org.apache.hudi.common.model.HoodieRecordPayload; import org.apache.hudi.common.table.timeline.HoodieInstant; @@ -50,7 +49,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import java.util.stream.Stream; public abstract class BaseCleanActionExecutor extends BaseActionExecutor { @@ -83,21 +81,9 @@ HoodieCleanerPlan requestClean(HoodieEngineContext context) { context.setJobStatus(this.getClass().getSimpleName(), "Generates list of file slices to be cleaned"); - // Compute the file paths, to be cleaned in each valid file group - Stream>> cleanInfos = context.map(partitionsToClean, - partitionPathToClean -> Pair.of(partitionPathToClean, planner.getDeletePaths(partitionPathToClean)), - cleanerParallelism).stream(); - - // Compute the file paths, to be cleaned in replaced file groups - List>> partitionToReplacedFileIds = planner.getReplacedFileIdsToClean(earliestInstant).entrySet().stream() - .map(e -> Pair.of(e.getKey(), e.getValue())) - .collect(Collectors.toList()); - Stream>> replacedCleanInfos = context.map(partitionToReplacedFileIds, partitionFileIds -> { - String partitionPath = partitionFileIds.getKey(); - return Pair.of(partitionPath, planner.getDeletePathsForReplacedFileGroups(partitionPath, partitionFileIds.getRight())); - }, cleanerParallelism).stream(); - - Map> cleanOps = Stream.concat(cleanInfos, replacedCleanInfos) + Map> cleanOps = context + .map(partitionsToClean, partitionPathToClean -> Pair.of(partitionPathToClean, planner.getDeletePaths(partitionPathToClean)), cleanerParallelism) + .stream() .collect(Collectors.toMap(Pair::getKey, y -> CleanerUtils.convertToHoodieCleanFileInfoList(y.getValue()))); return new HoodieCleanerPlan(earliestInstant diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java index 5cdb46cc7d144..321f2487375f4 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java +++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java
@@ -29,6 +29,7 @@ import org.apache.hudi.common.model.HoodieFileGroup;
 import org.apache.hudi.common.model.HoodieFileGroupId;
 import org.apache.hudi.common.model.HoodieRecordPayload;
+import org.apache.hudi.common.model.HoodieReplaceCommitMetadata;
 import org.apache.hudi.common.model.HoodieTableType;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.table.timeline.HoodieTimeline;
@@ -49,7 +50,6 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -159,7 +159,8 @@ private List getPartitionPathsForCleanByCommits(Option in
    * @param newInstantToRetain
    * @return
    */
-  private List<String> getPartitionPathsForIncrementalCleaning(HoodieCleanMetadata cleanMetadata, Option<HoodieInstant> newInstantToRetain) {
+  private List<String> getPartitionPathsForIncrementalCleaning(HoodieCleanMetadata cleanMetadata,
+      Option<HoodieInstant> newInstantToRetain) {
     LOG.warn("Incremental Cleaning mode is enabled. Looking up partition-paths that have since changed "
         + "since last cleaned at " + cleanMetadata.getEarliestCommitToRetain()
         + ". New Instant to retain : " + newInstantToRetain);
@@ -168,10 +169,16 @@ private List getPartitionPathsForIncrementalCleaning(HoodieCleanMetadata
         cleanMetadata.getEarliestCommitToRetain()) && HoodieTimeline.compareTimestamps(instant.getTimestamp(),
         HoodieTimeline.LESSER_THAN, newInstantToRetain.get().getTimestamp())).flatMap(instant -> {
           try {
-            HoodieCommitMetadata commitMetadata = HoodieCommitMetadata
-                .fromBytes(hoodieTable.getActiveTimeline().getInstantDetails(instant).get(),
-                    HoodieCommitMetadata.class);
-            return commitMetadata.getPartitionToWriteStats().keySet().stream();
+            if (HoodieTimeline.REPLACE_COMMIT_ACTION.equals(instant.getAction())) {
+              HoodieReplaceCommitMetadata replaceCommitMetadata = HoodieReplaceCommitMetadata.fromBytes(
+                  hoodieTable.getActiveTimeline().getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class);
+              return Stream.concat(replaceCommitMetadata.getPartitionToReplaceFileIds().keySet().stream(), replaceCommitMetadata.getPartitionToWriteStats().keySet().stream());
+            } else {
+              HoodieCommitMetadata commitMetadata = HoodieCommitMetadata
+                  .fromBytes(hoodieTable.getActiveTimeline().getInstantDetails(instant).get(),
+                      HoodieCommitMetadata.class);
+              return commitMetadata.getPartitionToWriteStats().keySet().stream();
+            }
           } catch (IOException e) {
             throw new HoodieIOException(e.getMessage(), e);
           }
@@ -196,13 +203,17 @@ private List getPartitionPathsForFullCleaning() throws IOException {
   private List<CleanFileInfo> getFilesToCleanKeepingLatestVersions(String partitionPath) {
     LOG.info("Cleaning " + partitionPath + ", retaining latest " + config.getCleanerFileVersionsRetained()
         + " file versions. ");
-    List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroups(partitionPath).collect(Collectors.toList());
     List<CleanFileInfo> deletePaths = new ArrayList<>();
     // Collect all the datafiles savepointed by all the savepoints
     List<String> savepointedFiles = hoodieTable.getSavepoints().stream()
         .flatMap(this::getSavepointedDataFiles)
         .collect(Collectors.toList());
+    // In this scenario, we will assume that once replaced a file group automatically becomes eligible for cleaning completely
+    // In other words, the file versions only apply to the active file groups.
+    deletePaths.addAll(getReplacedFilesEligibleToClean(savepointedFiles, partitionPath, Option.empty()));
+
+    List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroups(partitionPath).collect(Collectors.toList());
     for (HoodieFileGroup fileGroup : fileGroups) {
       int keepVersions = config.getCleanerFileVersionsRetained();
       // do not cleanup slice required for pending compaction
@@ -258,7 +269,11 @@ private List getFilesToCleanKeepingLatestCommits(String partition
     // determine if we have enough commits, to start cleaning.
     if (commitTimeline.countInstants() > commitsRetained) {
-      HoodieInstant earliestCommitToRetain = getEarliestCommitToRetain().get();
+      Option<HoodieInstant> earliestCommitToRetainOption = getEarliestCommitToRetain();
+      HoodieInstant earliestCommitToRetain = earliestCommitToRetainOption.get();
+      // all replaced file groups before earliestCommitToRetain are eligible to clean
+      deletePaths.addAll(getReplacedFilesEligibleToClean(savepointedFiles, partitionPath, earliestCommitToRetainOption));
+      // add active files
       List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroups(partitionPath).collect(Collectors.toList());
       for (HoodieFileGroup fileGroup : fileGroups) {
         List<FileSlice> fileSliceList = fileGroup.getAllFileSlices().collect(Collectors.toList());
@@ -311,6 +326,20 @@ private List getFilesToCleanKeepingLatestCommits(String partition
     }
     return deletePaths;
   }
+
+  private List<CleanFileInfo> getReplacedFilesEligibleToClean(List<String> savepointedFiles, String partitionPath, Option<HoodieInstant> earliestCommitToRetain) {
+    final Stream<HoodieFileGroup> replacedGroups;
+    if (earliestCommitToRetain.isPresent()) {
+      replacedGroups = fileSystemView.getReplacedFileGroupsBefore(earliestCommitToRetain.get().getTimestamp(), partitionPath);
+    } else {
+      replacedGroups = fileSystemView.getAllReplacedFileGroups(partitionPath);
+    }
+    return replacedGroups.flatMap(HoodieFileGroup::getAllFileSlices)
+        // do not delete savepointed files (archival will make sure corresponding replacecommit file is not deleted)
+        .filter(slice -> !slice.getBaseFile().isPresent() || !savepointedFiles.contains(slice.getBaseFile().get().getFileName()))
+        .flatMap(slice -> getCleanFileInfoForSlice(slice).stream())
+        .collect(Collectors.toList());
+  }
   /**
    * Gets the latest version < instantTime. This version file could still be used by queries.
@@ -376,59 +405,6 @@ public Option getEarliestCommitToRetain() {
     return earliestCommitToRetain;
   }
-  public Map<String, List<String>> getReplacedFileIdsToClean(Option<HoodieInstant> earliestInstantToRetain) {
-    HoodieCleaningPolicy policy = config.getCleanerPolicy();
-    HoodieTimeline replaceTimeline = hoodieTable.getActiveTimeline().getCompletedReplaceTimeline();
-
-    // Determine which replace commits can be cleaned.
-    Stream<HoodieInstant> cleanableReplaceCommits;
-    if (policy == HoodieCleaningPolicy.KEEP_LATEST_COMMITS) {
-      if (!earliestInstantToRetain.isPresent()) {
-        LOG.info("Not enough instants to start cleaning replace commits");
-        return Collections.emptyMap();
-      }
-      // all replace commits, before the earliest instant we want to retain, should be eligible for deleting the
-      // replaced file groups.
-      cleanableReplaceCommits = replaceTimeline
-          .filter(instant -> HoodieTimeline.compareTimestamps(instant.getTimestamp(), HoodieTimeline.LESSER_THAN,
-              earliestInstantToRetain.get().getTimestamp()))
-          .getInstants();
-    } else if (policy == HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS) {
-      // In this scenario, we will assume that once replaced a file group automatically becomes eligible for cleaning completely
-      // In other words, the file versions only apply to the active file groups.
- cleanableReplaceCommits = replaceTimeline.getInstants(); - } else { - throw new IllegalArgumentException("Unknown cleaning policy : " + policy.name()); - } - - // merge everything and make a map full of file ids to be cleaned. - return cleanableReplaceCommits.map(instant -> { - try { - return TimelineMetadataUtils.deserializeHoodieReplaceMetadata(hoodieTable.getActiveTimeline().getInstantDetails(instant).get()).getPartitionToReplaceFileIds(); - } catch (IOException e) { - throw new HoodieIOException("Unable to deserialize " + instant, e); - } - }).reduce((leftMap, rightMap) -> { - rightMap.forEach((partition, fileIds) -> { - if (!leftMap.containsKey(partition)) { - leftMap.put(partition, fileIds); - } else { - // duplicates should nt be possible; since replace of a file group should happen once only - leftMap.get(partition).addAll(fileIds); - } - }); - return leftMap; - }).orElse(new HashMap<>()); - } - - public List getDeletePathsForReplacedFileGroups(String partitionPath, List eligibleFileIds) { - return hoodieTable.getFileSystemView().getAllFileGroups(partitionPath) - .filter(fg -> eligibleFileIds.contains(fg.getFileGroupId().getFileId())) - .flatMap(HoodieFileGroup::getAllFileSlices) - .flatMap(fileSlice -> getCleanFileInfoForSlice(fileSlice).stream()) - .collect(Collectors.toList()); - } - /** * Determine if file slice needed to be preserved for pending compaction. * diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/RollbackUtils.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/RollbackUtils.java index 18f284ea35efd..ee7f4dd716a55 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/RollbackUtils.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/RollbackUtils.java @@ -122,6 +122,7 @@ public static List generateRollbackRequestsUsingFil List partitionRollbackRequests = new ArrayList<>(); switch (instantToRollback.getAction()) { case HoodieTimeline.COMMIT_ACTION: + case HoodieTimeline.REPLACE_COMMIT_ACTION: LOG.info("Rolling back commit action."); partitionRollbackRequests.add( ListingBasedRollbackRequest.createRollbackRequestWithDeleteDataAndLogFilesAction(partitionPath)); diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/metadata/TestHoodieBackedMetadata.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/metadata/TestHoodieBackedMetadata.java index 32cec71feafc9..593223653d783 100644 --- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/metadata/TestHoodieBackedMetadata.java +++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/metadata/TestHoodieBackedMetadata.java @@ -18,6 +18,7 @@ package org.apache.hudi.metadata; +import org.apache.hudi.client.HoodieWriteResult; import org.apache.hudi.client.SparkRDDWriteClient; import org.apache.hudi.client.WriteStatus; import org.apache.hudi.client.common.HoodieSparkEngineContext; @@ -498,6 +499,15 @@ public void testSync(HoodieTableType tableType) throws Exception { writeStatuses = client.upsert(jsc.parallelize(records, 1), newCommitTime).collect(); assertNoWriteErrors(writeStatuses); assertFalse(metadata(client).isInSync()); + + // insert overwrite to test replacecommit + newCommitTime = HoodieActiveTimeline.createNewInstantTime(); + client.startCommitWithTime(newCommitTime, HoodieTimeline.REPLACE_COMMIT_ACTION); + records = dataGen.generateInserts(newCommitTime, 5); + HoodieWriteResult replaceResult = 
client.insertOverwrite(jsc.parallelize(records, 1), newCommitTime); + writeStatuses = replaceResult.getWriteStatuses().collect(); + assertNoWriteErrors(writeStatuses); + assertFalse(metadata(client).isInSync()); } // Enable metadata table and ensure it is synced @@ -800,6 +810,7 @@ private void validateMetadata(SparkRDDWriteClient client) throws IOException { // FileSystemView should expose the same data List fileGroups = tableView.getAllFileGroups(partition).collect(Collectors.toList()); + fileGroups.addAll(tableView.getAllReplacedFileGroups(partition).collect(Collectors.toList())); fileGroups.forEach(g -> LogManager.getLogger(TestHoodieBackedMetadata.class).info(g)); fileGroups.forEach(g -> g.getAllBaseFiles().forEach(b -> LogManager.getLogger(TestHoodieBackedMetadata.class).info(b))); diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java index 69c6f98c67237..3a5d7373c53a7 100644 --- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java +++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java @@ -18,6 +18,8 @@ package org.apache.hudi.table; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; import org.apache.hudi.avro.model.HoodieActionInstant; import org.apache.hudi.avro.model.HoodieCleanMetadata; import org.apache.hudi.avro.model.HoodieCleanPartitionMetadata; @@ -38,9 +40,11 @@ import org.apache.hudi.common.model.HoodieFileGroup; import org.apache.hudi.common.model.HoodieFileGroupId; import org.apache.hudi.common.model.HoodieRecord; +import org.apache.hudi.common.model.HoodieReplaceCommitMetadata; import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.model.HoodieWriteStat; import org.apache.hudi.common.model.IOType; +import org.apache.hudi.common.model.WriteOperationType; import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.common.table.timeline.HoodieInstant; @@ -57,6 +61,7 @@ import org.apache.hudi.common.util.CollectionUtils; import org.apache.hudi.common.util.CompactionUtils; import org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.StringUtils; import org.apache.hudi.common.util.collection.Pair; import org.apache.hudi.config.HoodieCompactionConfig; import org.apache.hudi.config.HoodieWriteConfig; @@ -65,9 +70,6 @@ import org.apache.hudi.index.SparkHoodieIndex; import org.apache.hudi.table.action.clean.CleanPlanner; import org.apache.hudi.testutils.HoodieClientTestBase; - -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.Path; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.apache.spark.api.java.JavaRDD; @@ -76,6 +78,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; +import scala.Tuple3; import java.io.File; import java.io.IOException; @@ -96,8 +99,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import scala.Tuple3; - import static org.apache.hudi.common.testutils.HoodieTestTable.makeIncrementalCommitTimes; import static org.apache.hudi.common.testutils.HoodieTestTable.makeNewCommitTime; import static org.apache.hudi.common.testutils.HoodieTestUtils.DEFAULT_PARTITION_PATHS; @@ -687,6 +688,107 @@ public void 
testKeepLatestCommitsMOR() throws Exception { assertTrue(testTable.baseFileExists(p0, "002", file1P0)); assertTrue(testTable.logFileExists(p0, "002", file1P0, 4)); } + + @Test + public void testCleanWithReplaceCommits() throws Exception { + HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath).withAssumeDatePartitioning(true) + .withCompactionConfig(HoodieCompactionConfig.newBuilder() + .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build()) + .build(); + + HoodieTestTable testTable = HoodieTestTable.of(metaClient); + String p0 = "2020/01/01"; + String p1 = "2020/01/02"; + + // make 1 commit, with 1 file per partition + String file1P0C0 = UUID.randomUUID().toString(); + String file1P1C0 = UUID.randomUUID().toString(); + testTable.addInflightCommit("00000000000001").withBaseFilesInPartition(p0, file1P0C0).withBaseFilesInPartition(p1, file1P1C0); + + HoodieCommitMetadata commitMetadata = generateCommitMetadata( + Collections.unmodifiableMap(new HashMap>() { + { + put(p0, CollectionUtils.createImmutableList(file1P0C0)); + put(p1, CollectionUtils.createImmutableList(file1P1C0)); + } + }) + ); + metaClient.getActiveTimeline().saveAsComplete( + new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, "00000000000001"), + Option.of(commitMetadata.toJsonString().getBytes(StandardCharsets.UTF_8))); + + metaClient = HoodieTableMetaClient.reload(metaClient); + + List hoodieCleanStatsOne = runCleaner(config); + assertEquals(0, hoodieCleanStatsOne.size(), "Must not scan any partitions and clean any files"); + assertTrue(testTable.baseFileExists(p0, "00000000000001", file1P0C0)); + assertTrue(testTable.baseFileExists(p1, "00000000000001", file1P1C0)); + + // make next replacecommit, with 1 clustering operation. logically delete p0. No change to p1 + Map partitionAndFileId002 = testTable.forReplaceCommit("00000000000002").getFileIdsWithBaseFilesInPartitions(p0); + String file2P0C1 = partitionAndFileId002.get(p0); + testTable.addReplaceCommit("00000000000002", generateReplaceCommitMetadata(p0, file1P0C0, file2P0C1)); + + // run cleaner + List hoodieCleanStatsTwo = runCleaner(config); + assertEquals(0, hoodieCleanStatsTwo.size(), "Must not scan any partitions and clean any files"); + assertTrue(testTable.baseFileExists(p0, "00000000000002", file2P0C1)); + assertTrue(testTable.baseFileExists(p0, "00000000000001", file1P0C0)); + assertTrue(testTable.baseFileExists(p1, "00000000000001", file1P1C0)); + + // make next replacecommit, with 1 clustering operation. Replace data in p1. No change to p0 + Map partitionAndFileId003 = testTable.forReplaceCommit("00000000000003").getFileIdsWithBaseFilesInPartitions(p1); + String file3P1C2 = partitionAndFileId003.get(p1); + testTable.addReplaceCommit("00000000000003", generateReplaceCommitMetadata(p1, file1P1C0, file3P1C2)); + + // run cleaner + List hoodieCleanStatsThree = runCleaner(config); + assertEquals(0, hoodieCleanStatsThree.size(), "Must not scan any partitions and clean any files"); + assertTrue(testTable.baseFileExists(p0, "00000000000002", file2P0C1)); + assertTrue(testTable.baseFileExists(p0, "00000000000001", file1P0C0)); + assertTrue(testTable.baseFileExists(p1, "00000000000003", file3P1C2)); + assertTrue(testTable.baseFileExists(p1, "00000000000001", file1P1C0)); + + // make next replacecommit, with 1 clustering operation. 
Replace data in p0 again
+    Map<String, String> partitionAndFileId004 = testTable.forReplaceCommit("00000000000004").getFileIdsWithBaseFilesInPartitions(p0);
+    String file4P0C3 = partitionAndFileId004.get(p0);
+    testTable.addReplaceCommit("00000000000004", generateReplaceCommitMetadata(p0, file2P0C1, file4P0C3));
+
+    // run cleaner
+    List<HoodieCleanStat> hoodieCleanStatsFour = runCleaner(config);
+    assertTrue(testTable.baseFileExists(p0, "00000000000004", file4P0C3));
+    assertTrue(testTable.baseFileExists(p0, "00000000000002", file2P0C1));
+    assertTrue(testTable.baseFileExists(p1, "00000000000003", file3P1C2));
+    assertFalse(testTable.baseFileExists(p0, "00000000000001", file1P0C0));
+    // file1P1C0 still stays because it's not replaced until 3 and it's the only version available
+    assertTrue(testTable.baseFileExists(p1, "00000000000001", file1P1C0));
+
+    // make next replacecommit, with 1 clustering operation. Replace all data in p1. no new files created
+    Map<String, String> partitionAndFileId005 = testTable.forReplaceCommit("00000000000005").getFileIdsWithBaseFilesInPartitions(p1);
+    String file4P1C4 = partitionAndFileId005.get(p1);
+    testTable.addReplaceCommit("00000000000005", generateReplaceCommitMetadata(p1, file3P1C2, file4P1C4));
+
+    List<HoodieCleanStat> hoodieCleanStatsFive = runCleaner(config, 2);
+    assertTrue(testTable.baseFileExists(p0, "00000000000004", file4P0C3));
+    assertTrue(testTable.baseFileExists(p0, "00000000000002", file2P0C1));
+    assertTrue(testTable.baseFileExists(p1, "00000000000003", file3P1C2));
+    assertFalse(testTable.baseFileExists(p0, "00000000000001", file1P0C0));
+    assertFalse(testTable.baseFileExists(p1, "00000000000001", file1P1C0));
+  }
+
+  private HoodieReplaceCommitMetadata generateReplaceCommitMetadata(String partition, String replacedFileId, String newFileId) {
+    HoodieReplaceCommitMetadata replaceMetadata = new HoodieReplaceCommitMetadata();
+    replaceMetadata.addReplaceFileId(partition, replacedFileId);
+    replaceMetadata.setOperationType(WriteOperationType.CLUSTER);
+    if (!StringUtils.isNullOrEmpty(newFileId)) {
+      HoodieWriteStat writeStat = new HoodieWriteStat();
+      writeStat.setPartitionPath(partition);
+      writeStat.setPath(newFileId);
+      writeStat.setFileId(newFileId);
+      replaceMetadata.addWriteStat(partition, writeStat);
+    }
+    return replaceMetadata;
+  }
   @Test
   public void testCleanMetadataUpgradeDowngrade() {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
index 65e9231652ef6..3f457153d081a 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
@@ -62,6 +62,7 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import static org.apache.hudi.common.table.timeline.HoodieTimeline.GREATER_THAN;
 import static org.apache.hudi.common.table.timeline.HoodieTimeline.GREATER_THAN_OR_EQUALS;
 import static org.apache.hudi.common.table.timeline.HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS;
@@ -690,6 +691,16 @@ public Stream getReplacedFileGroupsBeforeOrOn(String maxCommitT
     return getAllFileGroupsIncludingReplaced(partitionPath).filter(fg -> isFileGroupReplacedBeforeOrOn(fg.getFileGroupId(), maxCommitTime));
   }
+  @Override
+  public Stream<HoodieFileGroup> getReplacedFileGroupsBefore(String maxCommitTime, String partitionPath) {
+    return getAllFileGroupsIncludingReplaced(partitionPath).filter(fg -> isFileGroupReplacedBefore(fg.getFileGroupId(), maxCommitTime));
+  }
+
+  @Override
+  public Stream<HoodieFileGroup> getAllReplacedFileGroups(String partitionPath) {
+    return getAllFileGroupsIncludingReplaced(partitionPath).filter(fg -> isFileGroupReplaced(fg.getFileGroupId()));
+  }
+
   @Override
   public final Stream<Pair<HoodieFileGroupId, HoodieInstant>> getFileGroupsInPendingClustering() {
     try {
@@ -1041,6 +1052,15 @@ private boolean isFileGroupReplacedBeforeAny(HoodieFileGroupId fileGroupId, List
     return isFileGroupReplacedBeforeOrOn(fileGroupId, instants.stream().max(Comparator.naturalOrder()).get());
   }
+  private boolean isFileGroupReplacedBefore(HoodieFileGroupId fileGroupId, String instant) {
+    Option<HoodieInstant> hoodieInstantOption = getReplaceInstant(fileGroupId);
+    if (!hoodieInstantOption.isPresent()) {
+      return false;
+    }
+
+    return HoodieTimeline.compareTimestamps(instant, GREATER_THAN, hoodieInstantOption.get().getTimestamp());
+  }
+
   private boolean isFileGroupReplacedBeforeOrOn(HoodieFileGroupId fileGroupId, String instant) {
     Option<HoodieInstant> hoodieInstantOption = getReplaceInstant(fileGroupId);
     if (!hoodieInstantOption.isPresent()) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java
index f7244eefdf9d4..3783d00b65d2b 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java
@@ -199,6 +199,16 @@ public Stream getReplacedFileGroupsBeforeOrOn(String maxCommitT
     return execute(maxCommitTime, partitionPath, preferredView::getReplacedFileGroupsBeforeOrOn, secondaryView::getReplacedFileGroupsBeforeOrOn);
   }
+  @Override
+  public Stream<HoodieFileGroup> getReplacedFileGroupsBefore(String maxCommitTime, String partitionPath) {
+    return execute(maxCommitTime, partitionPath, preferredView::getReplacedFileGroupsBefore, secondaryView::getReplacedFileGroupsBefore);
+  }
+
+  @Override
+  public Stream<HoodieFileGroup> getAllReplacedFileGroups(String partitionPath) {
+    return execute(partitionPath, preferredView::getAllReplacedFileGroups, secondaryView::getAllReplacedFileGroups);
+  }
+
   @Override
   public Stream<Pair<String, CompactionOperation>> getPendingCompactionOperations() {
     return execute(preferredView::getPendingCompactionOperations, secondaryView::getPendingCompactionOperations);
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java
index 91a28a861fada..23b0536c240dc 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java
@@ -91,6 +91,12 @@ public class RemoteHoodieTableFileSystemView implements SyncableFileSystemView,
   public static final String ALL_REPLACED_FILEGROUPS_BEFORE_OR_ON =
       String.format("%s/%s", BASE_URL, "filegroups/replaced/beforeoron/");
+  public static final String ALL_REPLACED_FILEGROUPS_BEFORE =
+      String.format("%s/%s", BASE_URL, "filegroups/replaced/before/");
+
+  public static final String ALL_REPLACED_FILEGROUPS_PARTITION =
+      String.format("%s/%s", BASE_URL, "filegroups/replaced/partition/");
+
   public static final String PENDING_CLUSTERING_FILEGROUPS =
       String.format("%s/%s", BASE_URL, "clustering/pending/");
@@ -380,6 +386,30 @@ public Stream getReplacedFileGroupsBeforeOrOn(String maxCommitT
     }
   }
+  @Override
+  public Stream<HoodieFileGroup> getReplacedFileGroupsBefore(String maxCommitTime, String partitionPath) {
+    Map<String, String> paramsMap = getParamsWithAdditionalParam(partitionPath, MAX_INSTANT_PARAM, maxCommitTime);
+    try {
+      List<FileGroupDTO> fileGroups = executeRequest(ALL_REPLACED_FILEGROUPS_BEFORE, paramsMap,
+          new TypeReference<List<FileGroupDTO>>() {}, RequestMethod.GET);
+      return fileGroups.stream().map(dto -> FileGroupDTO.toFileGroup(dto, metaClient));
+    } catch (IOException e) {
+      throw new HoodieRemoteException(e);
+    }
+  }
+
+  @Override
+  public Stream<HoodieFileGroup> getAllReplacedFileGroups(String partitionPath) {
+    Map<String, String> paramsMap = getParamsWithPartitionPath(partitionPath);
+    try {
+      List<FileGroupDTO> fileGroups = executeRequest(ALL_REPLACED_FILEGROUPS_PARTITION, paramsMap,
+          new TypeReference<List<FileGroupDTO>>() {}, RequestMethod.GET);
+      return fileGroups.stream().map(dto -> FileGroupDTO.toFileGroup(dto, metaClient));
+    } catch (IOException e) {
+      throw new HoodieRemoteException(e);
+    }
+  }
+
   public boolean refresh() {
     Map<String, String> paramsMap = getParams();
     try {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/TableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/TableFileSystemView.java
index 504f95a9ee089..7330286734a08 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/TableFileSystemView.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/TableFileSystemView.java
@@ -167,10 +167,20 @@ interface SliceView extends SliceViewWithLatestSlice {
   HoodieTimeline getTimeline();
   /**
-   * Stream all the replaced file groups before maxCommitTime.
+   * Stream all the replaced file groups before or on maxCommitTime for given partition.
    */
   Stream<HoodieFileGroup> getReplacedFileGroupsBeforeOrOn(String maxCommitTime, String partitionPath);
+  /**
+   * Stream all the replaced file groups before maxCommitTime for given partition.
+   */
+  Stream<HoodieFileGroup> getReplacedFileGroupsBefore(String maxCommitTime, String partitionPath);
+
+  /**
+   * Stream all the replaced file groups for given partition.
+   */
+  Stream<HoodieFileGroup> getAllReplacedFileGroups(String partitionPath);
+
   /**
    * Filegroups that are in pending clustering.
*/ diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java index 22d68c9e7585a..ed2a878767af2 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java +++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java @@ -23,11 +23,9 @@ import org.apache.hudi.avro.model.HoodieCleanerPlan; import org.apache.hudi.avro.model.HoodieRestoreMetadata; import org.apache.hudi.avro.model.HoodieRollbackMetadata; -import org.apache.hudi.avro.model.HoodieReplaceCommitMetadata; import org.apache.hudi.common.model.HoodieCommitMetadata; import org.apache.hudi.common.model.HoodieRecord; -import org.apache.hudi.common.model.HoodieWriteStat; -import org.apache.hudi.common.model.WriteOperationType; +import org.apache.hudi.common.model.HoodieReplaceCommitMetadata; import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieTimeline; @@ -96,8 +94,9 @@ public static Option> convertInstantToMetaRecords(HoodieTable // Nothing to be done here break; case HoodieTimeline.REPLACE_COMMIT_ACTION: - HoodieReplaceCommitMetadata replaceMetadata = TimelineMetadataUtils.deserializeHoodieReplaceMetadata( - timeline.getInstantDetails(instant).get()); + HoodieReplaceCommitMetadata replaceMetadata = HoodieReplaceCommitMetadata.fromBytes( + timeline.getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class); + // Note: we only add new files created here. Replaced files are removed from metadata later by cleaner. records = Option.of(convertMetadataToRecords(replaceMetadata, instant.getTimestamp())); break; default: @@ -107,24 +106,6 @@ public static Option> convertInstantToMetaRecords(HoodieTable return records; } - public static List convertMetadataToRecords(HoodieReplaceCommitMetadata replaceCommitMetadata, String instantTime) { - // treat the newly written files, as if they were a commit action. - HoodieCommitMetadata addedFilesMetadata = new HoodieCommitMetadata(); - addedFilesMetadata.setOperationType(WriteOperationType.valueOf(replaceCommitMetadata.getOperationType())); - replaceCommitMetadata.getExtraMetadata().forEach(addedFilesMetadata::addMetadata); - replaceCommitMetadata.getPartitionToWriteStats().forEach((k,v) -> v.forEach(s -> { - HoodieWriteStat writeStat = new HoodieWriteStat(); - // only set the few fields that are actually needed. - writeStat.setFileSizeInBytes(s.getFileSizeInBytes()); - writeStat.setTotalWriteBytes(s.getTotalWriteBytes()); - writeStat.setPath(s.getPath()); - writeStat.setNumDeletes(s.getNumDeletes()); - addedFilesMetadata.addWriteStat(k, writeStat); - } - )); - return convertMetadataToRecords(addedFilesMetadata, instantTime); - } - /** * Finds all new files/partitions created as part of commit and creates metadata table records for them. 
 *
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
index 3fceee3bb40c1..e103427d40728 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
@@ -1356,6 +1356,13 @@ public void testReplaceWithTimeTravel() throws IOException {
     List<HoodieFileGroup> allReplaced = fsView.getReplacedFileGroupsBeforeOrOn("2", partitionPath1).collect(Collectors.toList());
     assertEquals(1, allReplaced.size());
     assertEquals(fileId1, allReplaced.get(0).getFileGroupId().getFileId());
+
+    allReplaced = fsView.getReplacedFileGroupsBefore("2", partitionPath1).collect(Collectors.toList());
+    assertEquals(0, allReplaced.size());
+
+    allReplaced = fsView.getAllReplacedFileGroups(partitionPath1).collect(Collectors.toList());
+    assertEquals(1, allReplaced.size());
+    assertEquals(fileId1, allReplaced.get(0).getFileGroupId().getFileId());
   }
   @Test
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java
index 3663917a54d75..858e113734b58 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java
@@ -228,6 +228,11 @@ public HoodieTestTable forDeltaCommit(String instantTime) {
     return this;
   }
+  public HoodieTestTable forReplaceCommit(String instantTime) {
+    currentInstantTime = instantTime;
+    return this;
+  }
+
   public HoodieTestTable forCompaction(String instantTime) {
     currentInstantTime = instantTime;
     return this;
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
index e008fc542c413..b3e860af7001e 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/FileSystemViewHandler.java
@@ -299,6 +299,21 @@ private void registerFileSlicesAPI() {
       writeValueAsString(ctx, dtos);
     }, true));
+    app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_BEFORE, new ViewHandler(ctx -> {
+      List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsBefore(
+          ctx.validatedQueryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM).getOrThrow(),
+          ctx.queryParam(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM,""),
+          ctx.queryParam(RemoteHoodieTableFileSystemView.PARTITION_PARAM,""));
+      writeValueAsString(ctx, dtos);
+    }, true));
+
+    app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_PARTITION, new ViewHandler(ctx -> {
+      List<FileGroupDTO> dtos = sliceHandler.getAllReplacedFileGroups(
+          ctx.validatedQueryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM).getOrThrow(),
+          ctx.queryParam(RemoteHoodieTableFileSystemView.PARTITION_PARAM,""));
+      writeValueAsString(ctx, dtos);
+    }, true));
+
     app.get(RemoteHoodieTableFileSystemView.PENDING_CLUSTERING_FILEGROUPS, new ViewHandler(ctx -> {
       List<ClusteringOpDTO> dtos = sliceHandler.getFileGroupsInPendingClustering(
           ctx.validatedQueryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM).getOrThrow());
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
index 18c5eb17ccda7..2180e4ead46de 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
@@ -94,6 +94,16 @@ public List getReplacedFileGroupsBeforeOrOn(String basePath, Strin
         .collect(Collectors.toList());
   }
+  public List<FileGroupDTO> getReplacedFileGroupsBefore(String basePath, String maxCommitTime, String partitionPath) {
+    return viewManager.getFileSystemView(basePath).getReplacedFileGroupsBefore(maxCommitTime, partitionPath).map(FileGroupDTO::fromFileGroup)
+        .collect(Collectors.toList());
+  }
+
+  public List<FileGroupDTO> getAllReplacedFileGroups(String basePath, String partitionPath) {
+    return viewManager.getFileSystemView(basePath).getAllReplacedFileGroups(partitionPath).map(FileGroupDTO::fromFileGroup)
+        .collect(Collectors.toList());
+  }
+
   public List<ClusteringOpDTO> getFileGroupsInPendingClustering(String basePath) {
     return viewManager.getFileSystemView(basePath).getFileGroupsInPendingClustering()
         .map(fgInstant -> ClusteringOpDTO.fromClusteringOp(fgInstant.getLeft(), fgInstant.getRight()))