diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java
index a0e4281a11569..15c52fdc03b9a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java
@@ -118,15 +118,17 @@ public void testLiveIndexNoPinnedTimestamps() throws Exception {
         });
     }

-    public void testLiveIndexNoPinnedTimestampsWithMetadataSkippedOnLastDeletionCheck() throws Exception {
+    public void testLiveIndexNoPinnedTimestampsWithExtraGenSettingWithinLimit() throws Exception {
         prepareCluster(1, 1, Settings.EMPTY);
-        Settings indexSettings = Settings.builder().put(remoteStoreIndexSettings(0, 1)).build();
+        Settings indexSettings = Settings.builder()
+            .put(remoteStoreIndexSettings(0, 1))
+            .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 10)
+            .build();
         createIndex(INDEX_NAME, indexSettings);
         ensureYellowAndNoInitializingShards(INDEX_NAME);
         ensureGreen(INDEX_NAME);

-        // We don't set look-back interval to 0 as we want GC to skip based on last deletion check
-        // RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO);
+        RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO);

         RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance(
             RemoteStorePinnedTimestampService.class,
@@ -171,6 +173,61 @@ public void testLiveIndexNoPinnedTimestampsWithMetadataSkippedOnLastDeletionChec
         });
     }

+    public void testLiveIndexNoPinnedTimestampsWithExtraGenSetting() throws Exception {
+        prepareCluster(1, 1, Settings.EMPTY);
+        Settings indexSettings = Settings.builder()
+            .put(remoteStoreIndexSettings(0, 1))
+            .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 3)
+            .build();
+        createIndex(INDEX_NAME, indexSettings);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+
+        RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO);
+
+        RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance(
+            RemoteStorePinnedTimestampService.class,
+            primaryNodeName(INDEX_NAME)
+        );
+
+        remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1));
+
+        int numDocs = 5;
+        for (int i = 0; i < numDocs; i++) {
+            keepPinnedTimestampSchedulerUpdated();
+            indexSingleDoc(INDEX_NAME, true);
+        }
+
+        String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings());
+        String shardDataPath = getShardLevelBlobPath(
+            client(),
+            INDEX_NAME,
+            BlobPath.cleanPath(),
+            "0",
+            TRANSLOG,
+            DATA,
+            translogPathFixedPrefix
+        ).buildAsString();
+        Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1");
+        String shardMetadataPath = getShardLevelBlobPath(
+            client(),
+            INDEX_NAME,
+            BlobPath.cleanPath(),
+            "0",
+            TRANSLOG,
+            METADATA,
+            translogPathFixedPrefix
+        ).buildAsString();
+        Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath);
+
+        assertBusy(() -> {
+            List<Path> metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList());
+            assertEquals(4, metadataFiles.size());
+
+            verifyTranslogDataFileCount(metadataFiles, translogDataPath);
+        });
+    }
+
     public void testLiveIndexWithPinnedTimestamps() throws Exception {
         prepareCluster(1, 1, Settings.EMPTY);
         Settings indexSettings = Settings.builder()
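The two new tests above hinge on the translog extra-generation retention window controlled by INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING. A minimal sketch of the arithmetic they assert on, assuming minRemoteGenReferenced tracks the lowest translog generation the shard still references (the helper below is illustrative, not part of this patch):

    // Illustrative only: generations below the retention floor become deletion candidates.
    static boolean eligibleForDeletion(long generation, long minRemoteGenReferenced, long extraGenToKeep) {
        return generation < minRemoteGenReferenced - extraGenToKeep;
    }

With the setting at 10 and only five documents indexed, no generation falls below the floor (the "within limit" test); at 3, older generations get trimmed, which is why the second test can assert an exact metadata-file count.
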
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index cde63b37c1289..5be516166803e 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -817,10 +817,6 @@ Set<String> getMetadataFilesToFilterActiveSegments(
         return metadataFilesToFilterActiveSegments;
     }

-    public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException {
-        deleteStaleSegments(lastNMetadataFilesToKeep, Map.of());
-    }
-
     /**
      * Delete stale segment and metadata files
      * One metadata file is kept per commit (refresh updates the same file). To read segments uploaded to remote store,
@@ -836,7 +832,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException
      * @param lastNMetadataFilesToKeep number of metadata files to keep
      * @throws IOException in case of I/O error while reading from / writing to remote segment store
      */
-    private void deleteStaleSegments(int lastNMetadataFilesToKeep, Map<String, Long> pinnedTimestampsToSkip) throws IOException {
+    public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException {
        if (lastNMetadataFilesToKeep == -1) {
             logger.info(
                 "Stale segment deletion is disabled if cluster.remote_store.index.segment_metadata.retention.max_count is set to -1"
@@ -863,7 +859,7 @@ private void deleteStaleSegments(int lastNMetadataFilesToKeep, Map<String, Long>
             return;
         }

-        Tuple<Long, Set<Long>> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(pinnedTimestampsToSkip);
+        Tuple<Long, Set<Long>> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps();

         Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
             sortedMetadataFileList,
@@ -999,8 +995,7 @@ public static void remoteDirectoryCleanup(
         String indexUUID,
         ShardId shardId,
         RemoteStorePathStrategy pathStrategy,
-        boolean forceClean,
-        Map<String, Long> pinnedTimestampsToSkip
+        boolean forceClean
     ) {
         try {
             RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory(
@@ -1012,7 +1007,7 @@ public static void remoteDirectoryCleanup(
             if (forceClean) {
                 remoteSegmentStoreDirectory.delete();
             } else {
-                remoteSegmentStoreDirectory.deleteStaleSegments(0, pinnedTimestampsToSkip);
+                remoteSegmentStoreDirectory.deleteStaleSegments(0);
                 remoteSegmentStoreDirectory.deleteIfEmpty();
             }
         } catch (Exception e) {
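With the skip map gone, stale-segment deletion always consults the full pinned-timestamp state. A hedged usage sketch of the simplified entry points, assuming `directory` is an already-constructed RemoteSegmentStoreDirectory for the shard being cleaned:

    directory.deleteStaleSegments(0);  // keep zero trailing metadata files; pinned/locked files remain protected
    directory.deleteIfEmpty();         // then drop the remote directory once nothing is left
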
diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java
index cb8e9fbeba4ab..92b09188eb1ce 100644
--- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java
+++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java
@@ -61,7 +61,7 @@ public class RemoteFsTimestampAwareTranslog extends RemoteFsTranslog {
     private final Map<String, Tuple<Long, Long>> oldFormatMetadataFileGenerationMap;
     private final Map<String, Tuple<Long, Long>> oldFormatMetadataFilePrimaryTermMap;
     private final AtomicLong minPrimaryTermInRemote = new AtomicLong(Long.MAX_VALUE);
-    private long lastTimestampOfMetadataDeletionOnRemote = System.currentTimeMillis();
+    private long previousMinRemoteGenReferenced = -1;

     public RemoteFsTimestampAwareTranslog(
         TranslogConfig config,
@@ -148,11 +148,10 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal)

         // This code block ensures parity with RemoteFsTranslog. Without this, we will end up making list translog metadata
         // call in each invocation of trimUnreferencedReaders
-        if (indexDeleted == false
-            && (System.currentTimeMillis() - lastTimestampOfMetadataDeletionOnRemote <= RemoteStoreSettings
-                .getPinnedTimestampsLookbackInterval()
-                .millis() * 2)) {
+        if (indexDeleted == false && previousMinRemoteGenReferenced == minRemoteGenReferenced) {
             return;
+        } else if (previousMinRemoteGenReferenced != minRemoteGenReferenced) {
+            previousMinRemoteGenReferenced = minRemoteGenReferenced;
         }

         // Since remote generation deletion is async, this ensures that only one generation deletion happens at a time.
@@ -204,7 +203,7 @@ public void onResponse(List<BlobMetadata> blobMetadata) {
                     Set<Long> generationsToBeDeleted = getGenerationsToBeDeleted(
                         metadataFilesNotToBeDeleted,
                         metadataFilesToBeDeleted,
-                        indexDeleted ? Long.MAX_VALUE : minRemoteGenReferenced
+                        indexDeleted ? Long.MAX_VALUE : getMinGenerationToKeep()
                     );

                     logger.debug(() -> "generationsToBeDeleted = " + generationsToBeDeleted);
@@ -220,7 +219,6 @@ public void onResponse(List<BlobMetadata> blobMetadata) {
                     }

                     if (metadataFilesToBeDeleted.isEmpty() == false) {
-                        lastTimestampOfMetadataDeletionOnRemote = System.currentTimeMillis();
                         // Delete stale metadata files
                         translogTransferManager.deleteMetadataFilesAsync(
                             metadataFilesToBeDeleted,
@@ -248,11 +246,15 @@ public void onFailure(Exception e) {
         translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener);
     }

+    private long getMinGenerationToKeep() {
+        return minRemoteGenReferenced - indexSettings().getRemoteTranslogExtraKeep();
+    }
+
     // Visible for testing
     protected Set<Long> getGenerationsToBeDeleted(
         List<String> metadataFilesNotToBeDeleted,
         List<String> metadataFilesToBeDeleted,
-        long minRemoteGenReferenced
+        long minGenerationToKeep
     ) throws IOException {
         Set<Long> generationsFromMetadataFilesToBeDeleted = new HashSet<>();
         for (String mdFile : metadataFilesToBeDeleted) {
@@ -267,9 +269,9 @@ protected Set<Long> getGenerationsToBeDeleted(
         Set<Long> generationsToBeDeleted = new HashSet<>();
         for (long generation : generationsFromMetadataFilesToBeDeleted) {
             // Check if the generation is not referred by metadata file matching pinned timestamps
-            // The check with minRemoteGenReferenced is redundant but kept as to make sure we don't delete generations
+            // The check with minGenerationToKeep is redundant but kept to make sure we don't delete generations
             // that are not persisted in remote segment store yet.
-            if (generation < minRemoteGenReferenced && isGenerationPinned(generation, pinnedGenerations) == false) {
+            if (generation < minGenerationToKeep && isGenerationPinned(generation, pinnedGenerations) == false) {
                 generationsToBeDeleted.add(generation);
             }
         }
@@ -277,26 +279,18 @@ protected Set<Long> getGenerationsToBeDeleted(
     }

     protected List<String> getMetadataFilesToBeDeleted(List<String> metadataFiles, boolean indexDeleted) {
-        return getMetadataFilesToBeDeleted(
-            metadataFiles,
-            metadataFilePinnedTimestampMap,
-            minRemoteGenReferenced,
-            Map.of(),
-            indexDeleted,
-            logger
-        );
+        return getMetadataFilesToBeDeleted(metadataFiles, metadataFilePinnedTimestampMap, getMinGenerationToKeep(), indexDeleted, logger);
     }

     // Visible for testing
     protected static List<String> getMetadataFilesToBeDeleted(
         List<String> metadataFiles,
         Map<Long, String> metadataFilePinnedTimestampMap,
-        long minRemoteGenReferenced,
-        Map<String, Long> pinnedTimestampsToSkip,
+        long minGenerationToKeep,
         boolean indexDeleted,
         Logger logger
     ) {
-        Tuple<Long, Set<Long>> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(pinnedTimestampsToSkip);
+        Tuple<Long, Set<Long>> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps();

         // Keep files since last successful run of scheduler
         List<String> metadataFilesToBeDeleted = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
@@ -330,18 +324,18 @@ protected static List<String> getMetadataFilesToBeDeleted(
         );

         if (indexDeleted == false) {
-            // Filter out metadata files based on minRemoteGenReferenced
-            List<String> metadataFilesContainingMinRemoteGenReferenced = metadataFilesToBeDeleted.stream().filter(md -> {
+            // Filter out metadata files based on minGenerationToKeep
+            List<String> metadataFilesContainingMinGenerationToKeep = metadataFilesToBeDeleted.stream().filter(md -> {
                 long maxGeneration = TranslogTransferMetadata.getMaxGenerationFromFileName(md);
-                return maxGeneration == -1 || maxGeneration > minRemoteGenReferenced;
+                return maxGeneration == -1 || maxGeneration > minGenerationToKeep;
             }).collect(Collectors.toList());
-            metadataFilesToBeDeleted.removeAll(metadataFilesContainingMinRemoteGenReferenced);
+            metadataFilesToBeDeleted.removeAll(metadataFilesContainingMinGenerationToKeep);

             logger.trace(
-                "metadataFilesContainingMinRemoteGenReferenced.size = {}, metadataFilesToBeDeleted based on minRemoteGenReferenced filtering = {}, minRemoteGenReferenced = {}",
-                metadataFilesContainingMinRemoteGenReferenced.size(),
+                "metadataFilesContainingMinGenerationToKeep.size = {}, metadataFilesToBeDeleted based on minGenerationToKeep filtering = {}, minGenerationToKeep = {}",
+                metadataFilesContainingMinGenerationToKeep.size(),
                 metadataFilesToBeDeleted.size(),
-                minRemoteGenReferenced
+                minGenerationToKeep
             );
         }

@@ -505,11 +499,7 @@ protected static Tuple<Long, Long> getMinMaxPrimaryTermFromMetadataFile(
         }
     }

-    public static void cleanup(
-        TranslogTransferManager translogTransferManager,
-        boolean forceClean,
-        Map<String, Long> pinnedTimestampsToSkip
-    ) throws IOException {
+    public static void cleanup(TranslogTransferManager translogTransferManager, boolean forceClean) throws IOException {
         if (forceClean) {
             translogTransferManager.delete();
         } else {
@@ -527,7 +517,6 @@ public void onResponse(List<BlobMetadata> blobMetadata) {
                         metadataFiles,
                         new HashMap<>(),
                         Long.MAX_VALUE,
-                        pinnedTimestampsToSkip,
                         true,
                         staticLogger
                     );
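getMinGenerationToKeep() is the pivot of the translog change: the deletion floor is no longer minRemoteGenReferenced itself but that value minus the per-index extra-gen allowance. A worked example with assumed values:

    // Values assumed for illustration only.
    long minRemoteGenReferenced = 42;   // lowest translog generation still referenced by the shard
    long extraKeep = 3;                 // value of INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING
    long minGenerationToKeep = minRemoteGenReferenced - extraKeep;  // 39
    // Generations 39..42 survive; generations below 39 are deletion candidates,
    // unless a pinned timestamp still locks them.
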
diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java
index 6d801f9f5e50e..1448c46583f6a 100644
--- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java
+++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java
@@ -37,6 +37,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;

 /**
  * Service for managing pinned timestamps in a remote store.
@@ -47,8 +48,7 @@
 @ExperimentalApi
 public class RemoteStorePinnedTimestampService implements Closeable {
     private static final Logger logger = LogManager.getLogger(RemoteStorePinnedTimestampService.class);
-    private static Tuple<Long, Map<String, Set<Long>>> pinningEntityTimestampMap = new Tuple<>(-1L, Map.of());
-
+    private static Tuple<Long, Set<Long>> pinnedTimestampsSet = new Tuple<>(-1L, Set.of());
     public static final String PINNED_TIMESTAMPS_PATH_TOKEN = "pinned_timestamps";
     public static final String PINNED_TIMESTAMPS_FILENAME_SEPARATOR = "__";

@@ -199,23 +199,21 @@ public void cloneTimestamp(long timestamp, String existingPinningEntity, String
         }
     }

-    public static String getBlobName(long timestamp, String pinningEntity) {
+    private String getBlobName(long timestamp, String pinningEntity) {
         return String.join(PINNED_TIMESTAMPS_FILENAME_SEPARATOR, pinningEntity, String.valueOf(timestamp));
     }

-    public static Tuple<String, Long> getPinningEntityTimestampFromBlobName(String blobName) {
+    private long getTimestampFromBlobName(String blobName) {
         String[] blobNameTokens = blobName.split(PINNED_TIMESTAMPS_FILENAME_SEPARATOR);
         if (blobNameTokens.length < 2) {
             logger.error("Pinned timestamps blob name contains invalid format: {}", blobName);
         }
         try {
-            String pinningEntity = blobName.substring(blobName.lastIndexOf(PINNED_TIMESTAMPS_FILENAME_SEPARATOR));
-            Long timestamp = Long.parseLong(blobNameTokens[blobNameTokens.length - 1]);
-            return new Tuple<>(pinningEntity, timestamp);
+            return Long.parseLong(blobNameTokens[blobNameTokens.length - 1]);
         } catch (NumberFormatException e) {
             logger.error(() -> new ParameterizedMessage("Pinned timestamps blob name contains invalid format: {}", blobName), e);
         }
-        return null;
+        return -1;
     }

     /**
@@ -242,6 +240,10 @@ public void unpinTimestamp(long timestamp, String pinningEntity, ActionListener<
         }
     }

+    public void forceSyncPinnedTimestamps() {
+        asyncUpdatePinnedTimestampTask.run();
+    }
+
     @Override
     public void close() throws IOException {
         asyncUpdatePinnedTimestampTask.close();
@@ -250,32 +252,14 @@ public void close() throws IOException {
     // Used in integ tests
     public void rescheduleAsyncUpdatePinnedTimestampTask(TimeValue pinnedTimestampsSchedulerInterval) {
         if (pinnedTimestampsSchedulerInterval != null) {
-            pinningEntityTimestampMap = new Tuple<>(-1L, Map.of());
+            pinnedTimestampsSet = new Tuple<>(-1L, Set.of());
             asyncUpdatePinnedTimestampTask.close();
             startAsyncUpdateTask(pinnedTimestampsSchedulerInterval);
         }
     }

     public static Tuple<Long, Set<Long>> getPinnedTimestamps() {
-        return getPinnedTimestamps(null);
-    }
-
-    public static Tuple<Long, Set<Long>> getPinnedTimestamps(Map<String, Long> pinnedTimestampsToSkip) {
-        Set<Long> allPinnedTimestamps = new HashSet<>();
-        if (pinnedTimestampsToSkip == null || pinnedTimestampsToSkip.isEmpty()) {
-            pinningEntityTimestampMap.v2().values().forEach(allPinnedTimestamps::addAll);
-        } else {
-            for (String pinningEntity : pinningEntityTimestampMap.v2().keySet()) {
-                if (pinnedTimestampsToSkip.containsKey(pinningEntity)) {
-                    Set<Long> timestamps = new HashSet<>(pinningEntityTimestampMap.v2().get(pinningEntity));
-                    timestamps.remove(pinnedTimestampsToSkip.get(pinningEntity));
-                    allPinnedTimestamps.addAll(timestamps);
-                } else {
-                    allPinnedTimestamps.addAll(pinningEntityTimestampMap.v2().get(pinningEntity));
-                }
-            }
-        }
-        return new Tuple<>(pinningEntityTimestampMap.v1(), allPinnedTimestamps);
+        return pinnedTimestampsSet;
     }

     /**
@@ -298,22 +282,16 @@ protected void runInternal() {
             try {
                 Map<String, BlobMetadata> pinnedTimestampList = blobContainer.listBlobs();
                 if (pinnedTimestampList.isEmpty()) {
-                    logger.debug("Fetched empty pinned timestamps from remote store: {}", triggerTimestamp);
-                    pinningEntityTimestampMap = new Tuple<>(triggerTimestamp, Map.of());
+                    pinnedTimestampsSet = new Tuple<>(triggerTimestamp, Set.of());
                     return;
                 }
-                Map<String, Set<Long>> pinnedTimestamps = new HashMap<>();
-                for (String blobName : pinnedTimestampList.keySet()) {
-                    Tuple<String, Long> pinningEntityTimestamp = getPinningEntityTimestampFromBlobName(blobName);
-                    if (pinningEntityTimestamp != null) {
-                        if (pinnedTimestamps.containsKey(pinningEntityTimestamp.v1()) == false) {
-                            pinnedTimestamps.put(pinningEntityTimestamp.v1(), new HashSet<>());
-                        }
-                        pinnedTimestamps.get(pinningEntityTimestamp.v1()).add(pinningEntityTimestamp.v2());
-                    }
-                }
+                Set<Long> pinnedTimestamps = pinnedTimestampList.keySet()
+                    .stream()
+                    .map(RemoteStorePinnedTimestampService.this::getTimestampFromBlobName)
+                    .filter(timestamp -> timestamp != -1)
+                    .collect(Collectors.toSet());
                 logger.debug("Fetched pinned timestamps from remote store: {} - {}", triggerTimestamp, pinnedTimestamps);
-                pinningEntityTimestampMap = new Tuple<>(triggerTimestamp, pinnedTimestamps);
+                pinnedTimestampsSet = new Tuple<>(triggerTimestamp, pinnedTimestamps);
             } catch (Throwable t) {
                 logger.error("Exception while fetching pinned timestamp details", t);
             }
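The service now caches a flat set of timestamps instead of a per-entity map. The blob-name convention parsed by getTimestampFromBlobName joins the pinning entity and the timestamp with the "__" separator; a hypothetical example (entity name and value invented for illustration):

    String blobName = "snapshot-service__1726000000000";        // "<pinning-entity>__<epoch-millis>"
    String[] tokens = blobName.split("__");
    long timestamp = Long.parseLong(tokens[tokens.length - 1]); // 1726000000000L; the service returns -1 on parse failure
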
{}", timestampToUnpin, snapshotId.getName()); + try { + remoteStorePinnedTimestampService.forceSyncPinnedTimestamps(); + logger.debug("Successfully synced pinned timestamp state"); + } catch (Exception e) { + logger.warn("Exception while updating pinning timestamp state, snapshot deletion will continue", e); + } listener.onResponse(null); } @@ -1458,8 +1455,7 @@ public static void remoteDirectoryCleanupAsync( ShardId shardId, String threadPoolName, RemoteStorePathStrategy pathStrategy, - boolean forceClean, - Map pinnedTimestampsToSkip + boolean forceClean ) { threadpool.executor(threadPoolName) .execute( @@ -1470,8 +1466,7 @@ public static void remoteDirectoryCleanupAsync( indexUUID, shardId, pathStrategy, - forceClean, - pinnedTimestampsToSkip + forceClean ), indexUUID, shardId @@ -1528,8 +1523,7 @@ protected void releaseRemoteStoreLockAndCleanup( new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), ThreadPool.Names.REMOTE_PURGE, remoteStoreShardShallowCopySnapshot.getRemoteStorePathStrategy(), - false, - null + false ); } } @@ -2092,14 +2086,7 @@ private void executeOneStaleIndexDelete( deleteResult = deleteResult.add(cleanUpStaleSnapshotShardPathsFile(matchingShardPaths, snapshotShardPaths)); if (remoteSegmentStoreDirectoryFactory != null) { - cleanRemoteStoreDirectoryIfNeeded( - deletedSnapshots, - indexSnId, - oldRepoData, - remoteSegmentStoreDirectoryFactory, - new HashMap<>(), - true - ); + cleanRemoteStoreDirectoryIfNeeded(deletedSnapshots, indexSnId, oldRepoData, remoteSegmentStoreDirectoryFactory, true); } // Finally, we delete the [base_path]/indexId folder @@ -2157,7 +2144,6 @@ private void cleanRemoteStoreDirectoryIfNeeded( String indexSnId, RepositoryData oldRepoData, RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory, - Map snapshotIdPinnedTimestampMap, boolean forceClean ) { assert (indexSnId != null); @@ -2201,12 +2187,6 @@ private void cleanRemoteStoreDirectoryIfNeeded( prevIndexMetadata ); - String pinningEntity = SnapshotsService.getPinningEntity(getMetadata().name(), snapshotId.getUUID()); - Map pinnedTimestampsToSkip = new HashMap<>(); - if (snapshotIdPinnedTimestampMap.get(snapshotId) != null) { - pinnedTimestampsToSkip.put(pinningEntity, snapshotIdPinnedTimestampMap.get(snapshotId)); - } - for (int shardId = 0; shardId < prevIndexMetadata.getNumberOfShards(); shardId++) { ShardId shard = new ShardId(Index.UNKNOWN_INDEX_NAME, prevIndexMetadata.getIndexUUID(), shardId); remoteDirectoryCleanupAsync( @@ -2217,16 +2197,14 @@ private void cleanRemoteStoreDirectoryIfNeeded( shard, ThreadPool.Names.REMOTE_PURGE, remoteStorePathStrategy, - forceClean, - pinnedTimestampsToSkip + forceClean ); remoteTranslogCleanupAsync( remoteTranslogRepository, shard, remoteStorePathStrategy, prevIndexMetadata, - forceClean, - pinnedTimestampsToSkip + forceClean ); } } @@ -2252,8 +2230,7 @@ private void remoteTranslogCleanupAsync( ShardId shardId, RemoteStorePathStrategy remoteStorePathStrategy, IndexMetadata prevIndexMetadata, - boolean forceClean, - Map pinnedTimestampsToSkip + boolean forceClean ) { assert remoteTranslogRepository instanceof BlobStoreRepository; boolean indexMetadataEnabled = RemoteStoreUtils.determineTranslogMetadataEnabled(prevIndexMetadata); @@ -2270,7 +2247,7 @@ private void remoteTranslogCleanupAsync( indexMetadataEnabled ); try { - RemoteFsTimestampAwareTranslog.cleanup(translogTransferManager, forceClean, pinnedTimestampsToSkip); + RemoteFsTimestampAwareTranslog.cleanup(translogTransferManager, forceClean); } catch 
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index fe0b67c9816dc..df3df81361a12 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -566,8 +566,7 @@ public void testCleanupAsync() throws Exception {
             indexUUID,
             shardId,
             pathStrategy,
-            false,
-            Map.of()
+            false
         );
         verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathStrategy);
         verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE);
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java
index 799d858b7dd12..2a97c42346d31 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java
@@ -402,7 +402,7 @@ public void testMetadataFileDeletion() throws Exception {
         );
         updatePinnedTimstampTask.run();
         translog.trimUnreferencedReaders();
-        assertBusy(() -> { assertEquals(2, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); });
+        assertBusy(() -> { assertEquals(3, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); });
     }

     public void testMetadataFileDeletionWithPinnedTimestamps() throws Exception {
@@ -715,14 +715,7 @@ public void testGetMetadataFilesToBeDeletedNoExclusion() {

         assertEquals(
             metadataFiles,
-            RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted(
-                metadataFiles,
-                new HashMap<>(),
-                Long.MAX_VALUE,
-                Map.of(),
-                false,
-                logger
-            )
+            RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted(metadataFiles, new HashMap<>(), Long.MAX_VALUE, false, logger)
         );
     }

@@ -743,7 +736,6 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() {
             metadataFiles,
             new HashMap<>(),
             Long.MAX_VALUE,
-            Map.of(),
             false,
             logger
         );
@@ -772,7 +764,6 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnPinningOnly() throws
             metadataFiles,
             new HashMap<>(),
             Long.MAX_VALUE,
-            Map.of(),
             false,
             logger
         );
@@ -802,7 +793,6 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeAndPinning() throw
             metadataFiles,
             new HashMap<>(),
             Long.MAX_VALUE,
-            Map.of(),
             false,
             logger
         );
@@ -833,7 +823,6 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationOnly() thro
             metadataFiles,
             new HashMap<>(),
             10L,
-            Map.of(),
             false,
             logger
         );
@@ -865,7 +854,6 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationDeleteIndex
             metadataFiles,
             new HashMap<>(),
             10L,
-            Map.of(),
             true,
             logger
         );
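The test updates above are mechanical: the pinnedTimestampsToSkip argument disappears from getMetadataFilesToBeDeleted, leaving a five-parameter signature. A representative call mirroring the updated tests:

    List<String> toDelete = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted(
        metadataFiles,      // candidate metadata files
        new HashMap<>(),    // metadataFilePinnedTimestampMap
        Long.MAX_VALUE,     // minGenerationToKeep: no generation-based exclusion
        false,              // indexDeleted
        logger
    );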