From 56bc8fd00c68c75fd19ef9f2b3f54f5c102bb79a Mon Sep 17 00:00:00 2001 From: Pranit Kumar Date: Thu, 21 Aug 2025 07:29:23 +0530 Subject: [PATCH 01/27] Initial changes for Remote store SSE support Signed-off-by: Pranit Kumar --- .../cluster/metadata/IndexMetadata.java | 36 ++- .../metadata/MetadataCreateIndexService.java | 42 ++-- .../org/opensearch/index/IndexService.java | 3 +- .../org/opensearch/index/IndexSettings.java | 31 +++ .../RemoteStoreMergedSegmentWarmer.java | 67 ++++++ .../index/remote/RemoteIndexPathUploader.java | 19 ++ .../RemoteMigrationIndexMetadataUpdater.java | 8 +- .../opensearch/index/shard/IndexShard.java | 1 + .../RemoteSegmentStoreDirectoryFactory.java | 45 +++- .../store/RemoteStoreFileDownloader.java | 9 + .../CompositeRemoteRepository.java | 58 +++++ .../remotestore/RemoteStoreNodeAttribute.java | 219 +++++++++++++----- .../opensearch/snapshots/RestoreService.java | 3 +- 13 files changed, 463 insertions(+), 78 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java create mode 100644 server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index ad7b8e78744ef..a1e7adfae648f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -371,10 +371,10 @@ public Iterator> settings() { ); public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled"; + public static final String SETTING_REMOTE_STORE_SSE_ENABLED = "index.remote_store.sse.enabled"; public static final String SETTING_INDEX_APPEND_ONLY_ENABLED = "index.append_only.enabled"; public static final String SETTING_REMOTE_SEGMENT_STORE_REPOSITORY = "index.remote_store.segment.repository"; - public static final 
String SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY = "index.remote_store.translog.repository"; /** @@ -414,6 +414,40 @@ public Iterator> settings() { Property.Dynamic ); + /** + * Used to specify if the index data should be persisted in the remote store. + */ + public static final Setting INDEX_REMOTE_STORE_SSE_ENABLED_SETTING = Setting.boolSetting( + SETTING_REMOTE_STORE_SSE_ENABLED, + false, + new Setting.Validator<>() { + + @Override + public void validate(final Boolean value) {} + + @Override + public void validate(final Boolean value, final Map, Object> settings) { + final Boolean isRemoteStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (!isRemoteStoreEnabled) { + throw new IllegalArgumentException( + "Server Side Encryption can be enabled when " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + " is enabled. " + ); + } + } + + @Override + public Iterator> settings() { + final List> settings = List.of(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, + Property.IndexScope, + Property.PrivateIndex, + Property.Dynamic + ); + /** * Used to specify if the index data should be persisted in the remote store. 
*/ diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index a889091140d12..42c501b1c0d7f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1056,7 +1056,7 @@ static Settings aggregateIndexSettings( indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings, combinedTemplateSettings, clusterSettings); - updateRemoteStoreSettings(indexSettingsBuilder, currentState, clusterSettings, settings, request.index()); + updateRemoteStoreSettings(indexSettingsBuilder, currentState, clusterSettings, settings, request.index(), false); if (sourceMetadata != null) { assert request.resizeType() != null; @@ -1162,11 +1162,19 @@ public static void updateRemoteStoreSettings( ClusterState clusterState, ClusterSettings clusterSettings, Settings nodeSettings, - String indexName + String indexName, + boolean isRestoreFromSnapshot ) { if ((isRemoteDataAttributePresent(nodeSettings) && clusterSettings.get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(RemoteStoreNodeService.CompatibilityMode.STRICT)) || isMigratingToRemoteStore(clusterSettings)) { + + if (!isRestoreFromSnapshot) { + if (indexName.startsWith("sse-rp")) { + settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); + } + } + String segmentRepo, translogRepo; Optional remoteNode = clusterState.nodes() @@ -1177,21 +1185,21 @@ public static void updateRemoteStoreSettings( .findFirst(); if (remoteNode.isPresent()) { - translogRepo = RemoteStoreNodeAttribute.getTranslogRepoName(remoteNode.get().getAttributes()); - segmentRepo = RemoteStoreNodeAttribute.getSegmentRepoName(remoteNode.get().getAttributes()); - if (segmentRepo != null) { - 
settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true).put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, segmentRepo); - if (translogRepo != null) { - settingsBuilder.put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, translogRepo); - } else if (isMigratingToRemoteStore(clusterSettings)) { - ValidationException validationException = new ValidationException(); - validationException.addValidationErrors( - Collections.singletonList( - "Cluster is migrating to remote store but remote translog is not configured, failing index creation" - ) - ); - throw new IndexCreationException(indexName, validationException); - } + Map indexSettings = settingsBuilder.keys().stream() + .collect(Collectors.toMap(key -> key, settingsBuilder::get)); + + Settings.Builder currentSettingsBuilder = Settings.builder(); + Settings currentIndexSettings = currentSettingsBuilder.loadFromMap(indexSettings).build(); + + translogRepo = RemoteStoreNodeAttribute.getTranslogRepoName(remoteNode.get().getAttributes(), currentIndexSettings); + segmentRepo = RemoteStoreNodeAttribute.getSegmentRepoName(remoteNode.get().getAttributes(), currentIndexSettings); + + System.out.println("MetadataCreateIndexService.updateRemoteStoreSettings trepo " + translogRepo + ", srepo " + segmentRepo); + + if (segmentRepo != null && translogRepo != null) { + settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) + .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, segmentRepo) + .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, translogRepo); } else { ValidationException validationException = new ValidationException(); validationException.addValidationErrors( diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 22441df923bf8..1d2e2219a2f02 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -701,7 +701,8 @@ public synchronized IndexShard createShard( 
this.indexSettings.getUUID(), shardId, this.indexSettings.getRemoteStorePathStrategy(), - this.indexSettings.getRemoteStoreSegmentPathPrefix() + this.indexSettings.getRemoteStoreSegmentPathPrefix(), + this.indexSettings.isRemoteStoreSSEnabled() ); } // When an instance of Store is created, a shardlock is created which is released on closing the instance of store. diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index a10f9d8152a79..96e192eb778d6 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -950,6 +950,8 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private final boolean isCompositeIndex; + private boolean isRemoteStoreSSEnabled; + /** * Denotes whether search via star tree index is enabled for this index */ @@ -1036,9 +1038,24 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti replicationType = IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.get(settings); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); + isRemoteStoreSSEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false); + System.out.println("IndexSettings.IndexSettings isRemoteStoreDirectorySSEnabled " + isRemoteStoreSSEnabled); + isWarmIndex = settings.getAsBoolean(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false); remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); + remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); + + System.out.println("IndexSettings.IndexSettings remoteStoreRepository " + remoteStoreRepository + + " remoteStoreTranslogRepository " + remoteStoreTranslogRepository) ; + + if (isRemoteStoreSSEnabled) { + remoteStoreRepository = 
RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(nodeSettings, true); + remoteStoreTranslogRepository = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(this.getNodeSettings(), true); + } + System.out.println("2. IndexSettings.IndexSettings remoteStoreRepository " + remoteStoreRepository + + " remoteStoreTranslogRepository " + remoteStoreTranslogRepository) ; + remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); @@ -1239,6 +1256,9 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti ); scopedSettings.addSettingsUpdateConsumer(ALLOW_DERIVED_FIELDS, this::setAllowDerivedField); scopedSettings.addSettingsUpdateConsumer(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, this::setRemoteStoreEnabled); + + scopedSettings.addSettingsUpdateConsumer(IndexMetadata.INDEX_REMOTE_STORE_SSE_ENABLED_SETTING, this::setRemoteStoreSseEnabled); + scopedSettings.addSettingsUpdateConsumer( IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, this::setRemoteStoreRepository @@ -1427,6 +1447,13 @@ public boolean isRemoteStoreEnabled() { return isRemoteStoreEnabled; } + /** + * Returns if remote store is enabled for this index. 
+ */ + public boolean isRemoteStoreSSEnabled() { + return isRemoteStoreSSEnabled; + } + public boolean isAssignedOnRemoteNode() { return assignedOnRemoteNode; } @@ -2137,6 +2164,10 @@ public void setRemoteStoreEnabled(boolean isRemoteStoreEnabled) { this.isRemoteStoreEnabled = isRemoteStoreEnabled; } + public void setRemoteStoreSseEnabled(boolean sseEnabled) { + this.isRemoteStoreSSEnabled = sseEnabled; + } + public void setRemoteStoreRepository(String remoteStoreRepository) { this.remoteStoreRepository = remoteStoreRepository; } diff --git a/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java new file mode 100644 index 0000000000000..f3d5f8a4cc93b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. 
See + * GitHub history for details. + */ + +package org.opensearch.index.engine; + +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReader; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Implementation of a {@link IndexWriter.IndexReaderWarmer} when remote store is enabled. + * + * @opensearch.internal + */ +public class RemoteStoreMergedSegmentWarmer implements IndexWriter.IndexReaderWarmer { + private final TransportService transportService; + private final RecoverySettings recoverySettings; + private final ClusterService clusterService; + + public RemoteStoreMergedSegmentWarmer( + TransportService transportService, + RecoverySettings recoverySettings, + ClusterService clusterService + ) { + this.transportService = transportService; + this.recoverySettings = recoverySettings; + this.clusterService = clusterService; + } + + @Override + public void warm(LeafReader leafReader) throws IOException { + // TODO: remote store merged segment warmer + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java index 18b6d6184d1b0..de84e0de2d16d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java @@ -81,6 +81,9 @@ public class RemoteIndexPathUploader extends IndexMetadataUploadListener { private BlobStoreRepository translogRepository; private BlobStoreRepository segmentRepository; + private BlobStoreRepository translogSSERepository; + private BlobStoreRepository segmentSSERepository; + public RemoteIndexPathUploader( ThreadPool threadPool, Settings settings, @@ -174,11 +177,24 @@ private void writeIndexPathAsync(IndexMetadata idxMD, CountDownLatch 
latch, List if (isTranslogSegmentRepoSame) { // If the repositories are same, then we need to upload a single file containing paths for both translog and segments. writePathToRemoteStore(idxMD, translogRepository, latch, exceptionList, COMBINED_PATH); + + if (translogSSERepository != null) { + writePathToRemoteStore(idxMD, translogSSERepository, latch, exceptionList, COMBINED_PATH); + } + } else { // If the repositories are different, then we need to upload one file per segment and translog containing their individual // paths. writePathToRemoteStore(idxMD, translogRepository, latch, exceptionList, TRANSLOG_PATH); writePathToRemoteStore(idxMD, segmentRepository, latch, exceptionList, SEGMENT_PATH); + + if (translogSSERepository != null) { + writePathToRemoteStore(idxMD, translogSSERepository, latch, exceptionList, TRANSLOG_PATH); + } + + if (segmentSSERepository != null) { + writePathToRemoteStore(idxMD, segmentSSERepository, latch, exceptionList, SEGMENT_PATH); + } } } @@ -236,6 +252,9 @@ public void start() { translogRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings)); segmentRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings)); + + translogSSERepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings, true)); + segmentSSERepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings, true)); } private boolean isTranslogSegmentRepoSame() { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java index 1f9ffca4460b7..83ec8fd377cb9 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java +++ 
b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java @@ -73,8 +73,12 @@ public void maybeAddRemoteIndexSettings(IndexMetadata.Builder indexMetadataBuild index ); Map remoteRepoNames = getRemoteStoreRepoName(discoveryNodes); - String segmentRepoName = RemoteStoreNodeAttribute.getSegmentRepoName(remoteRepoNames); - String tlogRepoName = RemoteStoreNodeAttribute.getTranslogRepoName(remoteRepoNames); + System.out.println("RemoteMigrationIndexMetadataUpdater.maybeAddRemoteIndexSettings Remote repo Names are " + remoteRepoNames); + + String segmentRepoName = RemoteStoreNodeAttribute.getSegmentRepoName(remoteRepoNames, currentIndexSettings); + String tlogRepoName = RemoteStoreNodeAttribute.getTranslogRepoName(remoteRepoNames, currentIndexSettings); + + System.out.println("Index name is " + indexMetadata.getIndex().getName() + " Seg repo name " + segmentRepoName + " tlogRepoName " + tlogRepoName); assert Objects.nonNull(segmentRepoName) && Objects.nonNull(tlogRepoName) : "Remote repo names cannot be null"; Settings.Builder indexSettingsBuilder = Settings.builder().put(currentIndexSettings); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 609a6290d36ce..aac1d81cc0117 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -5311,6 +5311,7 @@ public void syncTranslogFilesFromGivenRemoteTranslog( boolean isTranslogMetadataEnabled, long timestamp ) throws IOException { + boolean sseEnabled = indexSettings.isRemoteStoreSSEnabled(); RemoteFsTranslog.download( repository, shardId, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 35aba694729cb..f4cc8858ada59 100644 --- 
a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.Directory; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteStorePathStrategy; @@ -65,17 +66,52 @@ public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throw public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) throws IOException { - return newDirectory(repositoryName, indexUUID, shardId, pathStrategy, null); + return newDirectory(repositoryName, indexUUID, shardId, pathStrategy, null, false); } +// public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) +// throws IOException { +// return this.newDirectory(repositoryName, indexUUID, shardId, pathStrategy, false); +// } + + +// @Override +// public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { +// String repositoryName = indexSettings.getRemoteStoreRepository(); +// String indexUUID = indexSettings.getIndex().getUUID(); +// RemoteSegmentStoreDirectory directory = null; +// try { +// boolean serverSideEncryptionEnabled = indexSettings.isRemoteStoreSSEnabled(); +// System.out.println("[pranikum]: RemoteSegmentStoreDirectoryFactory.newDirectory Index name is " +// + indexSettings.getIndex().getName() + " SSE Value is " + serverSideEncryptionEnabled); +// +// System.out.println("repositoryName = " + repositoryName); +// +// directory = (RemoteSegmentStoreDirectory) newDirectory( +// repositoryName, +// indexUUID, +// path.getShardId(), +// 
indexSettings.getRemoteStorePathStrategy(), +// null, +// serverSideEncryptionEnabled +// ); +// +// } catch (IOException e) { +// e.printStackTrace(); +// } +// return directory; +// } + public Directory newDirectory( String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy, - String indexFixedPrefix + String indexFixedPrefix, + boolean isSSEEnabled ) throws IOException { assert Objects.nonNull(pathStrategy); + System.out.println("RemoteSegmentStoreDirectoryFactory.newDirectory repository Name is " + repositoryName); try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; @@ -84,6 +120,7 @@ public Directory newDirectory( String shardIdStr = String.valueOf(shardId.id()); Map pendingDownloadMergedSegments = new ConcurrentHashMap<>(); + BlobStore blobStore = blobStoreRepository.blobStore(); RemoteStorePathStrategy.ShardDataPathInput dataPathInput = RemoteStorePathStrategy.ShardDataPathInput.builder() .basePath(repositoryBasePath) .indexUUID(indexUUID) @@ -96,7 +133,7 @@ public Directory newDirectory( // Derive the path for data directory of SEGMENTS BlobPath dataPath = pathStrategy.generatePath(dataPathInput); RemoteDirectory dataDirectory = new RemoteDirectory( - blobStoreRepository.blobStore().blobContainer(dataPath), + blobStore.blobContainer(dataPath), blobStoreRepository::maybeRateLimitRemoteUploadTransfers, blobStoreRepository::maybeRateLimitLowPriorityRemoteUploadTransfers, blobStoreRepository::maybeRateLimitRemoteDownloadTransfers, @@ -115,7 +152,7 @@ public Directory newDirectory( .build(); // Derive the path for metadata directory of SEGMENTS BlobPath mdPath = pathStrategy.generatePath(mdPathInput); - RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath)); + RemoteDirectory metadataDirectory = new 
RemoteDirectory(blobStore.blobContainer(mdPath)); // The path for lock is derived within the RemoteStoreLockManagerFactory RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java index ad42b6d677b41..bcbf50b1fe00f 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -114,6 +114,15 @@ private void downloadInternal( Runnable onFileCompletion, ActionListener listener ) { + try { + logger.info("[pranikum]: Going to download segment file. Stack trace is below "); + if (listener != null) { + logger.info("Listener class is " + listener.getClass().getName()); + } + throw new Exception(); + } catch (Exception e) { + e.printStackTrace(); + } final Queue queue = new ConcurrentLinkedQueue<>(toDownloadSegments); // Choose the minimum of: // - number of files to download diff --git a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java new file mode 100644 index 0000000000000..dcd98f301f336 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.remotestore; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.HashMap; +import java.util.Map; + +/** + * Composite Repository for the ServerSideEncryption support. 
+ */ +public class CompositeRemoteRepository { + + private final Map> repositoryEncryptionTypeMap; + + public CompositeRemoteRepository() { + repositoryEncryptionTypeMap = new HashMap<>(); + } + + public void registerCompositeRepository(RemoteStoreRepositoryType repositoryType, CompositeRepositoryEncryptionType type, RepositoryMetadata metadata) { + + Map encryptionTypeMap = repositoryEncryptionTypeMap.get(repositoryType); + if (encryptionTypeMap == null) { + encryptionTypeMap = new HashMap<>(); + } + encryptionTypeMap.put(type, metadata); + + repositoryEncryptionTypeMap.put(repositoryType, encryptionTypeMap); + } + + public RepositoryMetadata getRepository(RemoteStoreRepositoryType repositoryType, CompositeRepositoryEncryptionType encryptionType) { + return repositoryEncryptionTypeMap.get(repositoryType).get(encryptionType); + } + + /** + * Enum for Remote store repo types + */ + public enum RemoteStoreRepositoryType { + SEGMENT, + TRANSLOG + } + + /** + * Enum for composite repo types + */ + public enum CompositeRepositoryEncryptionType { + CLIENT, + SERVER + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 56c3af3410643..60ce89f4d4829 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -9,6 +9,7 @@ package org.opensearch.node.remotestore; import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -37,12 +38,20 @@ */ public class RemoteStoreNodeAttribute { + private static final String REMOTE_STORE_TRANSLOG_REPO_PREFIX = "translog"; + private static final String 
REMOTE_STORE_TRANSLOG_SSE_REPO_PREFIX = "translog.sse"; + + private static final String REMOTE_STORE_SEGMENT_REPO_PREFIX = "segment"; + private static final String REMOTE_STORE_SEGMENT_SSE_REPO_PREFIX = "segment.sse"; + public static final List REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = List.of("remote_store", "remote_publication"); // TO-DO the string constants are used only for tests and can be moved to test package public static final String REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.state.repository"; public static final String REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.segment.repository"; + public static final String REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.translog.repository"; + public static final String REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.routing_table.repository"; public static final List REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() @@ -52,13 +61,23 @@ public class RemoteStoreNodeAttribute { public static final List REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() .map(prefix -> prefix + ".routing_table.repository") .collect(Collectors.toList()); + public static final List REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() .map(prefix -> prefix + ".segment.repository") .collect(Collectors.toList()); + + public static final List REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() + .map(prefix -> prefix + ".segment.sse.repository") + .collect(Collectors.toList()); + public static final List REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() .map(prefix -> prefix + ".translog.repository") .collect(Collectors.toList()); + public static final List REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS = 
REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() + .map(prefix -> prefix + ".translog.sse.repository") + .collect(Collectors.toList()); + public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type"; public static final String REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s." + CryptoMetadata.CRYPTO_METADATA_KEY; @@ -74,32 +93,38 @@ public class RemoteStoreNodeAttribute { + CryptoMetadata.SETTINGS_KEY; public static final String REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "%s.repository.%s.settings."; + public static final String REPOSITORY_SETTINGS_ATTRIBUTE_ENCRYPTION_TYPE = "encryption_type"; + private final RepositoriesMetadata repositoriesMetadata; public static List> SUPPORTED_DATA_REPO_NAME_ATTRIBUTES = Arrays.asList( REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, - REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS + REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS, + REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS, + REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS ); - public static final String REMOTE_STORE_MODE_KEY = "remote_store.mode"; + private static CompositeRemoteRepository compositeRemoteRepository; + private static Map repositoryMetadataMap; /** * Creates a new {@link RemoteStoreNodeAttribute} */ public RemoteStoreNodeAttribute(DiscoveryNode node) { + repositoryMetadataMap = new HashMap<>(); + compositeRemoteRepository = new CompositeRemoteRepository(); this.repositoriesMetadata = buildRepositoriesMetadata(node); } - private String validateAttributeNonNull(DiscoveryNode node, String attributeKey) { + private String getAndValidateNodeAttribute(DiscoveryNode node, String attributeKey) { String attributeValue = node.getAttributes().get(attributeKey); if (attributeValue == null || attributeValue.isEmpty()) { throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKey + "]"); } - return attributeValue; } - private Tuple 
validateAttributeNonNull(DiscoveryNode node, List attributeKeys) { + private Tuple getAndValidateNodeAttributeEntries(DiscoveryNode node, List attributeKeys) { Tuple attributeValue = getValue(node.getAttributes(), attributeKeys); if (attributeValue == null || attributeValue.v1() == null || attributeValue.v1().isEmpty()) { throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKeys.get(0) + "]"); @@ -111,12 +136,14 @@ private Tuple validateAttributeNonNull(DiscoveryNode node, List< private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName, String prefix) { String metadataKey = String.format(Locale.getDefault(), REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, prefix, repositoryName); boolean isRepoEncrypted = node.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(metadataKey)); - if (isRepoEncrypted == false) { + System.out.println("isRepoEncrypted = " + isRepoEncrypted); + + if (!isRepoEncrypted) { return null; } - String keyProviderName = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_NAME_KEY); - String keyProviderType = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_TYPE_KEY); + String keyProviderName = getAndValidateNodeAttribute(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_NAME_KEY); + String keyProviderType = getAndValidateNodeAttribute(node, metadataKey + "." 
+ CryptoMetadata.KEY_PROVIDER_TYPE_KEY); String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REPOSITORY_CRYPTO_SETTINGS_PREFIX, prefix, repositoryName); @@ -132,7 +159,7 @@ private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repository return new CryptoMetadata(keyProviderName, keyProviderType, settings.build()); } - private Map validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName, String prefix) { + private Map getSettingAttribute(DiscoveryNode node, String repositoryName, String prefix) { String settingsAttributeKeyPrefix = String.format( Locale.getDefault(), REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, @@ -143,7 +170,7 @@ private Map validateSettingsAttributesNonNull(DiscoveryNode node .keySet() .stream() .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) - .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> validateAttributeNonNull(node, key))); + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> getAndValidateNodeAttribute(node, key))); if (settingsMap.isEmpty()) { throw new IllegalStateException( @@ -155,11 +182,12 @@ private Map validateSettingsAttributesNonNull(DiscoveryNode node } private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name, String prefix) { - String type = validateAttributeNonNull( + System.out.println("RemoteStoreNodeAttribute.buildRepositoryMetadata " + name); + String type = getAndValidateNodeAttribute( node, String.format(Locale.getDefault(), REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, prefix, name) ); - Map settingsMap = validateSettingsAttributesNonNull(node, name, prefix); + Map settingsMap = getSettingAttribute(node, name, prefix); Settings.Builder settings = Settings.builder(); settingsMap.forEach(settings::put); @@ -173,16 +201,53 @@ private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String na } private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode 
node) { - Map repositoryNamesWithPrefix = getValidatedRepositoryNames(node); + Map remoteStoryTypeToRepoNameMap = new HashMap<>(); + Map repositoryNamesWithPrefix = getValidatedRepositoryNames(node, remoteStoryTypeToRepoNameMap); + + for (Map.Entry repositoryNameEntry : repositoryNamesWithPrefix.entrySet()) { + System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata rep entry " + + repositoryNameEntry.getKey() + " -> " + repositoryNameEntry.getValue()); + } + List repositoryMetadataList = new ArrayList<>(); for (Map.Entry repository : repositoryNamesWithPrefix.entrySet()) { - repositoryMetadataList.add(buildRepositoryMetadata(node, repository.getKey(), repository.getValue())); + System.out.println("repository key is = " + repository.getKey() + ", value = " + repository.getValue()); + RepositoryMetadata repositoryMetadata = buildRepositoryMetadata(node, repository.getKey(), repository.getValue()); + repositoryMetadataList.add(repositoryMetadata); + + System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata" + isCompositeRepository(repositoryMetadata)); + System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata " + repositoryMetadata.name()); + repositoryMetadataMap.put(repositoryMetadata.name(), repositoryMetadata); + } + + // Let's Iterate over repo's and build Composite Repository structure + for (Map.Entry repositoryTypeToNameEntry : remoteStoryTypeToRepoNameMap.entrySet()) { + CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + if (repositoryTypeToNameEntry.getKey().contains("sse")) { + encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; + } + CompositeRemoteRepository.RemoteStoreRepositoryType remoteStoreRepositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; + if (repositoryTypeToNameEntry.getKey().contains("translog")) { + remoteStoreRepositoryType = 
CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; + } + + System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata rep type " + remoteStoreRepositoryType + + " Enc Type " + encryptionType + "RepttpeToNamevalue " + repositoryTypeToNameEntry.getValue() + + " Rep Metadata " + repositoryMetadataMap.get(repositoryTypeToNameEntry.getValue())); + + compositeRemoteRepository.registerCompositeRepository(remoteStoreRepositoryType, + encryptionType, + repositoryMetadataMap.get(repositoryTypeToNameEntry.getValue())); } return new RepositoriesMetadata(repositoryMetadataList); } + private boolean isCompositeRepository(RepositoryMetadata repositoryMetadata) { + return repositoryMetadata.settings().hasValue(REPOSITORY_SETTINGS_ATTRIBUTE_ENCRYPTION_TYPE); + } + private static Tuple getValue(Map attributes, List keys) { for (String key : keys) { if (attributes.containsKey(key)) { @@ -192,43 +257,24 @@ private static Tuple getValue(Map attributes, Li return null; } - private enum RemoteStoreMode { - SEGMENTS_ONLY, - DEFAULT - } - - private Map getValidatedRepositoryNames(DiscoveryNode node) { + private Map getValidatedRepositoryNames(DiscoveryNode node, Map remoteStoryTypeToRepoNameMap) { Set> repositoryNames = new HashSet<>(); - RemoteStoreMode remoteStoreMode = RemoteStoreMode.DEFAULT; - if (containsKey(node.getAttributes(), List.of(REMOTE_STORE_MODE_KEY))) { - String mode = node.getAttributes().get(REMOTE_STORE_MODE_KEY); - if (mode != null && mode.equalsIgnoreCase(RemoteStoreMode.SEGMENTS_ONLY.name())) { - remoteStoreMode = RemoteStoreMode.SEGMENTS_ONLY; - } else if (mode != null && mode.equalsIgnoreCase(RemoteStoreMode.DEFAULT.name()) == false) { - throw new IllegalStateException("Unknown remote store mode [" + mode + "] for node [" + node + "]"); - } - } - if (remoteStoreMode == RemoteStoreMode.SEGMENTS_ONLY) { - repositoryNames.add(validateAttributeNonNull(node, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - } else if 
(containsKey(node.getAttributes(), REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) + if (containsKey(node.getAttributes(), REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) || containsKey(node.getAttributes(), REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { - repositoryNames.add(validateAttributeNonNull(node, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - repositoryNames.add(validateAttributeNonNull(node, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - repositoryNames.add(validateAttributeNonNull(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - } else if (containsKey(node.getAttributes(), REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { - repositoryNames.add(validateAttributeNonNull(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - } + + addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_SEGMENT_REPO_PREFIX); + addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_SEGMENT_SSE_REPO_PREFIX); + addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_TRANSLOG_REPO_PREFIX); + addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_TRANSLOG_SSE_REPO_PREFIX); + + repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + + } else if (containsKey(node.getAttributes(), REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + } + if (containsKey(node.getAttributes(), REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { - if (remoteStoreMode == RemoteStoreMode.SEGMENTS_ONLY) { - throw new IllegalStateException( - "Cannot set " - + 
REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS - + " attributes when remote store mode is set to segments only for node [" - + node - + "]" - ); - } - repositoryNames.add(validateAttributeNonNull(node, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); } Map repoNamesWithPrefix = new HashMap<>(); @@ -240,6 +286,17 @@ private Map getValidatedRepositoryNames(DiscoveryNode node) { return repoNamesWithPrefix; } + private void addRepositoryNames(DiscoveryNode node, + Map remoteStoryTypeToRepoNameMap, + Set> repositoryNames, + List attributeKeys, + String remoteStoreRepoPrefix) { + Tuple remoteTranslogSseAttributeKeyMap = getAndValidateNodeAttributeEntries(node, attributeKeys); + remoteStoryTypeToRepoNameMap.put(remoteStoreRepoPrefix, remoteTranslogSseAttributeKeyMap.v1()); + repositoryNames.add(remoteTranslogSseAttributeKeyMap); + } + + public static boolean isRemoteStoreAttributePresent(Settings settings) { for (String prefix : REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX) { if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { @@ -271,6 +328,16 @@ public static boolean isTranslogRepoConfigured(Settings settings) { return false; } + public static boolean isServerSideEncryptionRepoConfigured(Settings settings) { + boolean isServerSideEncryptionConfigured = false; + for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { + isServerSideEncryptionConfigured = true; + } + } + return isServerSideEncryptionConfigured; + } + public static boolean isRemoteClusterStateConfigured(Settings settings) { for (String prefix : REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { @@ -289,6 +356,19 @@ public static String 
getRemoteStoreSegmentRepo(Settings settings) { return null; } + public static String getRemoteStoreSegmentRepo(Settings settings, boolean sseEnabled) { + if (sseEnabled) { + for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); + } + } + } else { + return getRemoteStoreSegmentRepo(settings); + } + return null; + } + public static String getRemoteStoreTranslogRepo(Settings settings) { for (String prefix : REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS) { if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { @@ -298,6 +378,20 @@ public static String getRemoteStoreTranslogRepo(Settings settings) { return null; } + public static String getRemoteStoreTranslogRepo(Settings settings, boolean sseEnabled) { + if (sseEnabled) { + for (String prefix : REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { + if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); + } + } + } else { + return getRemoteStoreTranslogRepo(settings); + } + + return null; + } + public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteClusterStateConfigured(settings); } @@ -355,12 +449,32 @@ public static String getRoutingTableRepoName(Map repos) { return getValueFromAnyKey(repos, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS); } - public static String getSegmentRepoName(Map repos) { - return getValueFromAnyKey(repos, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS); + public static String getSegmentRepoName(Map repos, Settings indexSettings) { + + CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = + CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; + + CompositeRemoteRepository.CompositeRepositoryEncryptionType 
encryptionType = + CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + if (indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)) { + encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; + } + //getValueFromAnyKey(repos, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS); + return compositeRemoteRepository.getRepository(repositoryType, encryptionType).name(); + //return getValueFromAnyKey(repos, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS); } - public static String getTranslogRepoName(Map repos) { - return getValueFromAnyKey(repos, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS); + public static String getTranslogRepoName(Map repos, Settings indexSettings) { + CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = + CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; + + CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = + CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + if (indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)) { + encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; + } + //getValueFromAnyKey(repos, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS); + return compositeRemoteRepository.getRepository(repositoryType, encryptionType).name(); } private static String getValueFromAnyKey(Map repos, List keys) { @@ -433,6 +547,7 @@ public boolean equalsWithRepoSkip(Object o, List reposToSkip) { if (o == null || getClass() != o.getClass()) return false; RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; + System.out.println("[pranikum]: reposToSkip = " + reposToSkip); return this.getRepositoriesMetadata().equalsIgnoreGenerationsWithRepoSkip(that.getRepositoriesMetadata(), reposToSkip); } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 
0b1cac07b0a10..47eac8816361a 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -712,7 +712,8 @@ private Settings getOverrideSettingsInternal() { clusterService.state(), clusterSettings, clusterService.getSettings(), - String.join(",", request.indices()) + String.join(",", request.indices()), + true ); return settingsBuilder.build(); } From 2cee903cfa2e65ee93fa8fdb80aab2fd85d6de8e Mon Sep 17 00:00:00 2001 From: Pranit Kumar Date: Thu, 28 Aug 2025 08:54:29 +0530 Subject: [PATCH 02/27] Add support for composite repository --- .../common/settings/IndexScopedSettings.java | 1 + .../CompositeRemoteRepository.java | 12 +- .../remotestore/RemoteStoreNodeAttribute.java | 130 ++++++++---------- 3 files changed, 66 insertions(+), 77 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a8b76a3f2ac01..c3887de524aae 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -241,6 +241,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote store enablement IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_STORE_SSE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, diff --git a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java index dcd98f301f336..467b4a5e2e97d 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java +++ b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java @@ -25,8 
+25,9 @@ public CompositeRemoteRepository() { repositoryEncryptionTypeMap = new HashMap<>(); } - public void registerCompositeRepository(RemoteStoreRepositoryType repositoryType, CompositeRepositoryEncryptionType type, RepositoryMetadata metadata) { - + public void registerCompositeRepository(final RemoteStoreRepositoryType repositoryType, + final CompositeRepositoryEncryptionType type, + final RepositoryMetadata metadata) { Map encryptionTypeMap = repositoryEncryptionTypeMap.get(repositoryType); if (encryptionTypeMap == null) { encryptionTypeMap = new HashMap<>(); @@ -40,6 +41,13 @@ public RepositoryMetadata getRepository(RemoteStoreRepositoryType repositoryType return repositoryEncryptionTypeMap.get(repositoryType).get(encryptionType); } + @Override + public String toString() { + return "CompositeRemoteRepository{" + + "repositoryEncryptionTypeMap=" + repositoryEncryptionTypeMap + + '}'; + } + /** * Enum for Remote store repo types */ diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 60ce89f4d4829..4fd9ce9bf44bb 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -8,6 +8,8 @@ package org.opensearch.node.remotestore; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; @@ -38,11 +40,9 @@ */ public class RemoteStoreNodeAttribute { + private static final Logger logger = LogManager.getLogger(RemoteStoreNodeAttribute.class); private static final String REMOTE_STORE_TRANSLOG_REPO_PREFIX = "translog"; - private static final String REMOTE_STORE_TRANSLOG_SSE_REPO_PREFIX = "translog.sse"; - private static 
final String REMOTE_STORE_SEGMENT_REPO_PREFIX = "segment"; - private static final String REMOTE_STORE_SEGMENT_SSE_REPO_PREFIX = "segment.sse"; public static final List REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = List.of("remote_store", "remote_publication"); @@ -66,18 +66,10 @@ public class RemoteStoreNodeAttribute { .map(prefix -> prefix + ".segment.repository") .collect(Collectors.toList()); - public static final List REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() - .map(prefix -> prefix + ".segment.sse.repository") - .collect(Collectors.toList()); - public static final List REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() .map(prefix -> prefix + ".translog.repository") .collect(Collectors.toList()); - public static final List REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS = REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX.stream() - .map(prefix -> prefix + ".translog.sse.repository") - .collect(Collectors.toList()); - public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type"; public static final String REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s." 
+ CryptoMetadata.CRYPTO_METADATA_KEY; @@ -93,15 +85,13 @@ public class RemoteStoreNodeAttribute { + CryptoMetadata.SETTINGS_KEY; public static final String REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "%s.repository.%s.settings."; - public static final String REPOSITORY_SETTINGS_ATTRIBUTE_ENCRYPTION_TYPE = "encryption_type"; + public static final String REPOSITORY_SETTINGS_SSE_ENABLED_ATTRIBUTE_KEY = "sse_enabled"; private final RepositoriesMetadata repositoriesMetadata; public static List> SUPPORTED_DATA_REPO_NAME_ATTRIBUTES = Arrays.asList( REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, - REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS, - REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS, - REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS + REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS ); private static CompositeRemoteRepository compositeRemoteRepository; @@ -129,14 +119,12 @@ private Tuple getAndValidateNodeAttributeEntries(DiscoveryNode n if (attributeValue == null || attributeValue.v1() == null || attributeValue.v1().isEmpty()) { throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKeys.get(0) + "]"); } - return attributeValue; } private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName, String prefix) { String metadataKey = String.format(Locale.getDefault(), REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, prefix, repositoryName); boolean isRepoEncrypted = node.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(metadataKey)); - System.out.println("isRepoEncrypted = " + isRepoEncrypted); if (!isRepoEncrypted) { return null; @@ -177,12 +165,10 @@ private Map getSettingAttribute(DiscoveryNode node, String repos "joining node [" + node + "] doesn't have settings attribute for [" + repositoryName + "] repository" ); } - return settingsMap; } private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name, String prefix) { - 
System.out.println("RemoteStoreNodeAttribute.buildRepositoryMetadata " + name); String type = getAndValidateNodeAttribute( node, String.format(Locale.getDefault(), REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, prefix, name) @@ -196,7 +182,6 @@ private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String na // Repository metadata built here will always be for a system repository. settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); - return new RepositoryMetadata(name, type, settings.build(), cryptoMetadata); } @@ -204,48 +189,48 @@ private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { Map remoteStoryTypeToRepoNameMap = new HashMap<>(); Map repositoryNamesWithPrefix = getValidatedRepositoryNames(node, remoteStoryTypeToRepoNameMap); - for (Map.Entry repositoryNameEntry : repositoryNamesWithPrefix.entrySet()) { - System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata rep entry " - + repositoryNameEntry.getKey() + " -> " + repositoryNameEntry.getValue()); - } - List repositoryMetadataList = new ArrayList<>(); for (Map.Entry repository : repositoryNamesWithPrefix.entrySet()) { - System.out.println("repository key is = " + repository.getKey() + ", value = " + repository.getValue()); RepositoryMetadata repositoryMetadata = buildRepositoryMetadata(node, repository.getKey(), repository.getValue()); repositoryMetadataList.add(repositoryMetadata); - - System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata" + isCompositeRepository(repositoryMetadata)); - System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata " + repositoryMetadata.name()); repositoryMetadataMap.put(repositoryMetadata.name(), repositoryMetadata); + + if (isCompositeRepository(repositoryMetadata)) { + RepositoryMetadata sseRepoMetatdata = new RepositoryMetadata(repositoryMetadata.name() + "-SSE", repositoryMetadata.type(), repositoryMetadata.settings()); + repositoryMetadataMap.put(sseRepoMetatdata.name(), 
sseRepoMetatdata); + repositoryMetadataList.add(sseRepoMetatdata); + } } // Let's Iterate over repo's and build Composite Repository structure for (Map.Entry repositoryTypeToNameEntry : remoteStoryTypeToRepoNameMap.entrySet()) { CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; - if (repositoryTypeToNameEntry.getKey().contains("sse")) { - encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; - } CompositeRemoteRepository.RemoteStoreRepositoryType remoteStoreRepositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; if (repositoryTypeToNameEntry.getKey().contains("translog")) { remoteStoreRepositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; } - System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata rep type " + remoteStoreRepositoryType - + " Enc Type " + encryptionType + "RepttpeToNamevalue " + repositoryTypeToNameEntry.getValue() - + " Rep Metadata " + repositoryMetadataMap.get(repositoryTypeToNameEntry.getValue())); - + String repositoryName = repositoryTypeToNameEntry.getValue(); compositeRemoteRepository.registerCompositeRepository(remoteStoreRepositoryType, encryptionType, - repositoryMetadataMap.get(repositoryTypeToNameEntry.getValue())); - } + repositoryMetadataMap.get(repositoryName)); + String sseRepositoryName = repositoryTypeToNameEntry.getValue() + "-SSE"; + System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata sseRepositoryName = " + sseRepositoryName); + if (repositoryMetadataMap.containsKey(sseRepositoryName)) { + encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; + compositeRemoteRepository.registerCompositeRepository(remoteStoreRepositoryType, + encryptionType, + repositoryMetadataMap.get(sseRepositoryName)); + } + System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata compositeRemoteRepository is " 
+ compositeRemoteRepository); + } return new RepositoriesMetadata(repositoryMetadataList); } private boolean isCompositeRepository(RepositoryMetadata repositoryMetadata) { - return repositoryMetadata.settings().hasValue(REPOSITORY_SETTINGS_ATTRIBUTE_ENCRYPTION_TYPE); + return repositoryMetadata.settings().hasValue(REPOSITORY_SETTINGS_SSE_ENABLED_ATTRIBUTE_KEY); } private static Tuple getValue(Map attributes, List keys) { @@ -261,14 +246,9 @@ private Map getValidatedRepositoryNames(DiscoveryNode node, Map< Set> repositoryNames = new HashSet<>(); if (containsKey(node.getAttributes(), REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) || containsKey(node.getAttributes(), REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { - addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_SEGMENT_REPO_PREFIX); - addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_SEGMENT_SSE_REPO_PREFIX); addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_TRANSLOG_REPO_PREFIX); - addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_TRANSLOG_SSE_REPO_PREFIX); - repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - } else if (containsKey(node.getAttributes(), REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); } @@ -291,9 +271,9 @@ private void addRepositoryNames(DiscoveryNode node, Set> repositoryNames, List attributeKeys, String remoteStoreRepoPrefix) { - Tuple remoteTranslogSseAttributeKeyMap = getAndValidateNodeAttributeEntries(node, attributeKeys); - 
remoteStoryTypeToRepoNameMap.put(remoteStoreRepoPrefix, remoteTranslogSseAttributeKeyMap.v1()); - repositoryNames.add(remoteTranslogSseAttributeKeyMap); + Tuple remoteStoreAttributeKeyMap = getAndValidateNodeAttributeEntries(node, attributeKeys); + remoteStoryTypeToRepoNameMap.put(remoteStoreRepoPrefix, remoteStoreAttributeKeyMap.v1()); + repositoryNames.add(remoteStoreAttributeKeyMap); } @@ -328,15 +308,15 @@ public static boolean isTranslogRepoConfigured(Settings settings) { return false; } - public static boolean isServerSideEncryptionRepoConfigured(Settings settings) { - boolean isServerSideEncryptionConfigured = false; - for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { - if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { - isServerSideEncryptionConfigured = true; - } - } - return isServerSideEncryptionConfigured; - } +// public static boolean isServerSideEncryptionRepoConfigured(Settings settings) { +// boolean isServerSideEncryptionConfigured = false; +// for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { +// if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { +// isServerSideEncryptionConfigured = true; +// } +// } +// return isServerSideEncryptionConfigured; +// } public static boolean isRemoteClusterStateConfigured(Settings settings) { for (String prefix : REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { @@ -357,16 +337,16 @@ public static String getRemoteStoreSegmentRepo(Settings settings) { } public static String getRemoteStoreSegmentRepo(Settings settings, boolean sseEnabled) { - if (sseEnabled) { - for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { - if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { - return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); - } - } - } else { +// if (sseEnabled) { +// for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { +// if 
(settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { +// return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); +// } +// } +// } else { return getRemoteStoreSegmentRepo(settings); - } - return null; +// } +// return null; } public static String getRemoteStoreTranslogRepo(Settings settings) { @@ -379,17 +359,17 @@ public static String getRemoteStoreTranslogRepo(Settings settings) { } public static String getRemoteStoreTranslogRepo(Settings settings, boolean sseEnabled) { - if (sseEnabled) { - for (String prefix : REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { - if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { - return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); - } - } - } else { +// if (sseEnabled) { +// for (String prefix : REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { +// if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { +// return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); +// } +// } +// } else { return getRemoteStoreTranslogRepo(settings); - } - - return null; +// } +// +// return null; } public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { From f4fdde89d5e275be52bfccabe829a30775f5ef11 Mon Sep 17 00:00:00 2001 From: Pranit Kumar Date: Thu, 28 Aug 2025 09:33:22 +0530 Subject: [PATCH 03/27] remove unwanted file Signed-off-by: Pranit Kumar --- .../RemoteStoreMergedSegmentWarmer.java | 67 ------------------- 1 file changed, 67 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java diff --git a/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java deleted file mode 100644 index f3d5f8a4cc93b..0000000000000 --- a/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - 
* The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.index.engine; - -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.LeafReader; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.transport.TransportService; - -import java.io.IOException; - -/** - * Implementation of a {@link IndexWriter.IndexReaderWarmer} when remote store is enabled. 
- * - * @opensearch.internal - */ -public class RemoteStoreMergedSegmentWarmer implements IndexWriter.IndexReaderWarmer { - private final TransportService transportService; - private final RecoverySettings recoverySettings; - private final ClusterService clusterService; - - public RemoteStoreMergedSegmentWarmer( - TransportService transportService, - RecoverySettings recoverySettings, - ClusterService clusterService - ) { - this.transportService = transportService; - this.recoverySettings = recoverySettings; - this.clusterService = clusterService; - } - - @Override - public void warm(LeafReader leafReader) throws IOException { - // TODO: remote store merged segment warmer - } -} From 1e07f063cb2ad7837abcd2a806f41c63c77c233f Mon Sep 17 00:00:00 2001 From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Date: Fri, 22 Aug 2025 11:52:32 +0530 Subject: [PATCH 04/27] Fix access specifier for FieldMapper method to allow usage by plugins (#19113) * Fix access specifier for FieldMapper method to allow usage by plugins Signed-off-by: Mohit Godwani * Apply spotless Signed-off-by: Mohit Godwani --------- Signed-off-by: Mohit Godwani --- .../mapper/ScaledFloatDerivedSourceIT.java | 72 +++++++++++++++++++ .../opensearch/index/mapper/FieldMapper.java | 6 +- 2 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/ScaledFloatDerivedSourceIT.java diff --git a/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/ScaledFloatDerivedSourceIT.java b/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/ScaledFloatDerivedSourceIT.java new file mode 100644 index 0000000000000..234825541d26d --- /dev/null +++ b/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/ScaledFloatDerivedSourceIT.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; + +public class ScaledFloatDerivedSourceIT extends OpenSearchIntegTestCase { + + private static final String INDEX_NAME = "test"; + + public void testScaledFloatDerivedSource() throws Exception { + Settings.Builder settings = Settings.builder(); + settings.put(indexSettings()); + settings.put("index.derived_source.enabled", "true"); + + prepareCreate(INDEX_NAME).setSettings(settings) + .setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("foo") + .field("type", "scaled_float") + .field("scaling_factor", "100") + .endObject() + .endObject() + .endObject() + ) + .get(); + + ensureGreen(INDEX_NAME); + + String docId = "one_doc"; + assertEquals(DocWriteResponse.Result.CREATED, prepareIndex(docId, 1.2123422f).get().getResult()); + + RefreshResponse refreshResponse = refresh(INDEX_NAME); + assertEquals(RestStatus.OK, refreshResponse.getStatus()); + assertEquals(0, refreshResponse.getFailedShards()); + assertEquals(INDEX_NUMBER_OF_SHARDS_SETTING.get(settings.build()).intValue(), refreshResponse.getSuccessfulShards()); + + GetResponse getResponse = client().prepareGet() + .setFetchSource(true) + .setId(docId) + .setIndex(INDEX_NAME) + 
.get(TimeValue.timeValueMinutes(1)); + assertTrue(getResponse.isExists()); + assertEquals(1.21d, getResponse.getSourceAsMap().get("foo")); + } + + private IndexRequestBuilder prepareIndex(String id, float number) throws IOException { + return client().prepareIndex(INDEX_NAME) + .setId(id) + .setSource(jsonBuilder().startObject().field("foo", number).endObject().toString(), XContentType.JSON); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java index 39a3a73c9529c..aaa2c9c029974 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java @@ -642,7 +642,7 @@ protected void canDeriveSourceInternal() { /** * Validates if doc values is enabled for a field or not */ - void checkDocValuesForDerivedSource() { + protected void checkDocValuesForDerivedSource() { if (!mappedFieldType.hasDocValues()) { throw new UnsupportedOperationException("Unable to derive source for [" + name() + "] with doc values disabled"); } @@ -651,7 +651,7 @@ void checkDocValuesForDerivedSource() { /** * Validates if stored field is enabled for a field or not */ - void checkStoredForDerivedSource() { + protected void checkStoredForDerivedSource() { if (!mappedFieldType.isStored()) { throw new UnsupportedOperationException("Unable to derive source for [" + name() + "] with store disabled"); } @@ -660,7 +660,7 @@ void checkStoredForDerivedSource() { /** * Validates if doc_values or stored field is enabled for a field or not */ - void checkStoredAndDocValuesForDerivedSource() { + protected void checkStoredAndDocValuesForDerivedSource() { if (!mappedFieldType.isStored() && !mappedFieldType.hasDocValues()) { throw new UnsupportedOperationException("Unable to derive source for [" + name() + "] with stored and " + "docValues disabled"); } From 420c038bc86de4da96b685ffc78f548f77d11363 Mon Sep 17 00:00:00 2001 From: 
pranikum Date: Fri, 22 Aug 2025 17:06:57 +0530 Subject: [PATCH 05/27] [Repository S3] Move async http client to CRT from Netty and add configurability to choose client via repo settings (#18800) Signed-off-by: Pranit Kumar --- CHANGELOG.md | 1 + plugins/repository-s3/build.gradle | 10 +- .../licenses/aws-crt-client-2.30.31.jar.sha1 | 1 + .../licenses/aws-crt-client-LICENSE.txt | 202 +++++++++++++++++ .../licenses/aws-crt-client-NOTICE.txt | 12 + .../licenses/crt-core-2.30.31.jar.sha1 | 1 + .../licenses/crt-core-LICENSE.txt | 206 ++++++++++++++++++ .../licenses/crt-core-NOTICE.txt | 25 +++ .../s3/S3BlobStoreRepositoryTests.java | 3 + .../s3/S3RepositoryThirdPartyTests.java | 3 + .../repositories/s3/S3AsyncService.java | 196 +++++++++++++---- .../repositories/s3/S3Repository.java | 22 ++ .../repositories/s3/S3RepositoryPlugin.java | 16 +- .../s3/EventLoopThreadFilter.java | 25 +++ .../repositories/s3/S3AsyncServiceTests.java | 113 +++++++++- .../s3/S3BlobContainerRetriesTests.java | 3 + .../s3/S3RepositoryPluginTests.java | 1 + .../repositories/s3/S3RepositoryTests.java | 18 ++ 18 files changed, 797 insertions(+), 61 deletions(-) create mode 100644 plugins/repository-s3/licenses/aws-crt-client-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-crt-client-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/aws-crt-client-NOTICE.txt create mode 100644 plugins/repository-s3/licenses/crt-core-2.30.31.jar.sha1 create mode 100644 plugins/repository-s3/licenses/crt-core-LICENSE.txt create mode 100644 plugins/repository-s3/licenses/crt-core-NOTICE.txt create mode 100644 plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/EventLoopThreadFilter.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d610041ae02a..40a00d3d69358 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - The dynamic mapping parameter supports false_allow_templates 
([#19065](https://github.com/opensearch-project/OpenSearch/pull/19065)) - Add a toBuilder method in EngineConfig to support easy modification of configs([#19054](https://github.com/opensearch-project/OpenSearch/pull/19054)) - Add StoreFactory plugin interface for custom Store implementations([#19091](https://github.com/opensearch-project/OpenSearch/pull/19091)) +- Use S3CrtClient for higher throughput while uploading files to S3 ([#18800](https://github.com/opensearch-project/OpenSearch/pull/18800)) - Add a dynamic setting to change skip_cache_factor and min_frequency for querycache ([#18351](https://github.com/opensearch-project/OpenSearch/issues/18351)) ### Changed diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index b1a83565f0e87..643b34797ccc8 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -78,7 +78,8 @@ dependencies { api "software.amazon.awssdk:aws-query-protocol:${versions.aws}" api "software.amazon.awssdk:sts:${versions.aws}" api "software.amazon.awssdk:netty-nio-client:${versions.aws}" - + api "software.amazon.awssdk:crt-core:${versions.aws}" + api "software.amazon.awssdk:aws-crt-client:${versions.aws}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" @@ -545,13 +546,6 @@ thirdPartyAudit { 'software.amazon.awssdk.arns.Arn', 'software.amazon.awssdk.arns.ArnResource', - 'software.amazon.awssdk.crtcore.CrtConfigurationUtils', - 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration', - 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$Builder', - 'software.amazon.awssdk.crtcore.CrtConnectionHealthConfiguration$DefaultBuilder', - 'software.amazon.awssdk.crtcore.CrtProxyConfiguration', - 'software.amazon.awssdk.crtcore.CrtProxyConfiguration$Builder', - 
'software.amazon.awssdk.crtcore.CrtProxyConfiguration$DefaultBuilder', 'software.amazon.eventstream.HeaderValue', 'software.amazon.eventstream.Message', 'software.amazon.eventstream.MessageDecoder' diff --git a/plugins/repository-s3/licenses/aws-crt-client-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/aws-crt-client-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..61ce2ed2a2234 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-client-2.30.31.jar.sha1 @@ -0,0 +1 @@ +05dd1f7501ec4062622f2dd2231caad8d54079e3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-crt-client-LICENSE.txt b/plugins/repository-s3/licenses/aws-crt-client-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-client-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/aws-crt-client-NOTICE.txt b/plugins/repository-s3/licenses/aws-crt-client-NOTICE.txt new file mode 100644 index 0000000000000..6c7dc983f8c7a --- /dev/null +++ b/plugins/repository-s3/licenses/aws-crt-client-NOTICE.txt @@ -0,0 +1,12 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
diff --git a/plugins/repository-s3/licenses/crt-core-2.30.31.jar.sha1 b/plugins/repository-s3/licenses/crt-core-2.30.31.jar.sha1 new file mode 100644 index 0000000000000..4b26ce35772ed --- /dev/null +++ b/plugins/repository-s3/licenses/crt-core-2.30.31.jar.sha1 @@ -0,0 +1 @@ +0da8346395a4b95003c1effd9ed4df7708185e5a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/crt-core-LICENSE.txt b/plugins/repository-s3/licenses/crt-core-LICENSE.txt new file mode 100644 index 0000000000000..1eef70a9b9f42 --- /dev/null +++ b/plugins/repository-s3/licenses/crt-core-LICENSE.txt @@ -0,0 +1,206 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + Note: Other license terms may apply to certain, identified software files contained within or distributed + with the accompanying software if such terms are included in the directory containing the accompanying software. + Such other license terms will then apply in lieu of the terms of the software license above. diff --git a/plugins/repository-s3/licenses/crt-core-NOTICE.txt b/plugins/repository-s3/licenses/crt-core-NOTICE.txt new file mode 100644 index 0000000000000..4c36a6c147c4a --- /dev/null +++ b/plugins/repository-s3/licenses/crt-core-NOTICE.txt @@ -0,0 +1,25 @@ +AWS SDK for Java 2.0 +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. 
+- Apache Commons Lang - https://github.com/apache/commons-lang +- Netty Reactive Streams - https://github.com/playframework/netty-reactive-streams +- Jackson-core - https://github.com/FasterXML/jackson-core +- Jackson-dataformat-cbor - https://github.com/FasterXML/jackson-dataformats-binary + +The licenses for these third party components are included in LICENSE.txt + +- For Apache Commons Lang see also this required NOTICE: + Apache Commons Lang + Copyright 2001-2020 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (https://www.apache.org/). diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index d54abb413c6fd..15dd2b875ebc4 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -31,6 +31,8 @@ package org.opensearch.repositories.s3; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; @@ -87,6 +89,7 @@ import static org.hamcrest.Matchers.equalTo; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") +@ThreadLeakFilters(filters = EventLoopThreadFilter.class) // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class S3BlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { diff --git 
a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index 79b5cc654b921..8f198f144b23c 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -31,6 +31,8 @@ package org.opensearch.repositories.s3; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import software.amazon.awssdk.services.s3.model.StorageClass; import org.opensearch.common.SuppressForbidden; @@ -53,6 +55,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; +@ThreadLeakFilters(filters = EventLoopThreadFilter.class) public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { @Override diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index afbeaff323d51..862e91c291073 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -22,8 +22,9 @@ import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.http.crt.ProxyConfiguration; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; -import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; import 
software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; @@ -63,7 +64,10 @@ class S3AsyncService implements Closeable { private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com"; - private volatile Map clientsCache = emptyMap(); + // We will need to support the cache with both type of clients. Since S3ClientSettings doesn't contain Http Client. + // Also adding the Http Client type in S3ClientSettings is not good option since it is used by Async and Sync clients. + // We can segregate the types of cache here itself + private volatile Map> s3HttpClientTypesClientsCache = emptyMap(); /** * Client settings calculated from static configuration and settings in the keystore. @@ -82,12 +86,24 @@ class S3AsyncService implements Closeable { private final @Nullable ScheduledExecutorService clientExecutorService; S3AsyncService(final Path configPath, @Nullable ScheduledExecutorService clientExecutorService) { + staticClientSettings = MapBuilder.newMapBuilder() - .put("default", S3ClientSettings.getClientSettings(Settings.EMPTY, "default", configPath)) + .put( + buildClientName("default", S3Repository.CRT_ASYNC_HTTP_CLIENT_TYPE), + S3ClientSettings.getClientSettings(Settings.EMPTY, "default", configPath) + ) + .put( + buildClientName("default", S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE), + S3ClientSettings.getClientSettings(Settings.EMPTY, "default", configPath) + ) .immutableMap(); this.clientExecutorService = clientExecutorService; } + private String buildClientName(final String clientValue, final String asyncClientType) { + return clientValue + "-" + asyncClientType; + } + S3AsyncService(final Path configPath) { this(configPath, null); } @@ -102,9 +118,24 @@ public synchronized void refreshAndClearCache(Map clie // shutdown all unused clients // others will shutdown on their respective release releaseCachedClients(); - this.staticClientSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + MapBuilder 
defaultBuilder = MapBuilder.newMapBuilder(); + for (Map.Entry entrySet : clientsSettings.entrySet()) { + defaultBuilder.put( + buildClientName(entrySet.getKey(), S3Repository.CRT_ASYNC_HTTP_CLIENT_TYPE), + clientsSettings.get(entrySet.getKey()) + ); + defaultBuilder.put( + buildClientName(entrySet.getKey(), S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE), + clientsSettings.get(entrySet.getKey()) + ); + } + + staticClientSettings = defaultBuilder.immutableMap(); derivedClientSettings = emptyMap(); - assert this.staticClientSettings.containsKey("default") : "always at least have 'default'"; + assert this.staticClientSettings.containsKey(buildClientName("default", S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE)) + : "Static Client Settings should contain default Netty client"; + assert this.staticClientSettings.containsKey(buildClientName("default", S3Repository.CRT_ASYNC_HTTP_CLIENT_TYPE)) + : "Static Client Settings should contain default CRT client"; // clients are built lazily by {@link client} } @@ -118,28 +149,57 @@ public AmazonAsyncS3Reference client( AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { + String asyncHttpClientType = S3Repository.S3_ASYNC_HTTP_CLIENT_TYPE.get(repositoryMetadata.settings()); + final S3ClientSettings clientSettings = settings(repositoryMetadata); - { - final AmazonAsyncS3Reference clientReference = clientsCache.get(clientSettings); - if (clientReference != null && clientReference.tryIncRef()) { - return clientReference; - } + AmazonAsyncS3Reference clientReference = getCachedClientForHttpTypeAndClientSettings(asyncHttpClientType, clientSettings); + if (clientReference != null) { + return clientReference; } + synchronized (this) { - final AmazonAsyncS3Reference existing = clientsCache.get(clientSettings); - if (existing != null && existing.tryIncRef()) { - return existing; + AmazonAsyncS3Reference existingClient = getCachedClientForHttpTypeAndClientSettings(asyncHttpClientType, clientSettings); + 
if (existingClient != null) { + return existingClient; } - final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( - buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) + // If the client reference is not found in cache. Let's create it. + final AmazonAsyncS3Reference newClientReference = new AmazonAsyncS3Reference( + buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, asyncHttpClientType) ); - clientReference.incRef(); - clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); - return clientReference; + newClientReference.incRef(); + + // Get or create new client cache map for the HTTP client type + Map clientsCacheForType = s3HttpClientTypesClientsCache.getOrDefault( + asyncHttpClientType, + emptyMap() + ); + + // Update both cache levels atomically + s3HttpClientTypesClientsCache = MapBuilder.newMapBuilder(s3HttpClientTypesClientsCache) + .put( + asyncHttpClientType, + MapBuilder.newMapBuilder(clientsCacheForType).put(clientSettings, newClientReference).immutableMap() + ) + .immutableMap(); + return newClientReference; } } + private AmazonAsyncS3Reference getCachedClientForHttpTypeAndClientSettings( + final String asyncHttpClientType, + final S3ClientSettings clientSettings + ) { + final Map clientsCacheMap = s3HttpClientTypesClientsCache.get(asyncHttpClientType); + if (clientsCacheMap != null && !clientsCacheMap.isEmpty()) { + final AmazonAsyncS3Reference clientReference = clientsCacheMap.get(clientSettings); + if (clientReference != null && clientReference.tryIncRef()) { + return clientReference; + } + } + return null; + } + /** * Either fetches {@link S3ClientSettings} for a given {@link RepositoryMetadata} from cached settings or creates them * by overriding static client settings from {@link #staticClientSettings} with settings found in the repository metadata. 
@@ -154,7 +214,10 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { return existing; } } - final String clientName = S3Repository.CLIENT_NAME.get(settings); + final String clientName = buildClientName( + S3Repository.CLIENT_NAME.get(settings), + S3Repository.S3_ASYNC_HTTP_CLIENT_TYPE.get(repositoryMetadata.settings()) + ); final S3ClientSettings staticSettings = staticClientSettings.get(clientName); if (staticSettings != null) { synchronized (this) { @@ -180,7 +243,8 @@ synchronized AmazonAsyncS3WithCredentials buildClient( final S3ClientSettings clientSettings, AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, - AsyncExecutorContainer normalExecutorBuilder + AsyncExecutorContainer normalExecutorBuilder, + String asyncHttpClientType ) { setDefaultAwsProfilePath(); final S3AsyncClientBuilder builder = S3AsyncClient.builder(); @@ -209,7 +273,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( builder.forcePathStyle(true); } - builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup(), asyncHttpClientType)); builder.asyncConfiguration( ClientAsyncConfiguration.builder() .advancedOption( @@ -220,7 +284,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient urgentClient = SocketAccess.doPrivileged(builder::build); - builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup(), asyncHttpClientType)); builder.asyncConfiguration( ClientAsyncConfiguration.builder() .advancedOption( @@ -231,7 +295,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient priorityClient = SocketAccess.doPrivileged(builder::build); - 
builder.httpClient(buildHttpClient(clientSettings, normalExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.httpClient(buildHttpClient(clientSettings, normalExecutorBuilder.getAsyncTransferEventLoopGroup(), asyncHttpClientType)); builder.asyncConfiguration( ClientAsyncConfiguration.builder() .advancedOption( @@ -241,38 +305,32 @@ synchronized AmazonAsyncS3WithCredentials buildClient( .build() ); final S3AsyncClient client = SocketAccess.doPrivileged(builder::build); - return AmazonAsyncS3WithCredentials.create(client, priorityClient, urgentClient, credentials); } - static ClientOverrideConfiguration buildOverrideConfiguration( - final S3ClientSettings clientSettings, - ScheduledExecutorService clientExecutorService + static SdkAsyncHttpClient buildHttpClient( + S3ClientSettings clientSettings, + AsyncTransferEventLoopGroup asyncTransferEventLoopGroup, + final String asyncHttpClientType ) { - RetryPolicy retryPolicy = SocketAccess.doPrivileged( - () -> RetryPolicy.builder() - .numRetries(clientSettings.maxRetries) - .throttlingBackoffStrategy( - clientSettings.throttleRetries ? 
BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) : BackoffStrategy.none() - ) - .build() - ); - ClientOverrideConfiguration.Builder builder = ClientOverrideConfiguration.builder(); - if (clientExecutorService != null) { - builder = builder.scheduledExecutorService(clientExecutorService); + logger.debug("S3 Http client type [{}]", asyncHttpClientType); + if (S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE.equals(asyncHttpClientType)) { + return buildAsyncNettyHttpClient(clientSettings, asyncTransferEventLoopGroup); } - - return builder.retryPolicy(retryPolicy).apiCallAttemptTimeout(Duration.ofMillis(clientSettings.requestTimeoutMillis)).build(); + return buildAsyncCrtHttpClient(clientSettings); } - // pkg private for tests - static SdkAsyncHttpClient buildHttpClient(S3ClientSettings clientSettings, AsyncTransferEventLoopGroup asyncTransferEventLoopGroup) { + static SdkAsyncHttpClient buildAsyncNettyHttpClient( + final S3ClientSettings clientSettings, + final AsyncTransferEventLoopGroup asyncTransferEventLoopGroup + ) { // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
NettyNioAsyncHttpClient.Builder clientBuilder = NettyNioAsyncHttpClient.builder(); if (clientSettings.proxySettings.getType() != ProxySettings.ProxyType.DIRECT) { - ProxyConfiguration.Builder proxyConfiguration = ProxyConfiguration.builder(); + software.amazon.awssdk.http.nio.netty.ProxyConfiguration.Builder proxyConfiguration = + software.amazon.awssdk.http.nio.netty.ProxyConfiguration.builder(); proxyConfiguration.scheme(clientSettings.proxySettings.getType().toProtocol().toString()); proxyConfiguration.host(clientSettings.proxySettings.getHostName()); proxyConfiguration.port(clientSettings.proxySettings.getPort()); @@ -292,6 +350,46 @@ static SdkAsyncHttpClient buildHttpClient(S3ClientSettings clientSettings, Async return clientBuilder.build(); } + static SdkAsyncHttpClient buildAsyncCrtHttpClient(final S3ClientSettings clientSettings) { + AwsCrtAsyncHttpClient.Builder crtClientBuilder = AwsCrtAsyncHttpClient.builder(); + + if (clientSettings.proxySettings.getType() != ProxySettings.ProxyType.DIRECT) { + ProxyConfiguration.Builder crtProxyConfiguration = ProxyConfiguration.builder(); + + crtProxyConfiguration.scheme(clientSettings.proxySettings.getType().toProtocol().toString()); + crtProxyConfiguration.host(clientSettings.proxySettings.getHostName()); + crtProxyConfiguration.port(clientSettings.proxySettings.getPort()); + crtProxyConfiguration.username(clientSettings.proxySettings.getUsername()); + crtProxyConfiguration.password(clientSettings.proxySettings.getPassword()); + + crtClientBuilder.proxyConfiguration(crtProxyConfiguration.build()); + } + + crtClientBuilder.connectionTimeout(Duration.ofMillis(clientSettings.connectionTimeoutMillis)); + crtClientBuilder.maxConcurrency(clientSettings.maxConnections); + return crtClientBuilder.build(); + } + + static ClientOverrideConfiguration buildOverrideConfiguration( + final S3ClientSettings clientSettings, + ScheduledExecutorService clientExecutorService + ) { + RetryPolicy retryPolicy = SocketAccess.doPrivileged( 
+ () -> RetryPolicy.builder() + .numRetries(clientSettings.maxRetries) + .throttlingBackoffStrategy( + clientSettings.throttleRetries ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) : BackoffStrategy.none() + ) + .build() + ); + ClientOverrideConfiguration.Builder builder = ClientOverrideConfiguration.builder(); + if (clientExecutorService != null) { + builder = builder.scheduledExecutorService(clientExecutorService); + } + + return builder.retryPolicy(retryPolicy).apiCallAttemptTimeout(Duration.ofMillis(clientSettings.requestTimeoutMillis)).build(); + } + // pkg private for tests static AwsCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { final AwsCredentials basicCredentials = clientSettings.credentials; @@ -388,13 +486,16 @@ private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { } public synchronized void releaseCachedClients() { - // the clients will shutdown when they will not be used anymore - for (final AmazonAsyncS3Reference clientReference : clientsCache.values()) { - clientReference.decRef(); + // There will be 2 types of caches CRT and Netty + for (Map clientTypeCaches : s3HttpClientTypesClientsCache.values()) { + // the clients will shutdown when they will not be used anymore + for (final AmazonAsyncS3Reference clientReference : clientTypeCaches.values()) { + clientReference.decRef(); + } } // clear previously cached clients, they will be build lazily - clientsCache = emptyMap(); + s3HttpClientTypesClientsCache = emptyMap(); derivedClientSettings = emptyMap(); } @@ -453,7 +554,6 @@ public AwsCredentials resolveCredentials() { @Override public void close() { releaseCachedClients(); - } @Nullable diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 1c894203a805c..12bd9202a1838 100644 --- 
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -124,6 +124,10 @@ class S3Repository extends MeteredBlobStoreRepository { static final Setting BUCKET_SETTING = Setting.simpleString("bucket"); static final String BUCKET_DEFAULT_ENCRYPTION_TYPE = "bucket_default"; + + public static final String NETTY_ASYNC_HTTP_CLIENT_TYPE = "netty"; + public static final String CRT_ASYNC_HTTP_CLIENT_TYPE = "crt"; + /** * The type of S3 Server Side Encryption to use. * Defaults to AES256. @@ -171,6 +175,15 @@ class S3Repository extends MeteredBlobStoreRepository { } }); + /** + * Type of Async client to be used for S3 Uploads. Defaults to crt. + */ + static final Setting S3_ASYNC_HTTP_CLIENT_TYPE = Setting.simpleString( + "s3_async_client_type", + CRT_ASYNC_HTTP_CLIENT_TYPE, + Setting.Property.NodeScope + ); + /** * Maximum size of files that can be uploaded using a single upload request. */ @@ -604,6 +617,15 @@ private void validateRepositoryMetadata(RepositoryMetadata newRepositoryMetadata validateStorageClass(STORAGE_CLASS_SETTING.get(settings)); validateCannedACL(CANNED_ACL_SETTING.get(settings)); + validateHttpClientType(S3_ASYNC_HTTP_CLIENT_TYPE.get(settings)); + } + + // package access for tests + void validateHttpClientType(String httpClientType) { + if (!(httpClientType.equalsIgnoreCase(NETTY_ASYNC_HTTP_CLIENT_TYPE) + || httpClientType.equalsIgnoreCase(CRT_ASYNC_HTTP_CLIENT_TYPE))) { + throw new BlobStoreException("Invalid http client type. 
`" + httpClientType + "`"); + } } private static void validateStorageClass(String storageClassStringValue) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 80aea8263e5a0..0f501eae27ad0 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -216,6 +216,7 @@ public Collection createComponents( int urgentEventLoopThreads = urgentPoolCount(clusterService.getSettings()); int priorityEventLoopThreads = priorityPoolCount(clusterService.getSettings()); int normalEventLoopThreads = normalPoolCount(clusterService.getSettings()); + this.urgentExecutorBuilder = new AsyncExecutorContainer( threadPool.executor(URGENT_FUTURE_COMPLETION), threadPool.executor(URGENT_STREAM_READER), @@ -371,7 +372,8 @@ public List> getSettings() { S3Repository.REDIRECT_LARGE_S3_UPLOAD, S3Repository.UPLOAD_RETRY_ENABLED, S3Repository.S3_PRIORITY_PERMIT_ALLOCATION_PERCENT, - S3Repository.PERMIT_BACKED_TRANSFER_ENABLED + S3Repository.PERMIT_BACKED_TRANSFER_ENABLED, + S3Repository.S3_ASYNC_HTTP_CLIENT_TYPE ); } @@ -387,8 +389,14 @@ public void reload(Settings settings) { public void close() throws IOException { service.close(); s3AsyncService.close(); - urgentExecutorBuilder.getAsyncTransferEventLoopGroup().close(); - priorityExecutorBuilder.getAsyncTransferEventLoopGroup().close(); - normalExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + if (urgentExecutorBuilder.getAsyncTransferEventLoopGroup() != null) { + urgentExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + } + if (priorityExecutorBuilder.getAsyncTransferEventLoopGroup() != null) { + priorityExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + } + if (normalExecutorBuilder.getAsyncTransferEventLoopGroup() != null) { + 
normalExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + } } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/EventLoopThreadFilter.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/EventLoopThreadFilter.java new file mode 100644 index 0000000000000..2ed6b123cbb48 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/EventLoopThreadFilter.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +/** + * While using the CRT client we are seeing a thread leak for the AwsEventLoop threads. These are native threads and are + * initialized one thread per core. We tried to close the threads explicitly but could not get them terminated. + * We have opened a GitHub issue "..." to track this. + * For now, we rely on this thread filter. 
+ */ +public class EventLoopThreadFilter implements ThreadFilter { + + @Override + public boolean reject(Thread t) { + return t.getName().startsWith("AwsEventLoop"); + } +} diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java index de9ad46bb222d..cdddf19d142ff 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java @@ -8,6 +8,10 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; + import org.opensearch.cli.SuppressForbidden; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.MockSecureSettings; @@ -20,6 +24,12 @@ import java.util.Map; import java.util.concurrent.Executors; +import io.netty.channel.nio.NioEventLoopGroup; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + public class S3AsyncServiceTests extends OpenSearchTestCase implements ConfigPathSupport { @Override @@ -32,9 +42,23 @@ public void setUp() throws Exception { public void testCachedClientsAreReleased() { final S3AsyncService s3AsyncService = new S3AsyncService(configPath()); - final Settings settings = Settings.builder().put("endpoint", "http://first").put("region", "us-east-2").build(); + final Settings settings = Settings.builder() + .put("endpoint", "http://first") + .put("region", "us-east-2") + .put(S3Repository.S3_ASYNC_HTTP_CLIENT_TYPE.getKey(), S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE) + .build(); + + final Settings crtSettings = Settings.builder() + .put("endpoint", "http://first") + 
.put("region", "us-east-2") + .put(S3Repository.S3_ASYNC_HTTP_CLIENT_TYPE.getKey(), S3Repository.CRT_ASYNC_HTTP_CLIENT_TYPE) + .build(); + final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); + + final RepositoryMetadata metadata3 = new RepositoryMetadata("second", "s3", crtSettings); + final RepositoryMetadata metadata4 = new RepositoryMetadata("second", "s3", crtSettings); final AsyncExecutorContainer asyncExecutorContainer = new AsyncExecutorContainer( Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), @@ -46,6 +70,23 @@ public void testCachedClientsAreReleased() { final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); + + final AmazonAsyncS3Reference reference2 = SocketAccess.doPrivileged( + () -> s3AsyncService.client(metadata2, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) + ); + + final AmazonAsyncS3Reference reference3 = SocketAccess.doPrivileged( + () -> s3AsyncService.client(metadata3, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) + ); + + final AmazonAsyncS3Reference reference4 = SocketAccess.doPrivileged( + () -> s3AsyncService.client(metadata4, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) + ); + + assertSame(reference, reference2); + assertSame(reference3, reference4); + assertNotSame(reference, reference3); + reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( @@ -92,4 +133,74 @@ public void testCachedClientsWithCredentialsAreReleased() { final S3ClientSettings clientSettingsReloaded = s3AsyncService.settings(metadata1); assertNotSame(clientSettings, clientSettingsReloaded); } + + public void testBuildHttpClientWithNetty() { + final int port = 
randomIntBetween(10, 1080); + final String userName = randomAlphaOfLength(10); + final String password = randomAlphaOfLength(10); + final String proxyType = randomFrom("http", "https", "socks"); + final S3AsyncService s3AsyncService = new S3AsyncService(configPath()); + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.proxy.username", userName); + secureSettings.setString("s3.client.default.proxy.password", password); + + final Settings settings = Settings.builder() + .put("endpoint", "http://first") + .put("region", "us-east-2") + .put("s3.client.default.proxy.type", proxyType) + .put("s3.client.default.proxy.host", randomFrom("127.0.0.10")) + .put("s3.client.default.proxy.port", randomFrom(port)) + .setSecureSettings(secureSettings) + .build(); + final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); + final S3ClientSettings clientSettings = s3AsyncService.settings(metadata1); + + AsyncTransferEventLoopGroup eventLoopGroup = mock(AsyncTransferEventLoopGroup.class); + when(eventLoopGroup.getEventLoopGroup()).thenReturn(mock(NioEventLoopGroup.class)); + + SdkAsyncHttpClient asyncClient = S3AsyncService.buildHttpClient( + clientSettings, + eventLoopGroup, + S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE + ); + assertNotNull(asyncClient); + assertTrue(asyncClient instanceof NettyNioAsyncHttpClient); + verify(eventLoopGroup).getEventLoopGroup(); + } + + public void testBuildHttpClientWithCRT() { + final int port = randomIntBetween(10, 1080); + final String userName = randomAlphaOfLength(10); + final String password = randomAlphaOfLength(10); + final String proxyType = randomFrom("http", "https", "socks"); + final S3AsyncService s3AsyncService = new S3AsyncService(configPath()); + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.proxy.username", userName); + 
secureSettings.setString("s3.client.default.proxy.password", password); + + final Settings settings = Settings.builder() + .put("endpoint", "http://first") + .put("region", "us-east-2") + .put("s3.client.default.proxy.type", proxyType) + .put("s3.client.default.proxy.host", randomFrom("127.0.0.10")) + .put("s3.client.default.proxy.port", randomFrom(port)) + .setSecureSettings(secureSettings) + .build(); + + final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); + final S3ClientSettings clientSettings = s3AsyncService.settings(metadata1); + + AsyncTransferEventLoopGroup eventLoopGroup = mock(AsyncTransferEventLoopGroup.class); + when(eventLoopGroup.getEventLoopGroup()).thenReturn(mock(NioEventLoopGroup.class)); + + SdkAsyncHttpClient asyncClient = S3AsyncService.buildHttpClient( + clientSettings, + eventLoopGroup, + S3Repository.CRT_ASYNC_HTTP_CLIENT_TYPE + ); + assertNotNull(asyncClient); + assertTrue(asyncClient instanceof AwsCrtAsyncHttpClient); + } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index 4193609ac520d..786a56d973551 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -31,6 +31,8 @@ package org.opensearch.repositories.s3; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.io.SdkDigestInputStream; import software.amazon.awssdk.utils.internal.Base16; @@ -118,6 +120,7 @@ * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs. 
*/ @SuppressForbidden(reason = "use a http server") +@ThreadLeakFilters(filters = EventLoopThreadFilter.class) public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTestCase implements ConfigPathSupport { private S3Service service; diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java index c0ee9cb6d980f..799cbe90103e5 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java @@ -74,6 +74,7 @@ public void testGetExecutorBuilders() throws IOException { + "] is deprecated" ); } + assertTrue(plugin.getSettings().contains(S3Repository.S3_ASYNC_HTTP_CLIENT_TYPE)); } finally { if (threadPool != null) { terminate(threadPool); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index f8e9903bb3577..49c6a31e32816 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -35,6 +35,7 @@ import software.amazon.awssdk.services.s3.S3Client; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -157,6 +158,23 @@ public void testRestrictedSettingsDefault() { } } + public void testValidateHttpLClientType_Valid_Values() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3Repo = 
createS3Repo(metadata)) { + // Don't expect any Exception + s3Repo.validateHttpClientType(S3Repository.CRT_ASYNC_HTTP_CLIENT_TYPE); + s3Repo.validateHttpClientType(S3Repository.NETTY_ASYNC_HTTP_CLIENT_TYPE); + } + } + + public void testValidateHttpLClientType_Invalid_Values() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3Repo = createS3Repo(metadata)) { + // Don't expect any Exception + assertThrows(BlobStoreException.class, () -> s3Repo.validateHttpClientType(randomAlphaOfLength(4))); + } + } + private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, From 96b9ef5bbe2701107aa3ae91a9aecfdfb4a54991 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 22:42:04 -0400 Subject: [PATCH 06/27] Bump com.google.auth:google-auth-library-oauth2-http from 1.37.1 to 1.38.0 in /plugins/repository-gcs (#19144) * Bump com.google.auth:google-auth-library-oauth2-http Bumps com.google.auth:google-auth-library-oauth2-http from 1.37.1 to 1.38.0. --- updated-dependencies: - dependency-name: com.google.auth:google-auth-library-oauth2-http dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 4 ++-- .../licenses/google-auth-library-credentials-1.37.1.jar.sha1 | 1 - .../licenses/google-auth-library-credentials-1.38.0.jar.sha1 | 1 + .../licenses/google-auth-library-oauth2-http-1.37.1.jar.sha1 | 1 - .../licenses/google-auth-library-oauth2-http-1.38.0.jar.sha1 | 1 + 6 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-1.37.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-1.38.0.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.37.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.38.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 40a00d3d69358..4fa5652ccc7eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.nimbusds:nimbus-jose-jwt` from 10.3 to 10.4.2 ([#19099](https://github.com/opensearch-project/OpenSearch/pull/19099), [#19101](https://github.com/opensearch-project/OpenSearch/pull/19101)) - Bump netty from 4.1.121.Final to 4.1.124.Final ([#19103](https://github.com/opensearch-project/OpenSearch/pull/19103)) - Bump google cloud storage from 1.113.1 to 2.55.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547)) +- Bump `com.google.auth:google-auth-library-oauth2-http` from 1.37.1 to 1.38.0 ([#19144](https://github.com/opensearch-project/OpenSearch/pull/19144)) ### Deprecated diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e8338976fae5d..3055296682853 100644 --- 
a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -74,8 +74,8 @@ dependencies { runtimeOnly "com.google.code.gson:gson:2.12.1" runtimeOnly "com.google.api.grpc:proto-google-common-protos:2.60.0" runtimeOnly "com.google.api.grpc:proto-google-iam-v1:1.55.0" - implementation "com.google.auth:google-auth-library-credentials:1.37.1" - implementation "com.google.auth:google-auth-library-oauth2-http:1.37.1" + implementation "com.google.auth:google-auth-library-credentials:1.38.0" + implementation "com.google.auth:google-auth-library-oauth2-http:1.38.0" runtimeOnly "com.google.oauth-client:google-oauth-client:1.39.0" // 1.39.0 in bom implementation "com.google.api-client:google-api-client:2.7.2" implementation "com.google.http-client:google-http-client:1.47.1" diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.37.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.37.1.jar.sha1 deleted file mode 100644 index fd3bcfebb878e..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.37.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -894c1cd371380e254290ac7c7df04372bf547a8f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.38.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.38.0.jar.sha1 new file mode 100644 index 0000000000000..866b777fb139b --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.38.0.jar.sha1 @@ -0,0 +1 @@ +0fa8a919c22292e2617e6adf2554dc3e9260797d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.37.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.37.1.jar.sha1 deleted file mode 100644 index a0e34c8071d43..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.37.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-86a3c90a6b80128fccac09dead6158fe7cc5e7bd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.38.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.38.0.jar.sha1 new file mode 100644 index 0000000000000..d42722a0ea0f5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.38.0.jar.sha1 @@ -0,0 +1 @@ +7910bf19b88fd9c34b1c8dce353102c2eb0f9399 \ No newline at end of file From 04b4ee7c5874007d108c58930d3226f479fa1424 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 09:39:49 -0400 Subject: [PATCH 07/27] Bump com.squareup.okio:okio from 3.15.0 to 3.16.0 in /test/fixtures/hdfs-fixture (#19146) * Bump com.squareup.okio:okio in /test/fixtures/hdfs-fixture Bumps [com.squareup.okio:okio](https://github.com/square/okio) from 3.15.0 to 3.16.0. - [Release notes](https://github.com/square/okio/releases) - [Changelog](https://github.com/square/okio/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/okio/compare/parent-3.15.0...parent-3.16.0) --- updated-dependencies: - dependency-name: com.squareup.okio:okio dependency-version: 3.16.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fa5652ccc7eb..26b17221fe304 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump netty from 4.1.121.Final to 4.1.124.Final ([#19103](https://github.com/opensearch-project/OpenSearch/pull/19103)) - Bump google cloud storage from 1.113.1 to 2.55.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547)) - Bump `com.google.auth:google-auth-library-oauth2-http` from 1.37.1 to 1.38.0 ([#19144](https://github.com/opensearch-project/OpenSearch/pull/19144)) +- Bump `com.squareup.okio:okio` from 3.15.0 to 3.16.0 ([#19146](https://github.com/opensearch-project/OpenSearch/pull/19146)) ### Deprecated diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index bba7fb9f51857..91dd493a635d1 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -90,6 +90,6 @@ dependencies { runtimeOnly("com.squareup.okhttp3:okhttp:5.1.0") { exclude group: "com.squareup.okio" } - runtimeOnly "com.squareup.okio:okio:3.15.0" + runtimeOnly "com.squareup.okio:okio:3.16.0" runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.8" } From cf5557310727ff58e6e2b7e69b150f4fbb806d2f Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 26 Aug 2025 12:27:06 -0400 Subject: [PATCH 08/27] Update CHANGELOG to use correct comparison link of 3.2..main (#19151) Signed-off-by: Craig Perkins --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md 
index 26b17221fe304..b32e0a38d715f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,4 +50,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security -[Unreleased 3.x]: https://github.com/opensearch-project/OpenSearch/compare/3.1...main +[Unreleased 3.x]: https://github.com/opensearch-project/OpenSearch/compare/3.2...main From b2033bf2d150cb149e672fb83ff1ac74c9d774a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 13:04:52 -0400 Subject: [PATCH 09/27] Bump com.azure:azure-storage-common from 12.30.1 to 12.30.2 in /plugins/repository-azure (#19145) * Bump com.azure:azure-storage-common in /plugins/repository-azure Bumps [com.azure:azure-storage-common](https://github.com/Azure/azure-sdk-for-java) from 12.30.1 to 12.30.2. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.30.1...azure-storage-common_12.30.2) --- updated-dependencies: - dependency-name: com.azure:azure-storage-common dependency-version: 12.30.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] * Upgrade com.azure:azure-storage-blob to 12.31.2 Signed-off-by: Craig Perkins --------- Signed-off-by: dependabot[bot] Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig Perkins --- CHANGELOG.md | 2 +- plugins/repository-azure/build.gradle | 4 ++-- .../licenses/azure-storage-blob-12.30.1.jar.sha1 | 1 - .../licenses/azure-storage-blob-12.31.2.jar.sha1 | 1 + .../licenses/azure-storage-common-12.30.1.jar.sha1 | 1 - .../licenses/azure-storage-common-12.30.2.jar.sha1 | 1 + 6 files changed, 5 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.31.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.30.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.30.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index b32e0a38d715f..125b11e021fe7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,7 +34,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `commons-cli:commons-cli` from 1.9.0 to 1.10.0 ([#19021](https://github.com/opensearch-project/OpenSearch/pull/19021)) - Bump `org.jline:jline` from 3.30.4 to 3.30.5 ([#19013](https://github.com/opensearch-project/OpenSearch/pull/19013)) - Bump `com.github.spotbugs:spotbugs-annotations` from 4.9.3 to 4.9.4 ([#19015](https://github.com/opensearch-project/OpenSearch/pull/19015)) -- Bump `com.azure:azure-storage-common` from 12.29.1 to 12.30.1 ([#19016](https://github.com/opensearch-project/OpenSearch/pull/19016)) +- Bump `com.azure:azure-storage-common` from 12.29.1 to 12.30.2 ([#19016](https://github.com/opensearch-project/OpenSearch/pull/19016), 
[#19145](https://github.com/opensearch-project/OpenSearch/pull/19145)) - Update OpenTelemetry to 1.53.0 and OpenTelemetry SemConv to 1.34.0 ([#19068](https://github.com/opensearch-project/OpenSearch/pull/19068)) - Bump `1password/load-secrets-action` from 2 to 3 ([#19100](https://github.com/opensearch-project/OpenSearch/pull/19100)) - Bump `com.nimbusds:nimbus-jose-jwt` from 10.3 to 10.4.2 ([#19099](https://github.com/opensearch-project/OpenSearch/pull/19099), [#19101](https://github.com/opensearch-project/OpenSearch/pull/19101)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 78257161a5c82..ad3dca45e778a 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -47,7 +47,7 @@ dependencies { api 'com.azure:azure-core:1.55.5' api 'com.azure:azure-json:1.5.0' api 'com.azure:azure-xml:1.2.0' - api 'com.azure:azure-storage-common:12.30.1' + api 'com.azure:azure-storage-common:12.30.2' api 'com.azure:azure-core-http-netty:1.15.12' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" @@ -56,7 +56,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.30.1' + api 'com.azure:azure-storage-blob:12.31.2' api 'com.azure:azure-identity:1.14.2' // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 deleted file mode 100644 index 34189c82a88ba..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -deaa55c7c985bec01cbbc4fef41d2da3d511dcbc \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/azure-storage-blob-12.31.2.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.31.2.jar.sha1 new file mode 100644 index 0000000000000..1a22d5360fe1a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.31.2.jar.sha1 @@ -0,0 +1 @@ +092c5c3fb7796f42bece7f3f6d3fc51072b71475 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.30.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.30.1.jar.sha1 deleted file mode 100644 index 16690b638df84..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.30.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e94e0c1e780e479bc328ccaf35f10fd2c76c9778 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.30.2.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.30.2.jar.sha1 new file mode 100644 index 0000000000000..b78e3fc5f5ad2 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.30.2.jar.sha1 @@ -0,0 +1 @@ +203214375d7fbf214f5cacefd2c851e87a708a98 \ No newline at end of file From 23a772f44063b3e0c7e4f691daf43e1d510738f5 Mon Sep 17 00:00:00 2001 From: Andrey Pleskach Date: Tue, 26 Aug 2025 21:32:15 +0200 Subject: [PATCH 10/27] Bump Slf4j from 1.7.36 to 2.0.17 (#19136) Signed-off-by: Andrey Pleskach --- CHANGELOG.md | 3 ++- client/rest/build.gradle | 3 --- client/rest/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - client/rest/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + gradle/libs.versions.toml | 2 +- plugins/arrow-flight-rpc/build.gradle | 4 ---- plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/arrow-flight-rpc/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/cache-ehcache/build.gradle | 3 --- plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/cache-ehcache/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - 
plugins/crypto-kms/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/discovery-ec2/build.gradle | 3 --- plugins/discovery-ec2/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/discovery-ec2/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/identity-shiro/build.gradle | 3 --- plugins/identity-shiro/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/identity-shiro/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/ingest-attachment/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/ingestion-kafka/build.gradle | 3 --- plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/ingestion-kafka/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/ingestion-kinesis/build.gradle | 4 ---- plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/ingestion-kinesis/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/repository-azure/build.gradle | 3 --- plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/repository-azure/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/repository-gcs/build.gradle | 5 ----- plugins/repository-gcs/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/repository-gcs/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/repository-hdfs/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + plugins/repository-s3/licenses/slf4j-api-1.7.36.jar.sha1 | 1 - plugins/repository-s3/licenses/slf4j-api-2.0.17.jar.sha1 | 1 + 37 files changed, 16 insertions(+), 46 deletions(-) delete mode 100644 client/rest/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 client/rest/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/arrow-flight-rpc/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/cache-ehcache/licenses/slf4j-api-2.0.17.jar.sha1 
delete mode 100644 plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/identity-shiro/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/identity-shiro/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/ingestion-kafka/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/ingestion-kinesis/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/repository-azure/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/slf4j-api-2.0.17.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/repository-s3/licenses/slf4j-api-2.0.17.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 125b11e021fe7..d0eeeaab39e52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,9 +39,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `1password/load-secrets-action` from 2 to 3 ([#19100](https://github.com/opensearch-project/OpenSearch/pull/19100)) - Bump `com.nimbusds:nimbus-jose-jwt` from 10.3 to 10.4.2 
([#19099](https://github.com/opensearch-project/OpenSearch/pull/19099), [#19101](https://github.com/opensearch-project/OpenSearch/pull/19101)) - Bump netty from 4.1.121.Final to 4.1.124.Final ([#19103](https://github.com/opensearch-project/OpenSearch/pull/19103)) -- Bump google cloud storage from 1.113.1 to 2.55.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547)) +- Bump Google Cloud Storage SDK from 1.113.1 to 2.55.0 ([#18922](https://github.com/opensearch-project/OpenSearch/pull/18922)) - Bump `com.google.auth:google-auth-library-oauth2-http` from 1.37.1 to 1.38.0 ([#19144](https://github.com/opensearch-project/OpenSearch/pull/19144)) - Bump `com.squareup.okio:okio` from 3.15.0 to 3.16.0 ([#19146](https://github.com/opensearch-project/OpenSearch/pull/19146)) +- Bump Slf4j from 1.7.36 to 2.0.17 ([#19136](https://github.com/opensearch-project/OpenSearch/pull/19136)) ### Deprecated diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 22fb38ded3bde..ed5eedb65e140 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -105,9 +105,6 @@ testingConventions { thirdPartyAudit { ignoreMissingClasses( 'org.conscrypt.Conscrypt', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/client/rest/licenses/slf4j-api-2.0.17.jar.sha1 b/client/rest/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/client/rest/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ 
+d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 3cd058d83ba4b..40441dba894bb 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -14,7 +14,7 @@ snakeyaml = "2.1" icu4j = "77.1" supercsv = "2.4.0" log4j = "2.21.0" -slf4j = "1.7.36" +slf4j = "2.0.17" asm = "9.7" jettison = "1.5.4" woodstox = "6.4.0" diff --git a/plugins/arrow-flight-rpc/build.gradle b/plugins/arrow-flight-rpc/build.gradle index eb14e4ecea577..034a0043a4a61 100644 --- a/plugins/arrow-flight-rpc/build.gradle +++ b/plugins/arrow-flight-rpc/build.gradle @@ -130,10 +130,6 @@ tasks.named('thirdPartyAudit').configure { 'org.apache.commons.logging.Log', 'org.apache.commons.logging.LogFactory', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - // from Log4j (deliberate, Netty will fallback to Log4j 2) 'org.apache.log4j.Level', 'org.apache.log4j.Logger', diff --git a/plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/arrow-flight-rpc/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/arrow-flight-rpc/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/arrow-flight-rpc/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/arrow-flight-rpc/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle index 6390b045db8ea..64cf3a963db74 100644 --- a/plugins/cache-ehcache/build.gradle +++ b/plugins/cache-ehcache/build.gradle @@ -79,9 +79,6 @@ thirdPartyAudit { 
'org.osgi.framework.BundleActivator', 'org.osgi.framework.BundleContext', 'org.osgi.framework.ServiceReference', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder' ) } diff --git a/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/cache-ehcache/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/cache-ehcache/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/cache-ehcache/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/crypto-kms/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/crypto-kms/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 7a7eb8da24fb6..8aeae37742c19 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -162,9 +162,6 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 
'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', 'software.amazon.eventstream.HeaderValue', 'software.amazon.eventstream.Message', 'software.amazon.eventstream.MessageDecoder', diff --git a/plugins/discovery-ec2/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/discovery-ec2/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/discovery-ec2/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/discovery-ec2/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/discovery-ec2/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index f72155e1d28b2..223a69f9eb353 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -66,9 +66,6 @@ thirdPartyAudit.ignoreMissingClasses( 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', 'org.cryptacular.bean.HashBean', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', 'org.springframework.context.MessageSource', 'org.springframework.context.support.MessageSourceAccessor' ) diff --git a/plugins/identity-shiro/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/identity-shiro/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/identity-shiro/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/identity-shiro/licenses/slf4j-api-2.0.17.jar.sha1 
new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/identity-shiro/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/ingestion-kafka/build.gradle b/plugins/ingestion-kafka/build.gradle index 6a9809674b39a..9ba91190944dc 100644 --- a/plugins/ingestion-kafka/build.gradle +++ b/plugins/ingestion-kafka/build.gradle @@ -67,9 +67,6 @@ thirdPartyAudit { 'net.jpountz.util.SafeUtils', 'net.jpountz.xxhash.XXHash32', 'net.jpountz.xxhash.XXHashFactory', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', 'com.google.common.util.concurrent.ListenableFuture', 'io.grpc.BindableService', 'io.grpc.CallOptions', diff --git a/plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingestion-kafka/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/ingestion-kafka/licenses/slf4j-api-2.0.17.jar.sha1 new 
file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/ingestion-kafka/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/ingestion-kinesis/build.gradle b/plugins/ingestion-kinesis/build.gradle index a8100018c7f4a..7acc7b8fbff46 100644 --- a/plugins/ingestion-kinesis/build.gradle +++ b/plugins/ingestion-kinesis/build.gradle @@ -126,10 +126,6 @@ thirdPartyAudit { 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - 'org.graalvm.nativeimage.hosted.Feature', 'org.graalvm.nativeimage.hosted.Feature$AfterImageWriteAccess', diff --git a/plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/ingestion-kinesis/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingestion-kinesis/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/ingestion-kinesis/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/ingestion-kinesis/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index ad3dca45e778a..3ba2c591644da 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -170,9 +170,6 @@ thirdPartyAudit { 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapters', 'org.osgi.framework.BundleActivator', 'org.osgi.framework.BundleContext', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', 
'io.micrometer.common.KeyValue', 'io.micrometer.common.KeyValues', 'io.micrometer.common.docs.KeyName', diff --git a/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/repository-azure/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 3055296682853..881f56f91ca61 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -277,11 +277,6 @@ thirdPartyAudit { 'org.graalvm.nativeimage.hosted.Feature$DuringAnalysisAccess', 'org.graalvm.nativeimage.hosted.Feature$FeatureAccess', 'org.graalvm.nativeimage.hosted.RuntimeReflection', - //slf4j dependencies - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - 'org.slf4j.spi.LoggingEventBuilder', ) } diff --git a/plugins/repository-gcs/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-gcs/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/repository-gcs/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/repository-gcs/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ 
b/plugins/repository-gcs/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/repository-hdfs/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-s3/licenses/slf4j-api-1.7.36.jar.sha1 deleted file mode 100644 index 77b9917528382..0000000000000 --- a/plugins/repository-s3/licenses/slf4j-api-1.7.36.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/slf4j-api-2.0.17.jar.sha1 b/plugins/repository-s3/licenses/slf4j-api-2.0.17.jar.sha1 new file mode 100644 index 0000000000000..435f6c13a28b6 --- /dev/null +++ b/plugins/repository-s3/licenses/slf4j-api-2.0.17.jar.sha1 @@ -0,0 +1 @@ +d9e58ac9c7779ba3bf8142aff6c830617a7fe60f \ No newline at end of file From 8a3f0cffb582de0a0e985bfcfa768d65de0b4d51 Mon Sep 17 00:00:00 2001 From: Atri Sharma Date: Wed, 27 Aug 2025 19:20:46 +0530 Subject: [PATCH 11/27] Add query rewriting infrastructure to reduce query complexity (#19060) * Add query rewriting infrastructure to reduce query complexity Implements three query optimizations that work together: - Boolean flattening: removes unnecessary nested boolean queries - Terms merging: combines multiple term 
queries on same field in filter/should contexts - Match-all removal: eliminates redundant match_all queries Key features: - 60-70% reduction in query nodes for typical filtered queries - Feature flag: search.query_rewriting.enabled (default: true) - Preserves exact query semantics and results Signed-off-by: Atri Sharma * Fix forbidden api issues Signed-off-by: Atri Sharma * Update writers and get tests to pass Signed-off-by: Atri Sharma * Update per CI Signed-off-by: Atri Sharma * Fix term merging threshold and update comments Signed-off-by: Atri Sharma * Expose setting and update per comments Signed-off-by: Atri Sharma * Update CHANGELOG Signed-off-by: Atri Sharma * Fix tests and ensure scoring MATCH ALL query is preserved Signed-off-by: Atri Sharma * Migrate must to filter and must not to should optimizations to query rewriting infrastructure This commit migrates two existing query optimizations from BoolQueryBuilder to the new query rewriting infrastructure: 1. **MustToFilterRewriter**: Moves non scoring queries (range, geo, numeric term/terms/match) from must to filter clauses to avoid unnecessary scoring calculations (from PR #18541) 2. 
**MustNotToShouldRewriter**: Transforms negative queries into positive complements for better performance on single valued numeric fields (from PRs #17655 and #18498) Changes: Add MustToFilterRewriter with priority 150 (runs after boolean flattening) Add MustNotToShouldRewriter with priority 175 (runs after must to filter) Register both rewriters in QueryRewriterRegistry Add comprehensive test suites (15 tests for must to filter, 14 for must not to should) Disable legacy implementations in BoolQueryBuilder Comment out BoolQueryBuilder tests that relied on the old implementations The new rewriters maintain full backward compatibility while providing: Better separation of concerns Recursive rewriting for nested boolean queries Proper error handling and logging Consistent priority based execution order Signed-off-by: Atri Sharma * Handle fields with missing fields Signed-off-by: Atri Sharma --------- Signed-off-by: Atri Sharma --- CHANGELOG.md | 1 + .../common/settings/ClusterSettings.java | 2 + .../index/query/BoolQueryBuilder.java | 54 --- .../org/opensearch/search/SearchService.java | 35 +- .../search/query/QueryRewriter.java | 50 +++ .../search/query/QueryRewriterRegistry.java | 113 ++++++ .../rewriters/BooleanFlatteningRewriter.java | 239 +++++++++++++ .../rewriters/MatchAllRemovalRewriter.java | 239 +++++++++++++ .../rewriters/MustNotToShouldRewriter.java | 251 ++++++++++++++ .../query/rewriters/MustToFilterRewriter.java | 177 ++++++++++ .../query/rewriters/TermsMergingRewriter.java | 314 +++++++++++++++++ .../search/query/rewriters/package-info.java | 31 ++ .../index/query/BoolQueryBuilderTests.java | 152 -------- .../query/QueryRewriterRegistryTests.java | 328 ++++++++++++++++++ .../BooleanFlatteningRewriterTests.java | 182 ++++++++++ .../MatchAllRemovalRewriterTests.java | 167 +++++++++ .../MustNotToShouldRewriterTests.java | 284 +++++++++++++++ .../rewriters/MustToFilterRewriterTests.java | 309 +++++++++++++++++ .../rewriters/TermsMergingRewriterTests.java | 
292 ++++++++++++++++ 19 files changed, 3012 insertions(+), 208 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/query/QueryRewriter.java create mode 100644 server/src/main/java/org/opensearch/search/query/QueryRewriterRegistry.java create mode 100644 server/src/main/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriter.java create mode 100644 server/src/main/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriter.java create mode 100644 server/src/main/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriter.java create mode 100644 server/src/main/java/org/opensearch/search/query/rewriters/MustToFilterRewriter.java create mode 100644 server/src/main/java/org/opensearch/search/query/rewriters/TermsMergingRewriter.java create mode 100644 server/src/main/java/org/opensearch/search/query/rewriters/package-info.java create mode 100644 server/src/test/java/org/opensearch/search/query/QueryRewriterRegistryTests.java create mode 100644 server/src/test/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriterTests.java create mode 100644 server/src/test/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriterTests.java create mode 100644 server/src/test/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriterTests.java create mode 100644 server/src/test/java/org/opensearch/search/query/rewriters/MustToFilterRewriterTests.java create mode 100644 server/src/test/java/org/opensearch/search/query/rewriters/TermsMergingRewriterTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index d0eeeaab39e52..106e8b36c3531 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Expand fetch phase profiling to support inner hits and top hits aggregation phases ([##18936](https://github.com/opensearch-project/OpenSearch/pull/18936)) - Add temporal routing processors for time-based document routing 
([#18920](https://github.com/opensearch-project/OpenSearch/issues/18920)) +- Implement Query Rewriting Infrastructure ([#19060](https://github.com/opensearch-project/OpenSearch/pull/19060)) - The dynamic mapping parameter supports false_allow_templates ([#19065](https://github.com/opensearch-project/OpenSearch/pull/19065)) - Add a toBuilder method in EngineConfig to support easy modification of configs([#19054](https://github.com/opensearch-project/OpenSearch/pull/19054)) - Add StoreFactory plugin interface for custom Store implementations([#19091](https://github.com/opensearch-project/OpenSearch/pull/19091)) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 8aec7386fcf81..3f954cd9f9c37 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -804,6 +804,8 @@ public void apply(Settings value, Settings current, Settings previous) { BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, + SearchService.QUERY_REWRITING_ENABLED_SETTING, + SearchService.QUERY_REWRITING_TERMS_THRESHOLD_SETTING, // Composite index settings CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING, diff --git a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java index 46c5a40457ce7..a2f71a7064903 100644 --- a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java @@ -49,8 +49,6 @@ import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.MappedFieldType; -import 
org.opensearch.index.mapper.NumberFieldMapper; import java.io.IOException; import java.util.ArrayList; @@ -402,9 +400,6 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws return any.get(); } - changed |= rewriteMustNotRangeClausesToShould(newBuilder, queryRewriteContext); - changed |= rewriteMustClausesToFilter(newBuilder, queryRewriteContext); - if (changed) { newBuilder.adjustPureNegative = adjustPureNegative; if (minimumShouldMatch != null) { @@ -559,53 +554,4 @@ private boolean checkAllDocsHaveOneValue(List contexts, Strin } return true; } - - private boolean rewriteMustClausesToFilter(BoolQueryBuilder newBuilder, QueryRewriteContext queryRewriteContext) { - // If we have must clauses which return the same score for all matching documents, like numeric term queries or ranges, - // moving them from must clauses to filter clauses improves performance in some cases. - // This works because it can let Lucene use MaxScoreCache to skip non-competitive docs. - boolean changed = false; - Set mustClausesToMove = new HashSet<>(); - - QueryShardContext shardContext; - if (queryRewriteContext == null) { - shardContext = null; - } else { - shardContext = queryRewriteContext.convertToShardContext(); // can still be null - } - - for (QueryBuilder clause : mustClauses) { - if (isClauseIrrelevantToScoring(clause, shardContext)) { - mustClausesToMove.add(clause); - changed = true; - } - } - - newBuilder.mustClauses.removeAll(mustClausesToMove); - newBuilder.filterClauses.addAll(mustClausesToMove); - return changed; - } - - private boolean isClauseIrrelevantToScoring(QueryBuilder clause, QueryShardContext context) { - // This is an incomplete list of clauses this might apply for; it can be expanded in future. 
- - // If a clause is purely numeric, for example a date range, its score is unimportant as - // it'll be the same for all returned docs - if (clause instanceof RangeQueryBuilder) return true; - if (clause instanceof GeoBoundingBoxQueryBuilder) return true; - - // Further optimizations depend on knowing whether the field is numeric. - // QueryBuilder.doRewrite() is called several times in the search flow, and the shard context telling us this - // is only available the last time, when it's called from SearchService.executeQueryPhase(). - // Skip moving these clauses if we don't have the shard context. - if (context == null) return false; - if (!(clause instanceof WithFieldName wfn)) return false; - MappedFieldType fieldType = context.fieldMapper(wfn.fieldName()); - if (!(fieldType instanceof NumberFieldMapper.NumberFieldType)) return false; - - if (clause instanceof MatchQueryBuilder) return true; - if (clause instanceof TermQueryBuilder) return true; - if (clause instanceof TermsQueryBuilder) return true; - return false; - } } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index beecab73ffeab..c6fe57188eff1 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -136,6 +136,7 @@ import org.opensearch.search.profile.Profilers; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.query.QueryPhase; +import org.opensearch.search.query.QueryRewriterRegistry; import org.opensearch.search.query.QuerySearchRequest; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.search.query.ScrollQuerySearchResult; @@ -276,6 +277,27 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.Deprecated ); + public static final Setting QUERY_REWRITING_ENABLED_SETTING = Setting.boolSetting( + 
"search.query_rewriting.enabled", + true, + Property.Dynamic, + Property.NodeScope + ); + + /** + * Controls the threshold for the number of term queries on the same field that triggers + * the TermsMergingRewriter to combine them into a single terms query. For example, + * if set to 16 (default), when 16 or more term queries target the same field within + * a boolean clause, they will be merged into a single terms query for better performance. + */ + public static final Setting QUERY_REWRITING_TERMS_THRESHOLD_SETTING = Setting.intSetting( + "search.query_rewriting.terms_threshold", + 16, + 2, // minimum value + Property.Dynamic, + Property.NodeScope + ); + // Allow concurrent segment search for all requests public static final String CONCURRENT_SEGMENT_SEARCH_MODE_ALL = "all"; @@ -507,6 +529,10 @@ public SearchService( this.concurrentSearchDeciderFactories = concurrentSearchDeciderFactories; this.pluginProfilers = pluginProfilers; + + // Initialize QueryRewriterRegistry with cluster settings so TermsMergingRewriter + // can register its settings update consumer + QueryRewriterRegistry.INSTANCE.initialize(settings, clusterService.getClusterSettings()); } private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { @@ -1488,8 +1514,13 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.size(source.size()); Map innerHitBuilders = new HashMap<>(); if (source.query() != null) { - InnerHitContextBuilder.extractInnerHits(source.query(), innerHitBuilders); - context.parsedQuery(queryShardContext.toQuery(source.query())); + QueryBuilder query = source.query(); + + // Apply query rewriting optimizations + query = QueryRewriterRegistry.INSTANCE.rewrite(query, queryShardContext); + + InnerHitContextBuilder.extractInnerHits(query, innerHitBuilders); + context.parsedQuery(queryShardContext.toQuery(query)); } if (source.postFilter() != null) { InnerHitContextBuilder.extractInnerHits(source.postFilter(), 
innerHitBuilders); diff --git a/server/src/main/java/org/opensearch/search/query/QueryRewriter.java b/server/src/main/java/org/opensearch/search/query/QueryRewriter.java new file mode 100644 index 0000000000000..32854c5881d61 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryRewriter.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; + +/** + * Interface for query rewriting implementations that optimize query structure + * before conversion to Lucene queries. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface QueryRewriter { + + /** + * Rewrites the given query builder to a more optimal form. + * + * @param query The query to rewrite + * @param context The search execution context + * @return The rewritten query (may be the same instance if no rewrite needed) + */ + QueryBuilder rewrite(QueryBuilder query, QueryShardContext context); + + /** + * Returns the priority of this rewriter. Lower values execute first. + * This allows control over rewrite ordering when multiple rewriters + * may interact. + * + * @return The priority value + */ + default int priority() { + return 1000; + } + + /** + * Returns the name of this rewriter for debugging and profiling. 
+ * + * @return The rewriter name + */ + String name(); +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryRewriterRegistry.java b/server/src/main/java/org/opensearch/search/query/QueryRewriterRegistry.java new file mode 100644 index 0000000000000..7de5a5c8e554e --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryRewriterRegistry.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.SearchService; +import org.opensearch.search.query.rewriters.BooleanFlatteningRewriter; +import org.opensearch.search.query.rewriters.MatchAllRemovalRewriter; +import org.opensearch.search.query.rewriters.MustNotToShouldRewriter; +import org.opensearch.search.query.rewriters.MustToFilterRewriter; +import org.opensearch.search.query.rewriters.TermsMergingRewriter; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * Registry for query rewriters + * + * @opensearch.internal + */ +public final class QueryRewriterRegistry { + + private static final Logger logger = LogManager.getLogger(QueryRewriterRegistry.class); + + public static final QueryRewriterRegistry INSTANCE = new QueryRewriterRegistry(); + + /** + * Default rewriters. + * CopyOnWriteArrayList is used for thread-safety during registration. + */ + private final CopyOnWriteArrayList rewriters; + + /** + * Whether query rewriting is enabled. 
+ */ + private volatile boolean enabled; + + private QueryRewriterRegistry() { + this.rewriters = new CopyOnWriteArrayList<>(); + + // Register default rewriters using singletons + registerRewriter(BooleanFlatteningRewriter.INSTANCE); + registerRewriter(MustToFilterRewriter.INSTANCE); + registerRewriter(MustNotToShouldRewriter.INSTANCE); + registerRewriter(MatchAllRemovalRewriter.INSTANCE); + registerRewriter(TermsMergingRewriter.INSTANCE); + } + + /** + * Register a custom query rewriter. + * + * @param rewriter The rewriter to register + */ + public void registerRewriter(QueryRewriter rewriter) { + if (rewriter != null) { + rewriters.add(rewriter); + logger.info("Registered query rewriter: {}", rewriter.name()); + } + } + + /** + * Initialize the registry with cluster settings. + * This must be called once during system startup to properly configure + * the TermsMergingRewriter with settings and update consumers. + * + * @param settings Initial cluster settings + * @param clusterSettings Cluster settings for registering update consumers + */ + public void initialize(Settings settings, ClusterSettings clusterSettings) { + TermsMergingRewriter.INSTANCE.initialize(settings, clusterSettings); + this.enabled = SearchService.QUERY_REWRITING_ENABLED_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + SearchService.QUERY_REWRITING_ENABLED_SETTING, + (Boolean enabled) -> this.enabled = enabled + ); + } + + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (!enabled || query == null) { + return query; + } + + List sortedRewriters = new ArrayList<>(rewriters); + sortedRewriters.sort(Comparator.comparingInt(QueryRewriter::priority)); + + QueryBuilder current = query; + for (QueryRewriter rewriter : sortedRewriters) { + try { + QueryBuilder rewritten = rewriter.rewrite(current, context); + if (rewritten != current) { + current = rewritten; + } + } catch (Exception e) { + logger.warn("Query rewriter {} failed: {}", 
rewriter.name(), e.getMessage()); + } + } + + return current; + } +} diff --git a/server/src/main/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriter.java b/server/src/main/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriter.java new file mode 100644 index 0000000000000..5fe316e9a1c1a --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriter.java @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.query.QueryRewriter; + +import java.util.ArrayList; +import java.util.List; + +/** + * Rewrites nested boolean queries to flatten unnecessary nesting levels. + * For example: + *
+ * {"bool": {"filter": [{"bool": {"filter": [{"term": {"field": "value"}}]}}]}}
+ * 
+ * becomes: + *
+ * {"bool": {"filter": [{"term": {"field": "value"}}]}}
+ * 
+ * + * Note: While Lucene's BooleanQuery does flatten pure disjunctions (SHOULD-only clauses) + * for WAND optimization, it does NOT flatten other nested structures like filter-in-filter + * or must-in-must. This rewriter handles those additional patterns that are common in + * user-generated and template-based queries but not optimized by Lucene. + * + * @opensearch.internal + */ +public class BooleanFlatteningRewriter implements QueryRewriter { + + public static final BooleanFlatteningRewriter INSTANCE = new BooleanFlatteningRewriter(); + + private BooleanFlatteningRewriter() { + // Singleton + } + + @Override + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (!(query instanceof BoolQueryBuilder)) { + return query; + } + + BoolQueryBuilder boolQuery = (BoolQueryBuilder) query; + + // First check if flattening is needed + if (!needsFlattening(boolQuery)) { + return query; + } + + return flattenBoolQuery(boolQuery); + } + + private boolean needsFlattening(BoolQueryBuilder boolQuery) { + // Check all clause types for nested bool queries that can be flattened + if (hasFlattenableBool(boolQuery.must(), ClauseType.MUST) + || hasFlattenableBool(boolQuery.filter(), ClauseType.FILTER) + || hasFlattenableBool(boolQuery.should(), ClauseType.SHOULD) + || hasFlattenableBool(boolQuery.mustNot(), ClauseType.MUST_NOT)) { + return true; + } + + // Check if any nested bool queries need flattening + return hasNestedBoolThatNeedsFlattening(boolQuery); + } + + private boolean hasFlattenableBool(List clauses, ClauseType parentType) { + for (QueryBuilder clause : clauses) { + if (clause instanceof BoolQueryBuilder) { + BoolQueryBuilder nestedBool = (BoolQueryBuilder) clause; + // Can flatten if nested bool only has one type of clause matching parent + if (canFlatten(nestedBool, parentType)) { + return true; + } + } + } + return false; + } + + private boolean hasNestedBoolThatNeedsFlattening(BoolQueryBuilder boolQuery) { + for (QueryBuilder clause : 
boolQuery.must()) { + if (clause instanceof BoolQueryBuilder && needsFlattening((BoolQueryBuilder) clause)) { + return true; + } + } + for (QueryBuilder clause : boolQuery.filter()) { + if (clause instanceof BoolQueryBuilder && needsFlattening((BoolQueryBuilder) clause)) { + return true; + } + } + for (QueryBuilder clause : boolQuery.should()) { + if (clause instanceof BoolQueryBuilder && needsFlattening((BoolQueryBuilder) clause)) { + return true; + } + } + for (QueryBuilder clause : boolQuery.mustNot()) { + if (clause instanceof BoolQueryBuilder && needsFlattening((BoolQueryBuilder) clause)) { + return true; + } + } + return false; + } + + private BoolQueryBuilder flattenBoolQuery(BoolQueryBuilder original) { + BoolQueryBuilder flattened = new BoolQueryBuilder(); + + flattened.boost(original.boost()); + flattened.queryName(original.queryName()); + flattened.minimumShouldMatch(original.minimumShouldMatch()); + flattened.adjustPureNegative(original.adjustPureNegative()); + + flattenClauses(original.must(), flattened, ClauseType.MUST); + flattenClauses(original.filter(), flattened, ClauseType.FILTER); + flattenClauses(original.should(), flattened, ClauseType.SHOULD); + flattenClauses(original.mustNot(), flattened, ClauseType.MUST_NOT); + + return flattened; + } + + private void flattenClauses(List clauses, BoolQueryBuilder target, ClauseType clauseType) { + for (QueryBuilder clause : clauses) { + if (clause instanceof BoolQueryBuilder) { + BoolQueryBuilder nestedBool = (BoolQueryBuilder) clause; + + if (canFlatten(nestedBool, clauseType)) { + // Flatten the nested bool query by extracting its clauses + List nestedClauses = getClausesForType(nestedBool, clauseType); + for (QueryBuilder nestedClause : nestedClauses) { + // Recursively flatten if needed + if (nestedClause instanceof BoolQueryBuilder) { + nestedClause = flattenBoolQuery((BoolQueryBuilder) nestedClause); + } + addClauseBasedOnType(target, nestedClause, clauseType); + } + } else { + // Can't flatten this 
bool, but recursively flatten its contents + BoolQueryBuilder flattenedNested = flattenBoolQuery(nestedBool); + addClauseBasedOnType(target, flattenedNested, clauseType); + } + } else { + // Non-boolean clause, add as-is + addClauseBasedOnType(target, clause, clauseType); + } + } + } + + private boolean canFlatten(BoolQueryBuilder nestedBool, ClauseType parentType) { + // Can only flatten if: + // 1. The nested bool has the same properties as default (boost=1, no queryName, etc.) + // 2. The nested bool only has clauses of the same type as the parent + + if (nestedBool.boost() != 1.0f || nestedBool.queryName() != null) { + return false; + } + + // Check if only has clauses matching parent type + switch (parentType) { + case MUST: + return !nestedBool.must().isEmpty() + && nestedBool.filter().isEmpty() + && nestedBool.should().isEmpty() + && nestedBool.mustNot().isEmpty(); + case FILTER: + return nestedBool.must().isEmpty() + && !nestedBool.filter().isEmpty() + && nestedBool.should().isEmpty() + && nestedBool.mustNot().isEmpty(); + case SHOULD: + return nestedBool.must().isEmpty() + && nestedBool.filter().isEmpty() + && !nestedBool.should().isEmpty() + && nestedBool.mustNot().isEmpty() + && nestedBool.minimumShouldMatch() == null; + case MUST_NOT: + return nestedBool.must().isEmpty() + && nestedBool.filter().isEmpty() + && nestedBool.should().isEmpty() + && !nestedBool.mustNot().isEmpty(); + default: + return false; + } + } + + private List getClausesForType(BoolQueryBuilder bool, ClauseType type) { + switch (type) { + case MUST: + return bool.must(); + case FILTER: + return bool.filter(); + case SHOULD: + return bool.should(); + case MUST_NOT: + return bool.mustNot(); + default: + return new ArrayList<>(); + } + } + + private void addClauseBasedOnType(BoolQueryBuilder target, QueryBuilder clause, ClauseType type) { + switch (type) { + case MUST: + target.must(clause); + break; + case FILTER: + target.filter(clause); + break; + case SHOULD: + target.should(clause); 
+ break; + case MUST_NOT: + target.mustNot(clause); + break; + } + } + + @Override + public int priority() { + return 100; + } + + @Override + public String name() { + return "boolean_flattening"; + } + + private enum ClauseType { + MUST, + FILTER, + SHOULD, + MUST_NOT + } +} diff --git a/server/src/main/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriter.java b/server/src/main/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriter.java new file mode 100644 index 0000000000000..39d2257e483bc --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriter.java @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.query.QueryRewriter; + +import java.util.List; + +/** + * Removes unnecessary match_all queries from boolean contexts where they have no effect. 
+ * + * @opensearch.internal + */ +public class MatchAllRemovalRewriter implements QueryRewriter { + + public static final MatchAllRemovalRewriter INSTANCE = new MatchAllRemovalRewriter(); + + private MatchAllRemovalRewriter() { + // Singleton + } + + @Override + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (query instanceof BoolQueryBuilder) { + return rewriteBoolQuery((BoolQueryBuilder) query); + } + return query; + } + + private QueryBuilder rewriteBoolQuery(BoolQueryBuilder original) { + // Special case: bool query with only match_all queries and no should/mustNot + if (original.should().isEmpty() && original.mustNot().isEmpty()) { + boolean onlyMatchAll = true; + int matchAllCount = 0; + int matchAllInMust = 0; + + for (QueryBuilder q : original.must()) { + if (q instanceof MatchAllQueryBuilder) { + matchAllCount++; + matchAllInMust++; + } else { + // Don't treat constant score queries or any other queries as match_all + onlyMatchAll = false; + break; + } + } + + if (onlyMatchAll) { + for (QueryBuilder q : original.filter()) { + if (q instanceof MatchAllQueryBuilder) { + matchAllCount++; + } else { + onlyMatchAll = false; + break; + } + } + } + + // Only convert to single match_all if there are no must clauses + // (to preserve scoring) or if there's only one match_all total + if (onlyMatchAll && matchAllCount > 0 && (matchAllInMust == 0 || matchAllCount == 1)) { + // Convert to single match_all, preserving boost + MatchAllQueryBuilder matchAll = new MatchAllQueryBuilder(); + if (original.boost() != 1.0f) { + matchAll.boost(original.boost()); + } + return matchAll; + } + } + + // Check if we need rewriting + boolean needsRewrite = shouldRewrite(original); + + if (!needsRewrite) { + return original; + } + + // Clone the query structure + BoolQueryBuilder rewritten = new BoolQueryBuilder(); + rewritten.boost(original.boost()); + rewritten.queryName(original.queryName()); + 
rewritten.minimumShouldMatch(original.minimumShouldMatch()); + rewritten.adjustPureNegative(original.adjustPureNegative()); + + // Process each clause type with different match_all removal logic: + // - must: Remove match_all only if other queries exist (preserves scoring semantics) + // - filter: Always remove match_all (it's redundant in non-scoring context) + // - should: Keep match_all (changes OR semantics if removed) + // - mustNot: Keep match_all (excluding all docs is meaningful) + processClausesWithContext(original.must(), rewritten::must, true, original, true); + processClauses(original.filter(), rewritten::filter, true, original); + processClauses(original.should(), rewritten::should, false, original); + processClauses(original.mustNot(), rewritten::mustNot, false, original); + + return rewritten; + } + + private boolean shouldRewrite(BoolQueryBuilder bool) { + // Check if any must/filter has match_all + if (hasMatchAll(bool.must()) || hasMatchAll(bool.filter())) { + return true; + } + + // Check nested bool queries + return hasNestedBoolThatNeedsRewrite(bool); + } + + private boolean hasMatchAll(List clauses) { + for (QueryBuilder q : clauses) { + if (q instanceof MatchAllQueryBuilder) { + return true; + } + } + return false; + } + + private boolean hasNestedBoolThatNeedsRewrite(BoolQueryBuilder bool) { + for (QueryBuilder q : bool.must()) { + if (q instanceof BoolQueryBuilder && shouldRewrite((BoolQueryBuilder) q)) { + return true; + } + } + for (QueryBuilder q : bool.filter()) { + if (q instanceof BoolQueryBuilder && shouldRewrite((BoolQueryBuilder) q)) { + return true; + } + } + for (QueryBuilder q : bool.should()) { + if (q instanceof BoolQueryBuilder && shouldRewrite((BoolQueryBuilder) q)) { + return true; + } + } + for (QueryBuilder q : bool.mustNot()) { + if (q instanceof BoolQueryBuilder && shouldRewrite((BoolQueryBuilder) q)) { + return true; + } + } + return false; + } + + private void processClausesWithContext( + List clauses, + ClauseAdder 
adder, + boolean removeMatchAll, + BoolQueryBuilder original, + boolean isMustClause + ) { + if (!removeMatchAll) { + processClauses(clauses, adder, false, original); + return; + } + + // For must clauses, only remove match_all if there are other non-match_all queries + if (isMustClause) { + boolean hasNonMatchAll = clauses.stream().anyMatch(q -> !(q instanceof MatchAllQueryBuilder)); + + // Also check if we're in a scoring context (no filter/should/mustNot clauses) + boolean isScoringContext = original.filter().isEmpty() && original.should().isEmpty() && original.mustNot().isEmpty(); + + if (!hasNonMatchAll || isScoringContext) { + // All queries are match_all or we're in a scoring context, don't remove any to preserve scoring + processClauses(clauses, adder, false, original); + return; + } + } + + // Otherwise, use normal processing + processClauses(clauses, adder, removeMatchAll, original); + } + + private void processClauses(List clauses, ClauseAdder adder, boolean removeMatchAll, BoolQueryBuilder original) { + if (!removeMatchAll) { + // For should/mustNot, don't remove match_all + for (QueryBuilder clause : clauses) { + if (clause instanceof BoolQueryBuilder) { + adder.addClause(rewriteBoolQuery((BoolQueryBuilder) clause)); + } else { + adder.addClause(clause); + } + } + return; + } + + // For must/filter, remove match_all if: + // 1. There are other non-match_all clauses in the same list OR + // 2. 
There are clauses in other lists (must, filter, should, mustNot) + boolean hasOtherClauses = hasNonMatchAllInSameList(clauses) || hasClausesInOtherLists(original); + + for (QueryBuilder clause : clauses) { + if (clause instanceof BoolQueryBuilder) { + adder.addClause(rewriteBoolQuery((BoolQueryBuilder) clause)); + } else if (clause instanceof MatchAllQueryBuilder && hasOtherClauses) { + // Skip match_all + continue; + } else { + adder.addClause(clause); + } + } + } + + private boolean hasNonMatchAllInSameList(List clauses) { + for (QueryBuilder q : clauses) { + if (!(q instanceof MatchAllQueryBuilder)) { + return true; + } + } + return false; + } + + private boolean hasClausesInOtherLists(BoolQueryBuilder bool) { + // Check if there are any clauses in any list + return !bool.must().isEmpty() || !bool.filter().isEmpty() || !bool.should().isEmpty() || !bool.mustNot().isEmpty(); + } + + @Override + public int priority() { + return 300; + } + + @Override + public String name() { + return "match_all_removal"; + } + + @FunctionalInterface + private interface ClauseAdder { + void addClause(QueryBuilder clause); + } +} diff --git a/server/src/main/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriter.java b/server/src/main/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriter.java new file mode 100644 index 0000000000000..ddcbea48b4b12 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriter.java @@ -0,0 +1,251 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query.rewriters; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PointValues; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.ComplementAwareQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.WithFieldName; +import org.opensearch.search.query.QueryRewriter; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Rewrites must_not clauses to should clauses when possible. + * This improves performance by transforming negative queries into positive ones. + * + * For example: + *
+ * {"bool": {"must_not": [{"range": {"age": {"gte": 18, "lte": 65}}}]}}
+ * 
+ * becomes: + *
+ * {"bool": {"must": [{"bool": {"should": [
+ *   {"range": {"age": {"lt": 18}}},
+ *   {"range": {"age": {"gt": 65}}}
+ * ], "minimum_should_match": 1}}]}}
+ * 
+ * + * This optimization applies to: + * - RangeQueryBuilder + * - TermQueryBuilder (on numeric fields) + * - TermsQueryBuilder (on numeric fields) + * - MatchQueryBuilder (on numeric fields) + * + * @opensearch.internal + */ +public class MustNotToShouldRewriter implements QueryRewriter { + + public static final MustNotToShouldRewriter INSTANCE = new MustNotToShouldRewriter(); + + private MustNotToShouldRewriter() { + // Singleton + } + + @Override + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (!(query instanceof BoolQueryBuilder)) { + return query; + } + + BoolQueryBuilder boolQuery = (BoolQueryBuilder) query; + + // We need LeafReaderContexts to verify single-valued fields (only for must_not rewriting) + List leafReaderContexts = null; + List mustNotClausesToRewrite = new ArrayList<>(); + + // Only process must_not clauses if they exist + if (!boolQuery.mustNot().isEmpty()) { + leafReaderContexts = getLeafReaderContexts(context); + if (leafReaderContexts != null && !leafReaderContexts.isEmpty()) { + Map fieldCounts = new HashMap<>(); + + // Find complement-aware queries that can be rewritten + for (QueryBuilder clause : boolQuery.mustNot()) { + if (clause instanceof ComplementAwareQueryBuilder && clause instanceof WithFieldName) { + WithFieldName wfn = (WithFieldName) clause; + fieldCounts.merge(wfn.fieldName(), 1, Integer::sum); + } + } + + // For now, only handle the case where there's exactly 1 complement-aware query per field + for (QueryBuilder clause : boolQuery.mustNot()) { + if (clause instanceof ComplementAwareQueryBuilder && clause instanceof WithFieldName) { + WithFieldName wfn = (WithFieldName) clause; + String fieldName = wfn.fieldName(); + + if (fieldCounts.getOrDefault(fieldName, 0) == 1) { + // Check that all docs on this field have exactly 1 value + if (checkAllDocsHaveOneValue(leafReaderContexts, fieldName)) { + mustNotClausesToRewrite.add(clause); + } + } + } + } + } + } + + // Create a new 
BoolQueryBuilder with rewritten clauses + BoolQueryBuilder rewritten = new BoolQueryBuilder(); + + // Copy all properties + rewritten.boost(boolQuery.boost()); + rewritten.queryName(boolQuery.queryName()); + rewritten.minimumShouldMatch(boolQuery.minimumShouldMatch()); + rewritten.adjustPureNegative(boolQuery.adjustPureNegative()); + + // Copy must clauses (rewrite nested queries first) + for (QueryBuilder mustClause : boolQuery.must()) { + rewritten.must(rewrite(mustClause, context)); + } + + // Copy filter clauses (rewrite nested queries first) + for (QueryBuilder filterClause : boolQuery.filter()) { + rewritten.filter(rewrite(filterClause, context)); + } + + // Copy should clauses (rewrite nested queries first) + for (QueryBuilder shouldClause : boolQuery.should()) { + rewritten.should(rewrite(shouldClause, context)); + } + + // Process must_not clauses + boolean changed = false; + for (QueryBuilder mustNotClause : boolQuery.mustNot()) { + if (mustNotClausesToRewrite.contains(mustNotClause)) { + // Rewrite this clause + ComplementAwareQueryBuilder caq = (ComplementAwareQueryBuilder) mustNotClause; + List complement = caq.getComplement(context); + + if (complement != null && !complement.isEmpty()) { + BoolQueryBuilder nestedBoolQuery = new BoolQueryBuilder(); + nestedBoolQuery.minimumShouldMatch(1); + for (QueryBuilder complementComponent : complement) { + nestedBoolQuery.should(complementComponent); + } + rewritten.must(nestedBoolQuery); + changed = true; + } else { + // If complement couldn't be determined, keep original + rewritten.mustNot(mustNotClause); + } + } else { + // Keep clauses we're not rewriting + rewritten.mustNot(rewrite(mustNotClause, context)); + } + } + + // Handle minimumShouldMatch adjustment + if (changed && boolQuery.minimumShouldMatch() == null) { + if (!boolQuery.should().isEmpty() && boolQuery.must().isEmpty() && boolQuery.filter().isEmpty()) { + // If there were originally should clauses and no must/filter clauses, + // null 
minimumShouldMatch defaults to 1 in Lucene. + // But if there was originally a must or filter clause, the default is 0. + // If we added a must clause due to this rewrite, we should respect the original default. + rewritten.minimumShouldMatch(1); + } + } + + // Check if any nested queries were rewritten + boolean nestedQueriesChanged = false; + for (QueryBuilder mustClause : boolQuery.must()) { + if (mustClause instanceof BoolQueryBuilder && rewritten.must().contains(mustClause) == false) { + nestedQueriesChanged = true; + break; + } + } + if (!nestedQueriesChanged) { + for (QueryBuilder filterClause : boolQuery.filter()) { + if (filterClause instanceof BoolQueryBuilder && rewritten.filter().contains(filterClause) == false) { + nestedQueriesChanged = true; + break; + } + } + } + if (!nestedQueriesChanged) { + for (QueryBuilder shouldClause : boolQuery.should()) { + if (shouldClause instanceof BoolQueryBuilder && rewritten.should().contains(shouldClause) == false) { + nestedQueriesChanged = true; + break; + } + } + } + if (!nestedQueriesChanged) { + for (QueryBuilder mustNotClause : boolQuery.mustNot()) { + if (mustNotClause instanceof BoolQueryBuilder && rewritten.mustNot().contains(mustNotClause) == false) { + nestedQueriesChanged = true; + break; + } + } + } + + return (changed || nestedQueriesChanged) ? 
rewritten : query; + } + + private List getLeafReaderContexts(QueryShardContext context) { + if (context == null) { + return null; + } + try { + return context.getIndexReader().leaves(); + } catch (Exception e) { + return null; + } + } + + private boolean checkAllDocsHaveOneValue(List leafReaderContexts, String fieldName) { + try { + for (LeafReaderContext leafReaderContext : leafReaderContexts) { + PointValues pointValues = leafReaderContext.reader().getPointValues(fieldName); + if (pointValues != null) { + int docCount = pointValues.getDocCount(); + long valueCount = pointValues.size(); + // Check if all documents have exactly one value + if (docCount != valueCount) { + return false; + } + // Also check if all documents in the segment have a value for this field + // If some documents are missing the field, we can't do the optimization + // because the semantics change (missing values won't match positive queries) + int maxDoc = leafReaderContext.reader().maxDoc(); + if (docCount != maxDoc) { + return false; + } + } else { + // If there are no point values but there are documents, some docs are missing the field + if (leafReaderContext.reader().maxDoc() > 0) { + return false; + } + } + } + return true; + } catch (IOException e) { + return false; + } + } + + @Override + public int priority() { + // Run after boolean flattening (100) and must-to-filter (150) + // but before terms merging (200) + return 175; + } + + @Override + public String name() { + return "must_not_to_should"; + } +} diff --git a/server/src/main/java/org/opensearch/search/query/rewriters/MustToFilterRewriter.java b/server/src/main/java/org/opensearch/search/query/rewriters/MustToFilterRewriter.java new file mode 100644 index 0000000000000..4960302d0c812 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/rewriters/MustToFilterRewriter.java @@ -0,0 +1,177 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file 
be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.GeoBoundingBoxQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.query.WithFieldName; +import org.opensearch.search.query.QueryRewriter; + +import java.util.ArrayList; +import java.util.List; + +/** + * Rewrites must clauses to filter clauses when they don't affect scoring. + * This improves performance by avoiding unnecessary scoring calculations. + * + * For example: + *
+ * {"bool": {"must": [
+ *   {"range": {"date": {"gte": "2024-01-01"}}},
+ *   {"term": {"status": "active"}}
+ * ]}}
+ * 
+ * becomes: + *
+ * {"bool": {
+ *   "filter": [{"range": {"date": {"gte": "2024-01-01"}}}],
+ *   "must": [{"term": {"status": "active"}}]
+ * }}
+ * 
+ * + * @opensearch.internal + */ +public class MustToFilterRewriter implements QueryRewriter { + + public static final MustToFilterRewriter INSTANCE = new MustToFilterRewriter(); + + private MustToFilterRewriter() { + // Singleton + } + + @Override + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (!(query instanceof BoolQueryBuilder)) { + return query; + } + + BoolQueryBuilder boolQuery = (BoolQueryBuilder) query; + + // If there are no must clauses, nothing to rewrite + if (boolQuery.must().isEmpty()) { + return query; + } + + // First, rewrite all clauses recursively + List rewrittenMustClauses = new ArrayList<>(); + List mustClausesToMove = new ArrayList<>(); + + for (QueryBuilder clause : boolQuery.must()) { + QueryBuilder rewrittenClause = rewriteIfNeeded(clause, context); + rewrittenMustClauses.add(rewrittenClause); + + if (isClauseIrrelevantToScoring(rewrittenClause, context)) { + mustClausesToMove.add(rewrittenClause); + } + } + + // Check if anything changed - either clauses to move or nested rewrites + boolean hasChanges = !mustClausesToMove.isEmpty(); + for (int i = 0; i < boolQuery.must().size(); i++) { + if (boolQuery.must().get(i) != rewrittenMustClauses.get(i)) { + hasChanges = true; + break; + } + } + + if (!hasChanges) { + return query; + } + + // Create a new BoolQueryBuilder with moved clauses + BoolQueryBuilder rewritten = new BoolQueryBuilder(); + + // Copy all properties + rewritten.boost(boolQuery.boost()); + rewritten.queryName(boolQuery.queryName()); + rewritten.minimumShouldMatch(boolQuery.minimumShouldMatch()); + rewritten.adjustPureNegative(boolQuery.adjustPureNegative()); + + // Copy must clauses except the ones we're moving + for (QueryBuilder rewrittenClause : rewrittenMustClauses) { + if (!mustClausesToMove.contains(rewrittenClause)) { + rewritten.must(rewrittenClause); + } + } + + // Add the moved clauses to filter + for (QueryBuilder movedClause : mustClausesToMove) { + 
rewritten.filter(movedClause); + } + + // Copy existing filter clauses + for (QueryBuilder filterClause : boolQuery.filter()) { + rewritten.filter(rewriteIfNeeded(filterClause, context)); + } + + // Copy should and mustNot clauses + for (QueryBuilder shouldClause : boolQuery.should()) { + rewritten.should(rewriteIfNeeded(shouldClause, context)); + } + for (QueryBuilder mustNotClause : boolQuery.mustNot()) { + rewritten.mustNot(rewriteIfNeeded(mustNotClause, context)); + } + + return rewritten; + } + + private QueryBuilder rewriteIfNeeded(QueryBuilder query, QueryShardContext context) { + // Recursively rewrite nested boolean queries + if (query instanceof BoolQueryBuilder) { + return rewrite(query, context); + } + return query; + } + + private boolean isClauseIrrelevantToScoring(QueryBuilder clause, QueryShardContext context) { + // This is an incomplete list of clauses this might apply for; it can be expanded in future. + + // If a clause is purely numeric, for example a date range, its score is unimportant as + // it'll be the same for all returned docs + if (clause instanceof RangeQueryBuilder) return true; + if (clause instanceof GeoBoundingBoxQueryBuilder) return true; + + // Further optimizations depend on knowing whether the field is numeric. + // Skip moving these clauses if we don't have the shard context. 
+ if (context == null) return false; + + if (!(clause instanceof WithFieldName)) return false; + + WithFieldName wfn = (WithFieldName) clause; + MappedFieldType fieldType = context.fieldMapper(wfn.fieldName()); + + if (!(fieldType instanceof NumberFieldMapper.NumberFieldType)) return false; + + // Numeric field queries have constant scores + if (clause instanceof MatchQueryBuilder) return true; + if (clause instanceof TermQueryBuilder) return true; + if (clause instanceof TermsQueryBuilder) return true; + + return false; + } + + @Override + public int priority() { + // Run after boolean flattening (100) but before terms merging (200) + return 150; + } + + @Override + public String name() { + return "must_to_filter"; + } +} diff --git a/server/src/main/java/org/opensearch/search/query/rewriters/TermsMergingRewriter.java b/server/src/main/java/org/opensearch/search/query/rewriters/TermsMergingRewriter.java new file mode 100644 index 0000000000000..9c4d863602091 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/rewriters/TermsMergingRewriter.java @@ -0,0 +1,314 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.search.SearchService; +import org.opensearch.search.query.QueryRewriter; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Rewrites multiple term queries on the same field into a single terms query. + * For example: + *
+ * {"bool": {"filter": [
+ *   {"term": {"status": "active"}},
+ *   {"term": {"status": "pending"}},
+ *   {"term": {"category": "A"}}
+ * ]}}
+ * 
+ * becomes: + *
+ * {"bool": {"filter": [
+ *   {"terms": {"status": ["active", "pending"]}},
+ *   {"term": {"category": "A"}}
+ * ]}}
+ * 
+ * + * Note: Terms are only merged when there are enough terms to benefit from + * the terms query's bit set optimization (default threshold: 16 terms). + * This avoids performance regressions for small numbers of terms where + * individual term queries may perform better. + * + * @opensearch.internal + */ +public class TermsMergingRewriter implements QueryRewriter { + + public static final TermsMergingRewriter INSTANCE = new TermsMergingRewriter(); + + /** + * Default minimum number of terms to merge. Below this threshold, individual + * term queries may perform better than a terms query. + * Based on Lucene's TermInSetQuery optimization characteristics. + */ + private static final int DEFAULT_MINIMUM_TERMS_TO_MERGE = 16; + + /** + * The minimum number of terms to merge. + */ + private volatile int minimumTermsToMerge = DEFAULT_MINIMUM_TERMS_TO_MERGE; + + /** + * Creates a new rewriter. + */ + private TermsMergingRewriter() { + // Singleton + } + + /** + * Initialize this rewriter with cluster settings. + * This registers an update consumer to keep the threshold in sync with the cluster setting. 
+ * + * @param settings Initial settings + * @param clusterSettings Cluster settings to register update consumer + */ + public void initialize(Settings settings, ClusterSettings clusterSettings) { + this.minimumTermsToMerge = SearchService.QUERY_REWRITING_TERMS_THRESHOLD_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + SearchService.QUERY_REWRITING_TERMS_THRESHOLD_SETTING, + threshold -> this.minimumTermsToMerge = threshold + ); + } + + @Override + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (!(query instanceof BoolQueryBuilder)) { + return query; + } + + BoolQueryBuilder boolQuery = (BoolQueryBuilder) query; + + // First check if merging is needed + if (!needsMerging(boolQuery)) { + return query; + } + + BoolQueryBuilder rewritten = new BoolQueryBuilder(); + + rewritten.boost(boolQuery.boost()); + rewritten.queryName(boolQuery.queryName()); + rewritten.minimumShouldMatch(boolQuery.minimumShouldMatch()); + rewritten.adjustPureNegative(boolQuery.adjustPureNegative()); + + // Only merge terms in contexts where it's semantically safe + rewriteClausesNoMerge(boolQuery.must(), rewritten::must); // Don't merge in must + rewriteClauses(boolQuery.filter(), rewritten::filter); // Safe to merge + rewriteClauses(boolQuery.should(), rewritten::should); // Safe to merge + rewriteClausesNoMerge(boolQuery.mustNot(), rewritten::mustNot); // Don't merge in mustNot + + return rewritten; + } + + private boolean needsMerging(BoolQueryBuilder boolQuery) { + // Check filter and should clauses for mergeable terms + if (hasMergeableTerms(boolQuery.filter()) || hasMergeableTerms(boolQuery.should())) { + return true; + } + + // Check nested bool queries + return hasNestedBoolThatNeedsMerging(boolQuery); + } + + private boolean hasMergeableTerms(List clauses) { + Map> fieldBoosts = new HashMap<>(); + + for (QueryBuilder clause : clauses) { + if (clause instanceof TermQueryBuilder) { + TermQueryBuilder termQuery = (TermQueryBuilder) 
clause; + String field = termQuery.fieldName(); + float boost = termQuery.boost(); + + fieldBoosts.computeIfAbsent(field, k -> new ArrayList<>()).add(boost); + + List boosts = fieldBoosts.get(field); + if (boosts.size() >= minimumTermsToMerge) { + // Check if all boosts are the same + float firstBoost = boosts.get(0); + boolean sameBoost = boosts.stream().allMatch(b -> b == firstBoost); + if (sameBoost) { + return true; + } + } + } else if (clause instanceof TermsQueryBuilder) { + // Check if there are enough term queries that can be merged with this terms query + TermsQueryBuilder termsQuery = (TermsQueryBuilder) clause; + String field = termsQuery.fieldName(); + int additionalTerms = 0; + + for (QueryBuilder other : clauses) { + if (other != clause && other instanceof TermQueryBuilder) { + TermQueryBuilder termQuery = (TermQueryBuilder) other; + if (field.equals(termQuery.fieldName()) && termsQuery.boost() == termQuery.boost()) { + additionalTerms++; + } + } + } + + // Only worth merging if the combined size would meet the threshold + if (termsQuery.values().size() + additionalTerms >= minimumTermsToMerge) { + return true; + } + } + } + + return false; + } + + private boolean hasNestedBoolThatNeedsMerging(BoolQueryBuilder boolQuery) { + for (QueryBuilder clause : boolQuery.must()) { + if (clause instanceof BoolQueryBuilder && needsMerging((BoolQueryBuilder) clause)) { + return true; + } + } + for (QueryBuilder clause : boolQuery.filter()) { + if (clause instanceof BoolQueryBuilder && needsMerging((BoolQueryBuilder) clause)) { + return true; + } + } + for (QueryBuilder clause : boolQuery.should()) { + if (clause instanceof BoolQueryBuilder && needsMerging((BoolQueryBuilder) clause)) { + return true; + } + } + for (QueryBuilder clause : boolQuery.mustNot()) { + if (clause instanceof BoolQueryBuilder && needsMerging((BoolQueryBuilder) clause)) { + return true; + } + } + return false; + } + + private void rewriteClauses(List clauses, ClauseAdder adder) { + Map 
termsMap = new HashMap<>(); + List nonTermClauses = new ArrayList<>(); + + // Group term queries by field + for (QueryBuilder clause : clauses) { + if (clause instanceof TermQueryBuilder) { + TermQueryBuilder termQuery = (TermQueryBuilder) clause; + String field = termQuery.fieldName(); + float boost = termQuery.boost(); + + TermsInfo info = termsMap.get(field); + if (info != null && info.boost != boost) { + // Different boost, can't merge - add as single term + nonTermClauses.add(clause); + } else { + termsMap.computeIfAbsent(field, k -> new TermsInfo(boost)).addValue(termQuery.value()); + } + } else if (clause instanceof TermsQueryBuilder) { + // Existing terms query - add to it + TermsQueryBuilder termsQuery = (TermsQueryBuilder) clause; + String field = termsQuery.fieldName(); + float boost = termsQuery.boost(); + + TermsInfo info = termsMap.get(field); + if (info != null && info.boost != boost) { + // Different boost, can't merge + nonTermClauses.add(clause); + } else { + info = termsMap.computeIfAbsent(field, k -> new TermsInfo(boost)); + for (Object value : termsQuery.values()) { + info.addValue(value); + } + } + } else if (clause instanceof BoolQueryBuilder) { + // Recursively rewrite nested bool queries + nonTermClauses.add(rewrite(clause, null)); + } else { + nonTermClauses.add(clause); + } + } + + // Create terms queries for fields with multiple values + for (Map.Entry entry : termsMap.entrySet()) { + String field = entry.getKey(); + TermsInfo info = entry.getValue(); + + if (info.values.size() == 1) { + // Single value, keep as term query + TermQueryBuilder termQuery = new TermQueryBuilder(field, info.values.get(0)); + if (info.boost != 1.0f) { + termQuery.boost(info.boost); + } + adder.addClause(termQuery); + } else if (info.values.size() >= minimumTermsToMerge) { + // Many values, merge into terms query for better performance + TermsQueryBuilder termsQuery = new TermsQueryBuilder(field, info.values); + if (info.boost != 1.0f) { + 
termsQuery.boost(info.boost); + } + adder.addClause(termsQuery); + } else { + // Few values, keep as individual term queries for better performance + for (Object value : info.values) { + TermQueryBuilder termQuery = new TermQueryBuilder(field, value); + if (info.boost != 1.0f) { + termQuery.boost(info.boost); + } + adder.addClause(termQuery); + } + } + } + + // Add non-term clauses + for (QueryBuilder clause : nonTermClauses) { + adder.addClause(clause); + } + } + + private void rewriteClausesNoMerge(List clauses, ClauseAdder adder) { + for (QueryBuilder clause : clauses) { + if (clause instanceof BoolQueryBuilder) { + // Recursively rewrite nested bool queries + adder.addClause(rewrite(clause, null)); + } else { + adder.addClause(clause); + } + } + } + + @Override + public int priority() { + return 200; + } + + @Override + public String name() { + return "terms_merging"; + } + + @FunctionalInterface + private interface ClauseAdder { + void addClause(QueryBuilder clause); + } + + private static class TermsInfo { + final float boost; + final List values = new ArrayList<>(); + + TermsInfo(float boost) { + this.boost = boost; + } + + void addValue(Object value) { + values.add(value); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/query/rewriters/package-info.java b/server/src/main/java/org/opensearch/search/query/rewriters/package-info.java new file mode 100644 index 0000000000000..167d97e097185 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/rewriters/package-info.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Query rewriting optimizations for improving search performance. + * + *

This package contains various query rewriters that transform queries + * into more efficient forms while maintaining semantic equivalence. + * + *

The rewriters include: + *

    + *
+ *   <li>{@link org.opensearch.search.query.rewriters.BooleanFlatteningRewriter} -
+ * Flattens nested boolean queries with single clauses</li>
+ *   <li>{@link org.opensearch.search.query.rewriters.MatchAllRemovalRewriter} -
+ * Removes redundant match_all queries from boolean clauses</li>
+ *   <li>{@link org.opensearch.search.query.rewriters.TermsMergingRewriter} -
+ * Merges multiple term queries on the same field into a single terms query</li>
+ *   <li>{@link org.opensearch.search.query.rewriters.MustNotToShouldRewriter} -
+ * Transforms must_not queries to should queries for better performance</li>
+ *   <li>{@link org.opensearch.search.query.rewriters.MustToFilterRewriter} -
+ * Moves scoring-irrelevant queries from must to filter clauses</li>
+ * </ul>
+ * + * @opensearch.internal + */ +package org.opensearch.search.query.rewriters; diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index cb83b8e1986b9..85e1d0f00c661 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -73,7 +73,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class BoolQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -517,63 +516,6 @@ public void testVisit() { } - public void testOneMustNotRangeRewritten() throws Exception { - int from = 10; - int to = 20; - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new StandardAnalyzer())); - addDocument(w, INT_FIELD_NAME, 1); - DirectoryReader reader = DirectoryReader.open(w); - IndexSearcher searcher = getIndexSearcher(reader); - - for (boolean includeLower : new boolean[] { true, false }) { - for (boolean includeUpper : new boolean[] { true, false }) { - BoolQueryBuilder qb = new BoolQueryBuilder(); - QueryBuilder rq = getRangeQueryBuilder(INT_FIELD_NAME, from, to, includeLower, includeUpper); - qb.mustNot(rq); - - BoolQueryBuilder rewritten = (BoolQueryBuilder) Rewriteable.rewrite(qb, createShardContext(searcher)); - assertFalse(rewritten.mustNot().contains(rq)); - - QueryBuilder expectedLowerQuery = getRangeQueryBuilder(INT_FIELD_NAME, null, from, false, !includeLower); - QueryBuilder expectedUpperQuery = getRangeQueryBuilder(INT_FIELD_NAME, to, null, !includeUpper, true); - assertEquals(1, rewritten.must().size()); - - BoolQueryBuilder nestedBoolQuery = (BoolQueryBuilder) rewritten.must().get(0); - assertEquals(2, nestedBoolQuery.should().size()); - assertEquals("1", 
nestedBoolQuery.minimumShouldMatch()); - assertTrue(nestedBoolQuery.should().contains(expectedLowerQuery)); - assertTrue(nestedBoolQuery.should().contains(expectedUpperQuery)); - } - } - IOUtils.close(w, reader, dir); - } - - public void testOneSingleEndedMustNotRangeRewritten() throws Exception { - // Test a must_not range query with only one endpoint is rewritten correctly - int from = 10; - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new StandardAnalyzer())); - addDocument(w, INT_FIELD_NAME, 1); - DirectoryReader reader = DirectoryReader.open(w); - IndexSearcher searcher = getIndexSearcher(reader); - - BoolQueryBuilder qb = new BoolQueryBuilder(); - QueryBuilder rq = getRangeQueryBuilder(INT_FIELD_NAME, from, null, false, false); - qb.mustNot(rq); - BoolQueryBuilder rewritten = (BoolQueryBuilder) Rewriteable.rewrite(qb, createShardContext(searcher)); - assertFalse(rewritten.mustNot().contains(rq)); - - QueryBuilder expectedQuery = getRangeQueryBuilder(INT_FIELD_NAME, null, from, false, true); - assertEquals(1, rewritten.must().size()); - BoolQueryBuilder nestedBoolQuery = (BoolQueryBuilder) rewritten.must().get(0); - assertEquals(1, nestedBoolQuery.should().size()); - assertTrue(nestedBoolQuery.should().contains(expectedQuery)); - assertEquals("1", nestedBoolQuery.minimumShouldMatch()); - - IOUtils.close(w, reader, dir); - } - public void testMultipleComplementAwareOnSameFieldNotRewritten() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new StandardAnalyzer())); @@ -641,100 +583,6 @@ public void testMustNotRewriteDisabledWithoutExactlyOneValuePerDoc() throws Exce IOUtils.close(w, reader, dir); } - public void testOneMustNotNumericMatchQueryRewritten() throws Exception { - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new StandardAnalyzer())); - addDocument(w, INT_FIELD_NAME, 1); - DirectoryReader reader = 
DirectoryReader.open(w); - IndexSearcher searcher = getIndexSearcher(reader); - - BoolQueryBuilder qb = new BoolQueryBuilder(); - int excludedValue = 200; - QueryBuilder matchQuery = new MatchQueryBuilder(INT_FIELD_NAME, excludedValue); - qb.mustNot(matchQuery); - - BoolQueryBuilder rewritten = (BoolQueryBuilder) Rewriteable.rewrite(qb, createShardContext(searcher)); - assertFalse(rewritten.mustNot().contains(matchQuery)); - - QueryBuilder expectedLowerQuery = getRangeQueryBuilder(INT_FIELD_NAME, null, excludedValue, true, false); - QueryBuilder expectedUpperQuery = getRangeQueryBuilder(INT_FIELD_NAME, excludedValue, null, false, true); - assertEquals(1, rewritten.must().size()); - - BoolQueryBuilder nestedBoolQuery = (BoolQueryBuilder) rewritten.must().get(0); - assertEquals(2, nestedBoolQuery.should().size()); - assertEquals("1", nestedBoolQuery.minimumShouldMatch()); - assertTrue(nestedBoolQuery.should().contains(expectedLowerQuery)); - assertTrue(nestedBoolQuery.should().contains(expectedUpperQuery)); - - // When the QueryShardContext is null, we should not rewrite any match queries as we can't confirm if they're on numeric fields. 
- QueryRewriteContext nullContext = mock(QueryRewriteContext.class); - when(nullContext.convertToShardContext()).thenReturn(null); - BoolQueryBuilder rewrittenNoContext = (BoolQueryBuilder) Rewriteable.rewrite(qb, nullContext); - assertTrue(rewrittenNoContext.mustNot().contains(matchQuery)); - assertTrue(rewrittenNoContext.should().isEmpty()); - - IOUtils.close(w, reader, dir); - } - - public void testMustClausesRewritten() throws Exception { - BoolQueryBuilder qb = new BoolQueryBuilder(); - - // Should be moved - QueryBuilder intTermQuery = new TermQueryBuilder(INT_FIELD_NAME, 200); - QueryBuilder rangeQuery = new RangeQueryBuilder(INT_FIELD_NAME).gt(10).lt(20); - // Should be moved to filter clause, the boost applies equally to all matched docs - QueryBuilder rangeQueryWithBoost = new RangeQueryBuilder(DATE_FIELD_NAME).gt(10).lt(20).boost(2); - QueryBuilder intTermsQuery = new TermsQueryBuilder(INT_FIELD_NAME, new int[] { 1, 4, 100 }); - QueryBuilder boundingBoxQuery = new GeoBoundingBoxQueryBuilder(GEO_POINT_FIELD_NAME); - QueryBuilder doubleMatchQuery = new MatchQueryBuilder(DOUBLE_FIELD_NAME, 5.5); - - // Should not be moved - QueryBuilder textTermQuery = new TermQueryBuilder(TEXT_FIELD_NAME, "bar"); - QueryBuilder textTermsQuery = new TermsQueryBuilder(TEXT_FIELD_NAME, "foo", "bar"); - QueryBuilder textMatchQuery = new MatchQueryBuilder(TEXT_FIELD_NAME, "baz"); - - qb.must(intTermQuery); - qb.must(rangeQuery); - qb.must(rangeQueryWithBoost); - qb.must(intTermsQuery); - qb.must(boundingBoxQuery); - qb.must(doubleMatchQuery); - - qb.must(textTermQuery); - qb.must(textTermsQuery); - qb.must(textMatchQuery); - - BoolQueryBuilder rewritten = (BoolQueryBuilder) Rewriteable.rewrite(qb, createShardContext()); - for (QueryBuilder clause : List.of( - intTermQuery, - rangeQuery, - rangeQueryWithBoost, - intTermsQuery, - boundingBoxQuery, - doubleMatchQuery - )) { - assertFalse(rewritten.must().contains(clause)); - assertTrue(rewritten.filter().contains(clause)); - } - 
for (QueryBuilder clause : List.of(textTermQuery, textTermsQuery, textMatchQuery)) { - assertTrue(rewritten.must().contains(clause)); - assertFalse(rewritten.filter().contains(clause)); - } - - // If we have null QueryShardContext, match/term/terms queries should not be moved as we can't determine if they're numeric. - QueryRewriteContext nullContext = mock(QueryRewriteContext.class); - when(nullContext.convertToShardContext()).thenReturn(null); - rewritten = (BoolQueryBuilder) Rewriteable.rewrite(qb, nullContext); - for (QueryBuilder clause : List.of(rangeQuery, rangeQueryWithBoost, boundingBoxQuery)) { - assertFalse(rewritten.must().contains(clause)); - assertTrue(rewritten.filter().contains(clause)); - } - for (QueryBuilder clause : List.of(textTermQuery, textTermsQuery, textMatchQuery, intTermQuery, intTermsQuery, doubleMatchQuery)) { - assertTrue(rewritten.must().contains(clause)); - assertFalse(rewritten.filter().contains(clause)); - } - } - private QueryBuilder getRangeQueryBuilder(String fieldName, Integer lower, Integer upper, boolean includeLower, boolean includeUpper) { RangeQueryBuilder rq = new RangeQueryBuilder(fieldName); if (lower != null) { diff --git a/server/src/test/java/org/opensearch/search/query/QueryRewriterRegistryTests.java b/server/src/test/java/org/opensearch/search/query/QueryRewriterRegistryTests.java new file mode 100644 index 0000000000000..64cb2ff5efb27 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/QueryRewriterRegistryTests.java @@ -0,0 +1,328 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.SearchService; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class QueryRewriterRegistryTests extends OpenSearchTestCase { + + private final QueryShardContext context = mock(QueryShardContext.class); + + @Override + public void setUp() throws Exception { + super.setUp(); + // Initialize registry with default settings + Settings settings = Settings.builder() + .put(SearchService.QUERY_REWRITING_ENABLED_SETTING.getKey(), true) + .put(SearchService.QUERY_REWRITING_TERMS_THRESHOLD_SETTING.getKey(), 16) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + QueryRewriterRegistry.INSTANCE.initialize(settings, clusterSettings); + } + + public void testCompleteRewritingPipeline() { + // Test that all rewriters work together correctly + QueryBuilder nestedBool = QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .must(QueryBuilders.termQuery("status", "active")) + .must(QueryBuilders.termQuery("status", "pending")); + + QueryBuilder query = QueryBuilders.boolQuery() + .must(nestedBool) + .filter(QueryBuilders.matchAllQuery()) + .filter(QueryBuilders.termQuery("type", "product")) + .filter(QueryBuilders.termQuery("type", "service")); + + QueryBuilder rewritten = 
QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have: + // - Flattened nested boolean + // - Terms in must clauses are NOT merged (semantically different) + // - Removed match_all queries + assertThat(rewrittenBool.must().size(), equalTo(2)); // two term queries for status + assertThat(rewrittenBool.must().get(0), instanceOf(TermQueryBuilder.class)); + assertThat(rewrittenBool.must().get(1), instanceOf(TermQueryBuilder.class)); + + assertThat(rewrittenBool.filter().size(), equalTo(2)); // two term queries for type (below threshold) + assertThat(rewrittenBool.filter().get(0), instanceOf(TermQueryBuilder.class)); + assertThat(rewrittenBool.filter().get(1), instanceOf(TermQueryBuilder.class)); + } + + public void testDisabledRewriting() { + // Test disabled rewriting via settings + Settings settings = Settings.builder().put(SearchService.QUERY_REWRITING_ENABLED_SETTING.getKey(), false).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + // Initialize with disabled setting + QueryRewriterRegistry.INSTANCE.initialize(settings, clusterSettings); + + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .filter(QueryBuilders.termQuery("field", "value")); + + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertSame(query, rewritten); + + // Enable via settings update + clusterSettings.applySettings(Settings.builder().put(SearchService.QUERY_REWRITING_ENABLED_SETTING.getKey(), true).build()); + + // Now it should rewrite + QueryBuilder rewritten2 = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertNotSame(query, rewritten2); + } + + public void testNullQuery() { + // Null query should return null + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(null, 
context); + assertNull(rewritten); + } + + public void testRewriterPriorityOrder() { + // Test that rewriters are applied in correct order + // Create a query that will be affected by multiple rewriters + QueryBuilder deeplyNested = QueryBuilders.boolQuery() + .must( + QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .must(QueryBuilders.termQuery("field", "value1")) + .must(QueryBuilders.termQuery("field", "value2")) + ); + + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(deeplyNested, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should be flattened first, match_all kept in must (scoring context), but terms NOT merged in must context + assertThat(rewrittenBool.must().size(), equalTo(3)); // match_all + 2 term queries + // Check that we have one match_all and two term queries (order may vary) + long matchAllCount = rewrittenBool.must().stream().filter(q -> q instanceof MatchAllQueryBuilder).count(); + long termCount = rewrittenBool.must().stream().filter(q -> q instanceof TermQueryBuilder).count(); + assertThat(matchAllCount, equalTo(1L)); + assertThat(termCount, equalTo(2L)); + } + + public void testComplexRealWorldQuery() { + // Test a complex real-world-like query + QueryBuilder query = QueryBuilders.boolQuery() + .must( + QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .filter(QueryBuilders.termQuery("category", "electronics")) + .filter(QueryBuilders.termQuery("category", "computers")) + ) + .filter( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("brand", "apple")) + .should(QueryBuilders.termQuery("brand", "dell")) + .should(QueryBuilders.termQuery("brand", "hp")) + ) + .must(QueryBuilders.rangeQuery("price").gte(500).lte(2000)) + .mustNot(QueryBuilders.termQuery("status", "discontinued")); + + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + 
assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // After rewriting: + // - The nested bool in must clause should be flattened + // - match_all should be removed + // - term queries should be merged into terms query + // - The filter bool with brand terms should be preserved + // - The range query should be moved from must to filter by MustToFilterRewriter + + // Check must clauses (should have terms query for category only - range moved to filter) + assertThat(rewrittenBool.must().size(), equalTo(1)); + + // Check filter clauses (should have the brand bool query AND the range query) + assertThat(rewrittenBool.filter().size(), equalTo(2)); + // One should be the brand bool query + boolean hasBoolFilter = false; + boolean hasRangeFilter = false; + for (QueryBuilder filter : rewrittenBool.filter()) { + if (filter instanceof BoolQueryBuilder) { + hasBoolFilter = true; + } else if (filter instanceof RangeQueryBuilder) { + hasRangeFilter = true; + } + } + assertTrue(hasBoolFilter); + assertTrue(hasRangeFilter); + + // Must not should be preserved + assertThat(rewrittenBool.mustNot().size(), equalTo(1)); + } + + public void testPerformanceMetrics() { + // Test that we log performance metrics in debug mode + // This is more of a sanity check that the timing code doesn't throw exceptions + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("field1", "value1")) + .must(QueryBuilders.termQuery("field1", "value2")) + .filter(QueryBuilders.matchAllQuery()); + + // Should not throw any exceptions + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertNotNull(rewritten); + } + + public void testRewriterErrorHandling() { + // Test that if a rewriter throws an exception, others still run + // This is handled internally by QueryRewriterRegistry + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("field", 
"value")) + .filter(QueryBuilders.matchAllQuery()); + + // Even if one rewriter fails, others should still be applied + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertNotNull(rewritten); + } + + public void testVeryComplexMixedQuery() { + // Test a very complex query with all optimizations applicable + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .must( + QueryBuilders.boolQuery() + .must( + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("status", "active")) + .filter(QueryBuilders.termQuery("status", "pending")) + .filter(QueryBuilders.termQuery("status", "approved")) + ) + .must( + QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .filter(QueryBuilders.termQuery("type", "A")) + .filter(QueryBuilders.termQuery("type", "B")) + ) + ) + .filter(QueryBuilders.matchAllQuery()) + .filter( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("priority", "high")) + .should(QueryBuilders.termQuery("priority", "medium")) + .should(QueryBuilders.termQuery("priority", "low")) + ) + .should(QueryBuilders.termQuery("category", "urgent")) + .should(QueryBuilders.termQuery("category", "important")) + .mustNot(QueryBuilders.boolQuery().must(QueryBuilders.termQuery("archived", "true"))) + .minimumShouldMatch(1); + + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder result = (BoolQueryBuilder) rewritten; + + // Check that minimum should match is preserved + assertThat(result.minimumShouldMatch(), equalTo("1")); + + // Verify optimizations were applied + assertNotSame(query, rewritten); + + // Should have flattened structure and merged terms + assertTrue(result.must().size() >= 1); + assertTrue(result.filter().size() >= 1); + } + + public void testCustomRewriterRegistration() { + // Create a custom rewriter for testing + QueryRewriter customRewriter = 
new QueryRewriter() { + @Override + public QueryBuilder rewrite(QueryBuilder query, QueryShardContext context) { + if (query instanceof TermQueryBuilder) { + TermQueryBuilder termQuery = (TermQueryBuilder) query; + if ("test_field".equals(termQuery.fieldName()) && "test_value".equals(termQuery.value())) { + // Replace with a different query + return QueryBuilders.termQuery("custom_field", "custom_value"); + } + } else if (query instanceof BoolQueryBuilder) { + // Recursively apply to nested queries + BoolQueryBuilder boolQuery = (BoolQueryBuilder) query; + BoolQueryBuilder rewritten = new BoolQueryBuilder(); + + // Copy settings + rewritten.boost(boolQuery.boost()); + rewritten.queryName(boolQuery.queryName()); + rewritten.minimumShouldMatch(boolQuery.minimumShouldMatch()); + rewritten.adjustPureNegative(boolQuery.adjustPureNegative()); + + // Recursively rewrite clauses + boolean changed = false; + for (QueryBuilder must : boolQuery.must()) { + QueryBuilder rewrittenClause = rewrite(must, context); + rewritten.must(rewrittenClause); + if (rewrittenClause != must) changed = true; + } + for (QueryBuilder filter : boolQuery.filter()) { + QueryBuilder rewrittenClause = rewrite(filter, context); + rewritten.filter(rewrittenClause); + if (rewrittenClause != filter) changed = true; + } + for (QueryBuilder should : boolQuery.should()) { + QueryBuilder rewrittenClause = rewrite(should, context); + rewritten.should(rewrittenClause); + if (rewrittenClause != should) changed = true; + } + for (QueryBuilder mustNot : boolQuery.mustNot()) { + QueryBuilder rewrittenClause = rewrite(mustNot, context); + rewritten.mustNot(rewrittenClause); + if (rewrittenClause != mustNot) changed = true; + } + + return changed ? 
rewritten : query; + } + return query; + } + + @Override + public int priority() { + return 1000; // High priority to ensure it runs last + } + + @Override + public String name() { + return "test_custom_rewriter"; + } + }; + + // Register the custom rewriter + QueryRewriterRegistry.INSTANCE.registerRewriter(customRewriter); + + // Test that it's applied + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("test_field", "test_value")) + .filter(QueryBuilders.termQuery("other_field", "other_value")); + + QueryBuilder rewritten = QueryRewriterRegistry.INSTANCE.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // The custom rewriter should have replaced the term query + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.must().get(0), instanceOf(TermQueryBuilder.class)); + TermQueryBuilder mustTerm = (TermQueryBuilder) rewrittenBool.must().get(0); + assertThat(mustTerm.fieldName(), equalTo("custom_field")); + assertThat(mustTerm.value(), equalTo("custom_value")); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriterTests.java b/server/src/test/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriterTests.java new file mode 100644 index 0000000000000..317d753493cf1 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/rewriters/BooleanFlatteningRewriterTests.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query.rewriters; + +import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class BooleanFlatteningRewriterTests extends OpenSearchTestCase { + + private final BooleanFlatteningRewriter rewriter = BooleanFlatteningRewriter.INSTANCE; + private final QueryShardContext context = mock(QueryShardContext.class); + + public void testSimpleBooleanQuery() { + // Simple boolean query should not be modified + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("field1", "value1")) + .filter(QueryBuilders.termQuery("field2", "value2")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testNestedBooleanFlattening() { + // Nested boolean query with single must clause should be flattened + QueryBuilder nestedBool = QueryBuilders.boolQuery().must(QueryBuilders.termQuery("field1", "value1")); + + QueryBuilder query = QueryBuilders.boolQuery().must(nestedBool).filter(QueryBuilders.termQuery("field2", "value2")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // The nested bool should be flattened + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.must().get(0), instanceOf(QueryBuilders.termQuery("field1", "value1").getClass())); + assertThat(rewrittenBool.filter().size(), equalTo(1)); + } + + public void 
testMultipleNestedBooleansFlattening() { + // Multiple nested boolean queries should all be flattened + QueryBuilder nested1 = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("field1", "value1")) + .must(QueryBuilders.termQuery("field2", "value2")); + + QueryBuilder nested2 = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("field3", "value3")); + + QueryBuilder query = QueryBuilders.boolQuery().must(nested1).filter(nested2); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // All nested clauses should be flattened + assertThat(rewrittenBool.must().size(), equalTo(2)); + assertThat(rewrittenBool.filter().size(), equalTo(1)); + } + + public void testShouldClauseFlattening() { + // Should clauses should also be flattened + QueryBuilder nestedShould = QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value1")) + .should(QueryBuilders.termQuery("field2", "value2")); + + QueryBuilder query = QueryBuilders.boolQuery().should(nestedShould).must(QueryBuilders.termQuery("field3", "value3")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should clauses should be flattened + assertThat(rewrittenBool.should().size(), equalTo(2)); + assertThat(rewrittenBool.must().size(), equalTo(1)); + } + + public void testMustNotClauseNoFlattening() { + // Must_not clauses should NOT be flattened to preserve semantics + QueryBuilder nestedMustNot = QueryBuilders.boolQuery().must(QueryBuilders.termQuery("field1", "value1")); + + QueryBuilder query = QueryBuilders.boolQuery().mustNot(nestedMustNot).must(QueryBuilders.termQuery("field2", "value2")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, 
instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Must_not should not be flattened + assertThat(rewrittenBool.mustNot().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().get(0), instanceOf(BoolQueryBuilder.class)); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/18906") + public void testDeepNesting() { + // TODO: This test expects complete flattening of deeply nested bool queries + // where intermediate bool wrappers are removed entirely. Our current implementation + // only flattens by merging same-type clauses but preserves the bool structure. + // This would require a different optimization strategy. + + // Deep nesting should be flattened at all levels + QueryBuilder deepNested = QueryBuilders.boolQuery() + .must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.termQuery("field1", "value1")))); + + QueryBuilder rewritten = rewriter.rewrite(deepNested, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should be flattened to single level bool with term query + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.must().get(0), instanceOf(TermQueryBuilder.class)); + + // Verify the term query details + TermQueryBuilder termQuery = (TermQueryBuilder) rewrittenBool.must().get(0); + assertThat(termQuery.fieldName(), equalTo("field1")); + assertThat(termQuery.value(), equalTo("value1")); + } + + public void testMixedClauseTypes() { + // Mixed clause types with different minimumShouldMatch settings + QueryBuilder nested = QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value1")) + .should(QueryBuilders.termQuery("field2", "value2")) + .minimumShouldMatch(1); + + QueryBuilder query = QueryBuilders.boolQuery().must(nested).minimumShouldMatch(2); + + QueryBuilder rewritten = rewriter.rewrite(query, 
context); + assertSame(query, rewritten); // Should not flatten due to different minimumShouldMatch + } + + public void testEmptyBooleanQuery() { + // Empty boolean query should not cause issues + QueryBuilder query = QueryBuilders.boolQuery(); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testNonBooleanQuery() { + // Non-boolean queries should be returned as-is + QueryBuilder query = QueryBuilders.termQuery("field", "value"); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testVeryDeepNesting() { + // Test with 10 levels of nesting + QueryBuilder innermost = QueryBuilders.termQuery("field", "value"); + for (int i = 0; i < 10; i++) { + innermost = QueryBuilders.boolQuery().must(innermost); + } + + QueryBuilder rewritten = rewriter.rewrite(innermost, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + + // Should be flattened significantly + BoolQueryBuilder result = (BoolQueryBuilder) rewritten; + assertThat(result.must().size(), equalTo(1)); + } + + public void testQueryNamePreservation() { + // Ensure query names are preserved during flattening + QueryBuilder query = QueryBuilders.boolQuery() + .queryName("outer") + .must(QueryBuilders.boolQuery().queryName("inner").must(QueryBuilders.termQuery("field", "value"))); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + BoolQueryBuilder result = (BoolQueryBuilder) rewritten; + assertThat(result.queryName(), equalTo("outer")); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriterTests.java b/server/src/test/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriterTests.java new file mode 100644 index 0000000000000..6f2c6cf93133d --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/rewriters/MatchAllRemovalRewriterTests.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class MatchAllRemovalRewriterTests extends OpenSearchTestCase { + + private final MatchAllRemovalRewriter rewriter = MatchAllRemovalRewriter.INSTANCE; + private final QueryShardContext context = mock(QueryShardContext.class); + + public void testRemoveMatchAllFromMust() { + // match_all in must clause should NOT be removed in scoring context + QueryBuilder query = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(QueryBuilders.termQuery("field", "value")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // match_all should be kept in scoring context + assertThat(rewrittenBool.must().size(), equalTo(2)); + } + + public void testRemoveMatchAllFromFilter() { + // match_all in filter clause should be removed + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.matchAllQuery()) + .filter(QueryBuilders.rangeQuery("price").gt(100)) + .must(QueryBuilders.termQuery("category", "electronics")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // match_all should be removed from filter + assertThat(rewrittenBool.filter().size(), 
equalTo(1)); + assertThat(rewrittenBool.must().size(), equalTo(1)); + } + + public void testKeepMatchAllInShould() { + // match_all in should clause should be kept + QueryBuilder query = QueryBuilders.boolQuery() + .should(QueryBuilders.matchAllQuery()) + .should(QueryBuilders.termQuery("field", "value")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes for should clause + } + + public void testKeepMatchAllInMustNot() { + // match_all in must_not clause should be kept (it's meaningful) + QueryBuilder query = QueryBuilders.boolQuery() + .mustNot(QueryBuilders.matchAllQuery()) + .must(QueryBuilders.termQuery("field", "value")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes for must_not clause + } + + public void testOnlyMatchAllQuery() { + // Boolean query with only match_all should be simplified to match_all + QueryBuilder query = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(QueryBuilders.matchAllQuery().getClass())); + } + + public void testMultipleMatchAllQueries() { + // Multiple match_all queries should all be removed + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .must(QueryBuilders.matchAllQuery()) + .filter(QueryBuilders.matchAllQuery()) + .must(QueryBuilders.termQuery("field", "value")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // With filter clause present, this is not a pure scoring context + // Since there are non-match_all queries in must, match_all should be removed + assertThat(rewrittenBool.must().size(), equalTo(1)); // only term query remains + assertThat(rewrittenBool.filter().size(), equalTo(0)); + } + + public void 
testNestedBooleanWithMatchAll() { + // Nested boolean queries should also have match_all removed + QueryBuilder nested = QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .must(QueryBuilders.termQuery("field1", "value1")); + + QueryBuilder query = QueryBuilders.boolQuery().must(nested).filter(QueryBuilders.termQuery("field2", "value2")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Nested bool keeps match_all in scoring context + BoolQueryBuilder nestedRewritten = (BoolQueryBuilder) rewrittenBool.must().get(0); + assertThat(nestedRewritten.must().size(), equalTo(2)); // match_all + term + } + + public void testEmptyBoolAfterRemoval() { + // Bool with only match_all in must/filter - keeps match_all in must in scoring context + QueryBuilder query = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).filter(QueryBuilders.matchAllQuery()); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // match_all in must is kept in scoring context, match_all in filter is removed + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.filter().size(), equalTo(0)); + } + + public void testBoolWithOnlyMustNotAfterRemoval() { + // Bool with only must_not after removal should not be converted to match_all + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.matchAllQuery()) + .mustNot(QueryBuilders.termQuery("status", "deleted")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // must clause keeps match_all in scoring context, must_not preserved + 
assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(1)); + } + + public void testNonBooleanQuery() { + // Non-boolean queries should be returned as-is + QueryBuilder query = QueryBuilders.matchAllQuery(); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testEmptyBooleanQuery() { + // Empty boolean query should not be converted + QueryBuilder query = QueryBuilders.boolQuery(); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testBoostPreservation() { + // When converting bool with only match_all to match_all, preserve boost + QueryBuilder query = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).boost(2.0f); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(QueryBuilders.matchAllQuery().getClass())); + assertThat(rewritten.boost(), equalTo(2.0f)); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriterTests.java b/server/src/test/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriterTests.java new file mode 100644 index 0000000000000..4fd21d9ad601e --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/rewriters/MustNotToShouldRewriterTests.java @@ -0,0 +1,284 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query.rewriters; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.store.Directory; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MustNotToShouldRewriterTests extends OpenSearchTestCase { + + private final MustNotToShouldRewriter rewriter = MustNotToShouldRewriter.INSTANCE; + private QueryShardContext context; + private Directory directory; + private IndexReader reader; + + @Override + public void setUp() throws Exception { + super.setUp(); + context = mock(QueryShardContext.class); + + // Create an index with single-valued numeric fields + directory = newDirectory(); + IndexWriterConfig config = new IndexWriterConfig(Lucene.STANDARD_ANALYZER); + IndexWriter writer = new IndexWriter(directory, config); + + // Add some documents with single-valued numeric fields + for (int i = 0; i < 100; i++) { + Document doc = new Document(); + doc.add(new IntPoint("age", i)); + doc.add(new IntPoint("status", i % 10)); + writer.addDocument(doc); + } + + writer.close(); + reader = DirectoryReader.open(directory); + when(context.getIndexReader()).thenReturn(reader); + + // Setup numeric field types 
+ NumberFieldMapper.NumberFieldType intFieldType = mock(NumberFieldMapper.NumberFieldType.class); + when(intFieldType.numberType()).thenReturn(NumberFieldMapper.NumberType.INTEGER); + // Make parse return the input value as a Number + when(intFieldType.parse(any())).thenAnswer(invocation -> { + Object arg = invocation.getArgument(0); + if (arg instanceof Number) { + return (Number) arg; + } + return Integer.parseInt(arg.toString()); + }); + when(context.fieldMapper("age")).thenReturn(intFieldType); + when(context.fieldMapper("status")).thenReturn(intFieldType); + when(context.fieldMapper("price")).thenReturn(intFieldType); + + // Setup non-numeric field types + MappedFieldType textFieldType = mock(MappedFieldType.class); + when(context.fieldMapper("name")).thenReturn(textFieldType); + when(context.fieldMapper("description")).thenReturn(textFieldType); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + reader.close(); + directory.close(); + } + + public void testRangeQueryRewritten() { + // Test that must_not range query is rewritten to should clauses + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("type", "product")) + .mustNot(QueryBuilders.rangeQuery("age").gte(18).lte(65)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have the original term query plus a new bool query from the rewrite + assertThat(rewrittenBool.must().size(), equalTo(2)); + + // The must_not clause should be removed + assertThat(rewrittenBool.mustNot().size(), equalTo(0)); + + // Find the nested bool query + BoolQueryBuilder nestedBool = null; + for (QueryBuilder must : rewrittenBool.must()) { + if (must instanceof BoolQueryBuilder) { + nestedBool = (BoolQueryBuilder) must; + break; + } + } + + assertNotNull(nestedBool); + assertThat(nestedBool.should().size(), equalTo(2)); 
// Two range queries for complement + assertThat(nestedBool.minimumShouldMatch(), equalTo("1")); + } + + public void testNumericTermQueryRewritten() { + // Test that must_not term query on numeric field is rewritten + QueryBuilder query = QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery("status", 5)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have a new bool query from the rewrite + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(0)); + + BoolQueryBuilder nestedBool = (BoolQueryBuilder) rewrittenBool.must().get(0); + assertThat(nestedBool.should().size(), equalTo(2)); // Two range queries for complement + assertThat(nestedBool.minimumShouldMatch(), equalTo("1")); + } + + public void testNumericTermsQueryRewritten() { + // Test that must_not terms query on numeric field is rewritten + QueryBuilder query = QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery("status", new Object[] { 1, 2, 3 })); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have a new bool query from the rewrite + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(0)); + } + + public void testNumericMatchQueryRewritten() { + // Test that must_not match query on numeric field is rewritten + QueryBuilder query = QueryBuilders.boolQuery().mustNot(QueryBuilders.matchQuery("age", 25)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have a new bool query from the rewrite + assertThat(rewrittenBool.must().size(), 
equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(0)); + } + + public void testTextFieldNotRewritten() { + // Test that must_not queries on text fields are not rewritten + QueryBuilder query = QueryBuilders.boolQuery() + .mustNot(QueryBuilders.termQuery("name", "test")) + .mustNot(QueryBuilders.matchQuery("description", "product")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes + } + + public void testMultipleQueriesOnSameFieldNotRewritten() { + // Test that multiple must_not queries on the same field are not rewritten + QueryBuilder query = QueryBuilders.boolQuery() + .mustNot(QueryBuilders.rangeQuery("age").gte(18)) + .mustNot(QueryBuilders.rangeQuery("age").lte(65)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes + } + + public void testMinimumShouldMatchHandling() { + // Test that minimumShouldMatch is properly handled + QueryBuilder query = QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("category", "A")) + .should(QueryBuilders.termQuery("category", "B")) + .mustNot(QueryBuilders.rangeQuery("age").gte(18)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Since we added a must clause, minimumShouldMatch should be set to 1 + assertThat(rewrittenBool.minimumShouldMatch(), equalTo("1")); + } + + public void testExistingMustClausesPreserved() { + // Test that existing must/filter/should clauses are preserved + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("type", "product")) + .filter(QueryBuilders.rangeQuery("price").gte(100)) + .should(QueryBuilders.termQuery("featured", true)) + .mustNot(QueryBuilders.rangeQuery("age").gte(18)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, 
instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Original clauses should be preserved + assertThat(rewrittenBool.must().size(), equalTo(2)); // Original + rewritten + assertThat(rewrittenBool.filter().size(), equalTo(1)); + assertThat(rewrittenBool.should().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(0)); + } + + public void testNestedBooleanQueriesRewritten() { + // Test that nested boolean queries are also rewritten + QueryBuilder nested = QueryBuilders.boolQuery().mustNot(QueryBuilders.rangeQuery("age").gte(18)); + + QueryBuilder query = QueryBuilders.boolQuery().must(nested); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // The nested bool should be rewritten + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.must().get(0), instanceOf(BoolQueryBuilder.class)); + + BoolQueryBuilder innerBool = (BoolQueryBuilder) rewrittenBool.must().get(0); + assertThat(innerBool.must().size(), equalTo(1)); // The rewritten clause + assertThat(innerBool.mustNot().size(), equalTo(0)); + } + + public void testNoMustNotClausesNoChanges() { + // Query without must_not clauses should not be changed + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("type", "product")) + .filter(QueryBuilders.rangeQuery("price").gte(100)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testNonBoolQueryUnchanged() { + // Non-bool queries should not be changed + QueryBuilder query = QueryBuilders.termQuery("field", "value"); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testNullContextNoRewrite() { + // With null context, no rewriting should happen + QueryBuilder query = 
QueryBuilders.boolQuery().mustNot(QueryBuilders.rangeQuery("age").gte(18)); + + QueryBuilder rewritten = rewriter.rewrite(query, null); + assertSame(query, rewritten); + } + + public void testRewriterPriority() { + // Verify rewriter has correct priority + assertThat(rewriter.priority(), equalTo(175)); + assertThat(rewriter.name(), equalTo("must_not_to_should")); + } + + public void testBoolQueryPropertiesPreserved() { + // All bool query properties should be preserved + QueryBuilder query = QueryBuilders.boolQuery() + .mustNot(QueryBuilders.rangeQuery("age").gte(18)) + .boost(2.0f) + .queryName("my_query") + .adjustPureNegative(false); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Properties should be preserved + assertThat(rewrittenBool.boost(), equalTo(2.0f)); + assertThat(rewrittenBool.queryName(), equalTo("my_query")); + assertThat(rewrittenBool.adjustPureNegative(), equalTo(false)); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/rewriters/MustToFilterRewriterTests.java b/server/src/test/java/org/opensearch/search/query/rewriters/MustToFilterRewriterTests.java new file mode 100644 index 0000000000000..35e289813acff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/rewriters/MustToFilterRewriterTests.java @@ -0,0 +1,309 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MustToFilterRewriterTests extends OpenSearchTestCase { + + private final MustToFilterRewriter rewriter = MustToFilterRewriter.INSTANCE; + private QueryShardContext context; + + @Override + public void setUp() throws Exception { + super.setUp(); + context = mock(QueryShardContext.class); + + // Setup numeric field types + NumberFieldMapper.NumberFieldType intFieldType = mock(NumberFieldMapper.NumberFieldType.class); + when(context.fieldMapper("age")).thenReturn(intFieldType); + when(context.fieldMapper("price")).thenReturn(intFieldType); + when(context.fieldMapper("count")).thenReturn(intFieldType); + when(context.fieldMapper("user_id")).thenReturn(intFieldType); + + // Setup non-numeric field types + MappedFieldType textFieldType = mock(MappedFieldType.class); + when(context.fieldMapper("name")).thenReturn(textFieldType); + when(context.fieldMapper("description")).thenReturn(textFieldType); + when(context.fieldMapper("category")).thenReturn(textFieldType); + when(context.fieldMapper("status_code")).thenReturn(textFieldType); + when(context.fieldMapper("active")).thenReturn(textFieldType); + } + + public void testRangeQueryMovedToFilter() { + // Range 
queries should always be moved to filter + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("price").gte(100).lte(500)) + .must(QueryBuilders.termQuery("category", "electronics")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Range query should be moved to filter + assertThat(rewrittenBool.filter().size(), equalTo(1)); + assertThat(rewrittenBool.filter().get(0), instanceOf(RangeQueryBuilder.class)); + + // Term query on text field should remain in must + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.must().get(0), instanceOf(TermQueryBuilder.class)); + } + + public void testNumericTermQueryMovedToFilter() { + // Term queries on numeric fields should be moved to filter + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("age", 25)) + .must(QueryBuilders.termQuery("name", "John")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Numeric term query should be moved to filter + assertThat(rewrittenBool.filter().size(), equalTo(1)); + TermQueryBuilder filterClause = (TermQueryBuilder) rewrittenBool.filter().get(0); + assertThat(filterClause.fieldName(), equalTo("age")); + + // Text term query should remain in must + assertThat(rewrittenBool.must().size(), equalTo(1)); + TermQueryBuilder mustClause = (TermQueryBuilder) rewrittenBool.must().get(0); + assertThat(mustClause.fieldName(), equalTo("name")); + } + + public void testNumericTermsQueryMovedToFilter() { + // Terms queries on numeric fields should be moved to filter + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termsQuery("count", new Object[] { 1, 2, 3 })) + .must(QueryBuilders.termsQuery("category", "A", 
"B", "C")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Numeric terms query should be moved to filter + assertThat(rewrittenBool.filter().size(), equalTo(1)); + TermsQueryBuilder filterClause = (TermsQueryBuilder) rewrittenBool.filter().get(0); + assertThat(filterClause.fieldName(), equalTo("count")); + + // Text terms query should remain in must + assertThat(rewrittenBool.must().size(), equalTo(1)); + TermsQueryBuilder mustClause = (TermsQueryBuilder) rewrittenBool.must().get(0); + assertThat(mustClause.fieldName(), equalTo("category")); + } + + public void testNumericMatchQueryMovedToFilter() { + // Match queries on numeric fields should be moved to filter + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.matchQuery("price", 99.99)) + .must(QueryBuilders.matchQuery("description", "high quality")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Numeric match query should be moved to filter + assertThat(rewrittenBool.filter().size(), equalTo(1)); + MatchQueryBuilder filterClause = (MatchQueryBuilder) rewrittenBool.filter().get(0); + assertThat(filterClause.fieldName(), equalTo("price")); + + // Text match query should remain in must + assertThat(rewrittenBool.must().size(), equalTo(1)); + MatchQueryBuilder mustClause = (MatchQueryBuilder) rewrittenBool.must().get(0); + assertThat(mustClause.fieldName(), equalTo("description")); + } + + public void testExistingFilterClausesPreserved() { + // Existing filter clauses should be preserved + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("price").gte(100)) + .filter(QueryBuilders.termQuery("status", "active")) + .filter(QueryBuilders.existsQuery("description")); + + 
QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have 3 filter clauses: moved range query + 2 existing + assertThat(rewrittenBool.filter().size(), equalTo(3)); + assertThat(rewrittenBool.must().size(), equalTo(0)); + } + + public void testShouldAndMustNotClausesUnchanged() { + // Should and must_not clauses should not be affected + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("price").gte(100)) + .should(QueryBuilders.termQuery("featured", true)) + .mustNot(QueryBuilders.termQuery("deleted", true)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Range query moved to filter + assertThat(rewrittenBool.filter().size(), equalTo(1)); + assertThat(rewrittenBool.must().size(), equalTo(0)); + + // Should and must_not unchanged + assertThat(rewrittenBool.should().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(1)); + } + + public void testNestedBooleanQueriesRewritten() { + // Nested boolean queries should also be rewritten + QueryBuilder nested = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("age").gte(18)) + .must(QueryBuilders.termQuery("active", true)); + + QueryBuilder query = QueryBuilders.boolQuery().must(nested).must(QueryBuilders.matchQuery("name", "test")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // The nested bool should be rewritten + assertThat(rewrittenBool.must().size(), equalTo(2)); + + // Find the nested bool query + BoolQueryBuilder nestedRewritten = null; + for (QueryBuilder clause : rewrittenBool.must()) { + if (clause instanceof 
BoolQueryBuilder) { + nestedRewritten = (BoolQueryBuilder) clause; + break; + } + } + + assertNotNull(nestedRewritten); + // The range query in the nested bool should be moved to filter + assertThat(nestedRewritten.filter().size(), equalTo(1)); + assertThat(nestedRewritten.filter().get(0), instanceOf(RangeQueryBuilder.class)); + // The term query should remain in must + assertThat(nestedRewritten.must().size(), equalTo(1)); + assertThat(nestedRewritten.must().get(0), instanceOf(TermQueryBuilder.class)); + } + + public void testBoolQueryPropertiesPreserved() { + // All bool query properties should be preserved + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("price").gte(100)) + .must(QueryBuilders.termQuery("category", "electronics")) + .boost(2.0f) + .queryName("my_query") + .minimumShouldMatch(2) + .adjustPureNegative(false); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Properties should be preserved + assertThat(rewrittenBool.boost(), equalTo(2.0f)); + assertThat(rewrittenBool.queryName(), equalTo("my_query")); + assertThat(rewrittenBool.minimumShouldMatch(), equalTo("2")); + assertThat(rewrittenBool.adjustPureNegative(), equalTo(false)); + } + + public void testNoMustClausesNoChanges() { + // Query without must clauses should not be changed + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.rangeQuery("price").gte(100)) + .should(QueryBuilders.termQuery("featured", true)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testNonBoolQueryUnchanged() { + // Non-bool queries should not be changed + QueryBuilder query = QueryBuilders.termQuery("field", "value"); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void 
testNullContextStillMovesRangeQueries() { + // With null context, range queries should still be moved + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("price").gte(100)) + .must(QueryBuilders.termQuery("age", 25)); + + QueryBuilder rewritten = rewriter.rewrite(query, null); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Range query should be moved to filter even without context + assertThat(rewrittenBool.filter().size(), equalTo(1)); + assertThat(rewrittenBool.filter().get(0), instanceOf(RangeQueryBuilder.class)); + + // Term query stays in must (can't determine if numeric without context) + assertThat(rewrittenBool.must().size(), equalTo(1)); + assertThat(rewrittenBool.must().get(0), instanceOf(TermQueryBuilder.class)); + } + + public void testAllMustClausesMovedToFilter() { + // If all must clauses can be moved, they should all go to filter + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("price").gte(100)) + .must(QueryBuilders.rangeQuery("age").gte(18)) + .must(QueryBuilders.termQuery("count", 5)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // All clauses should be in filter + assertThat(rewrittenBool.filter().size(), equalTo(3)); + assertThat(rewrittenBool.must().size(), equalTo(0)); + } + + public void testComplexMixedQuery() { + // Complex query with mix of movable and non-movable clauses + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("created_date").gte("2024-01-01")) + .must(QueryBuilders.termQuery("user_id", 12345)) + .must(QueryBuilders.matchQuery("title", "opensearch")) + .must(QueryBuilders.termsQuery("status_code", new Object[] { 200, 201, 204 })) + .filter(QueryBuilders.existsQuery("description")) + 
.should(QueryBuilders.matchQuery("tags", "important")) + .mustNot(QueryBuilders.termQuery("deleted", true)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Range and numeric queries moved to filter + assertThat(rewrittenBool.filter().size(), equalTo(3)); // range + exists + user_id (numeric) + + // Text queries remain in must + assertThat(rewrittenBool.must().size(), equalTo(2)); // match title + terms status_code (text field) + + // Should and must_not unchanged + assertThat(rewrittenBool.should().size(), equalTo(1)); + assertThat(rewrittenBool.mustNot().size(), equalTo(1)); + } + + public void testRewriterPriority() { + // Verify rewriter has correct priority + assertThat(rewriter.priority(), equalTo(150)); + assertThat(rewriter.name(), equalTo("must_to_filter")); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/rewriters/TermsMergingRewriterTests.java b/server/src/test/java/org/opensearch/search/query/rewriters/TermsMergingRewriterTests.java new file mode 100644 index 0000000000000..085f2c72c67c9 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/rewriters/TermsMergingRewriterTests.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query.rewriters; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class TermsMergingRewriterTests extends OpenSearchTestCase { + + private final TermsMergingRewriter rewriter = TermsMergingRewriter.INSTANCE; + private final QueryShardContext context = mock(QueryShardContext.class); + + public void testSimpleTermMergingBelowThreshold() { + // Few term queries on same field should NOT be merged (below threshold) + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("status", "active")) + .filter(QueryBuilders.termQuery("status", "pending")) + .filter(QueryBuilders.termQuery("status", "approved")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes expected + } + + public void testTermMergingAboveThreshold() { + // Many term queries on same field should be merged (above threshold of 16) + BoolQueryBuilder query = QueryBuilders.boolQuery(); + // Add 20 term queries for the same field + for (int i = 0; i < 20; i++) { + query.filter(QueryBuilders.termQuery("category", "cat_" + i)); + } + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have one terms query instead of 20 term queries + assertThat(rewrittenBool.filter().size(), equalTo(1)); + assertThat(rewrittenBool.filter().get(0), instanceOf(TermsQueryBuilder.class)); + + TermsQueryBuilder termsQuery = (TermsQueryBuilder) 
rewrittenBool.filter().get(0); + assertThat(termsQuery.fieldName(), equalTo("category")); + assertThat(termsQuery.values().size(), equalTo(20)); + } + + public void testMustClauseNoMerging() { + // Term queries in must clauses should NOT be merged (different semantics) + QueryBuilder query = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("category", "electronics")) + .must(QueryBuilders.termQuery("category", "computers")) + .must(QueryBuilders.rangeQuery("price").gt(100)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have 3 queries: NO merging in must clause + assertThat(rewrittenBool.must().size(), equalTo(3)); + } + + public void testShouldClauseMergingBelowThreshold() { + // Should clauses with few terms should NOT be merged + QueryBuilder query = QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("color", "red")) + .should(QueryBuilders.termQuery("color", "blue")) + .should(QueryBuilders.termQuery("size", "large")) + .should(QueryBuilders.termQuery("size", "medium")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes expected + } + + public void testShouldClauseMergingAboveThreshold() { + // Should clauses with many terms should be merged + BoolQueryBuilder query = QueryBuilders.boolQuery(); + + // Add 20 color terms + for (int i = 0; i < 20; i++) { + query.should(QueryBuilders.termQuery("color", "color_" + i)); + } + + // Add 18 size terms + for (int i = 0; i < 18; i++) { + query.should(QueryBuilders.termQuery("size", "size_" + i)); + } + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have 2 terms queries: one for color, one for size + assertThat(rewrittenBool.should().size(), 
equalTo(2)); + assertThat(rewrittenBool.should().get(0), instanceOf(TermsQueryBuilder.class)); + assertThat(rewrittenBool.should().get(1), instanceOf(TermsQueryBuilder.class)); + } + + public void testMixedFieldsNoMerging() { + // Term queries on different fields should not be merged + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("field1", "value1")) + .filter(QueryBuilders.termQuery("field2", "value2")) + .filter(QueryBuilders.termQuery("field3", "value3")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes expected + } + + public void testExistingTermsQueryExpansionBelowThreshold() { + // Existing terms query with few additional terms should NOT be expanded (below threshold) + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.termsQuery("status", "active", "pending")) + .filter(QueryBuilders.termQuery("status", "approved")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes expected + } + + public void testExistingTermsQueryExpansionAboveThreshold() { + // Existing terms query should be expanded when total terms exceed threshold + String[] initialTerms = new String[14]; + for (int i = 0; i < 14; i++) { + initialTerms[i] = "status_" + i; + } + + BoolQueryBuilder query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("status", initialTerms)); + + // Add 5 more term queries to exceed threshold + for (int i = 14; i < 19; i++) { + query.filter(QueryBuilders.termQuery("status", "status_" + i)); + } + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Should have one terms query with all values + assertThat(rewrittenBool.filter().size(), equalTo(1)); + TermsQueryBuilder termsQuery = (TermsQueryBuilder) rewrittenBool.filter().get(0); + 
assertThat(termsQuery.values().size(), equalTo(19)); + } + + public void testSingleTermQuery() { + // Single term query should not be converted to terms query + QueryBuilder query = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("field", "value")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testMustNotClauseNoMerging() { + // Must_not clauses should not be merged + QueryBuilder query = QueryBuilders.boolQuery() + .mustNot(QueryBuilders.termQuery("status", "deleted")) + .mustNot(QueryBuilders.termQuery("status", "archived")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes to must_not + } + + public void testNestedBooleanQuery() { + // Should handle nested boolean queries with many terms + BoolQueryBuilder nested = QueryBuilders.boolQuery(); + // Add 20 term queries to exceed threshold + for (int i = 0; i < 20; i++) { + nested.filter(QueryBuilders.termQuery("status", "status_" + i)); + } + + QueryBuilder query = QueryBuilders.boolQuery().must(nested).filter(QueryBuilders.termQuery("type", "product")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + // Nested bool should also be rewritten + assertThat(rewrittenBool.must().size(), equalTo(1)); + BoolQueryBuilder nestedRewritten = (BoolQueryBuilder) rewrittenBool.must().get(0); + assertThat(nestedRewritten.filter().size(), equalTo(1)); + assertThat(nestedRewritten.filter().get(0), instanceOf(TermsQueryBuilder.class)); + + TermsQueryBuilder termsQuery = (TermsQueryBuilder) nestedRewritten.filter().get(0); + assertThat(termsQuery.values().size(), equalTo(20)); + } + + public void testEmptyBooleanQuery() { + // Empty boolean query should not cause issues + QueryBuilder query = QueryBuilders.boolQuery(); + QueryBuilder rewritten = 
rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testNonBooleanQuery() { + // Non-boolean queries should be returned as-is + QueryBuilder query = QueryBuilders.termQuery("field", "value"); + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); + } + + public void testBoostPreservation() { + // Boost values should be preserved when merging many terms + BoolQueryBuilder query = QueryBuilders.boolQuery(); + + // Add 20 terms with same boost + for (int i = 0; i < 20; i++) { + query.filter(QueryBuilders.termQuery("status", "status_" + i).boost(2.0f)); + } + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertThat(rewritten, instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder rewrittenBool = (BoolQueryBuilder) rewritten; + + assertThat(rewrittenBool.filter().size(), equalTo(1)); + TermsQueryBuilder termsQuery = (TermsQueryBuilder) rewrittenBool.filter().get(0); + assertThat(termsQuery.boost(), equalTo(2.0f)); + assertThat(termsQuery.values().size(), equalTo(20)); + } + + public void testMixedBoostNoMerging() { + // Different boost values should prevent merging + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("status", "active").boost(1.0f)) + .filter(QueryBuilders.termQuery("status", "pending").boost(2.0f)); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes due to different boosts + } + + public void testLargeTermsMerging() { + // Test merging a large number of term queries + BoolQueryBuilder query = QueryBuilders.boolQuery(); + for (int i = 0; i < 50; i++) { + query.filter(QueryBuilders.termQuery("field", "value" + i)); + } + + QueryBuilder rewritten = rewriter.rewrite(query, context); + BoolQueryBuilder result = (BoolQueryBuilder) rewritten; + + assertThat(result.filter().size(), equalTo(1)); + assertThat(result.filter().get(0), instanceOf(TermsQueryBuilder.class)); + 
TermsQueryBuilder terms = (TermsQueryBuilder) result.filter().get(0); + assertThat(terms.values().size(), equalTo(50)); + } + + public void testMixedTermsAndTermQueriesBelowThreshold() { + // Mix of existing terms queries and term queries with few values + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.termsQuery("field", "v1", "v2")) + .filter(QueryBuilders.termQuery("field", "v3")) + .filter(QueryBuilders.termsQuery("field", "v4", "v5")) + .filter(QueryBuilders.termQuery("field", "v6")); + + QueryBuilder rewritten = rewriter.rewrite(query, context); + assertSame(query, rewritten); // No changes expected (total 6 values < 16) + } + + public void testMixedTermsAndTermQueriesAboveThreshold() { + // Mix of existing terms queries and term queries with many values + String[] initialValues = new String[10]; + for (int i = 0; i < 10; i++) { + initialValues[i] = "v" + i; + } + + BoolQueryBuilder query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("field", initialValues)); + + // Add more term queries to exceed threshold + for (int i = 10; i < 20; i++) { + query.filter(QueryBuilders.termQuery("field", "v" + i)); + } + + QueryBuilder rewritten = rewriter.rewrite(query, context); + BoolQueryBuilder result = (BoolQueryBuilder) rewritten; + + // Should merge all into a single terms query + assertThat(result.filter().size(), equalTo(1)); + assertThat(result.filter().get(0), instanceOf(TermsQueryBuilder.class)); + TermsQueryBuilder merged = (TermsQueryBuilder) result.filter().get(0); + assertThat(merged.values().size(), equalTo(20)); + } + +} From 655d4d557a5ae764bfe2e5ef7a8370beaab24ffc Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Wed, 27 Aug 2025 09:58:30 -0700 Subject: [PATCH 12/27] Bump tika from 2.9.2 to 3.2.2 (#19125) * Fix tika CVE Signed-off-by: Prudhvi Godithi * Update CHANGELOG.md Signed-off-by: Prudhvi Godithi * fix html parser Signed-off-by: Prudhvi Godithi * fix html parser Signed-off-by: Prudhvi Godithi * fix html 
parser Signed-off-by: Prudhvi Godithi * Add license Signed-off-by: Prudhvi Godithi * Add license Signed-off-by: Prudhvi Godithi * Update checksums Signed-off-by: Prudhvi Godithi * Update shas Signed-off-by: Prudhvi Godithi * Add pdf box license Signed-off-by: Prudhvi Godithi * Fix tests Signed-off-by: Prudhvi Godithi * Update security fonts permission Signed-off-by: Prudhvi Godithi * Add dummy fonts Signed-off-by: Prudhvi Godithi * Upstream fetch Signed-off-by: Prudhvi Godithi * Fix license check error Signed-off-by: Prudhvi Godithi * Fix license check error Signed-off-by: Prudhvi Godithi --------- Signed-off-by: Prudhvi Godithi --- .gitattributes | 1 + CHANGELOG.md | 2 + distribution/tools/plugin-cli/build.gradle | 7 +- .../licenses/commons-compress-1.26.1.jar.sha1 | 1 - .../licenses/commons-compress-1.28.0.jar.sha1 | 1 + gradle/libs.versions.toml | 2 +- plugins/ingest-attachment/build.gradle | 8 +- .../ingest-attachment/licenses/Roboto-OFL.txt | 93 ++++++++++++ .../licenses/commons-compress-1.26.1.jar.sha1 | 1 - .../licenses/commons-compress-1.28.0.jar.sha1 | 1 + .../licenses/fontbox-2.0.31.jar.sha1 | 1 - .../licenses/fontbox-3.0.5.jar.sha1 | 1 + .../licenses/jsoup-1.20.1.jar.sha1 | 1 + .../licenses/jsoup-LICENSE.txt | 21 +++ .../{tagsoup-NOTICE.txt => jsoup-NOTICE.txt} | 0 .../licenses/pdfbox-2.0.31.jar.sha1 | 1 - .../licenses/pdfbox-3.0.5.jar.sha1 | 1 + .../licenses/pdfbox-io-3.0.5.jar.sha1 | 1 + ...soup-LICENSE.txt => pdfbox-io-LICENSE.txt} | 143 ++++++++++++++++++ .../licenses/pdfbox-io-NOTICE.txt | 22 +++ .../licenses/tagsoup-1.2.1.jar.sha1 | 1 - .../licenses/tika-core-2.9.2.jar.sha1 | 1 - .../licenses/tika-core-3.2.2.jar.sha1 | 1 + .../tika-langdetect-optimaize-2.9.2.jar.sha1 | 1 - .../tika-langdetect-optimaize-3.2.2.jar.sha1 | 1 + .../tika-parser-apple-module-2.9.2.jar.sha1 | 1 - .../tika-parser-apple-module-3.2.2.jar.sha1 | 1 + .../tika-parser-html-module-2.9.2.jar.sha1 | 1 - .../tika-parser-html-module-3.2.2.jar.sha1 | 1 + 
...ika-parser-microsoft-module-2.9.2.jar.sha1 | 1 - ...ika-parser-microsoft-module-3.2.2.jar.sha1 | 1 + ...ka-parser-miscoffice-module-2.9.2.jar.sha1 | 1 - ...ka-parser-miscoffice-module-3.2.2.jar.sha1 | 1 + .../tika-parser-pdf-module-2.9.2.jar.sha1 | 1 - .../tika-parser-pdf-module-3.2.2.jar.sha1 | 1 + .../tika-parser-text-module-2.9.2.jar.sha1 | 1 - .../tika-parser-text-module-3.2.2.jar.sha1 | 1 + .../tika-parser-xml-module-2.9.2.jar.sha1 | 1 - .../tika-parser-xml-module-3.2.2.jar.sha1 | 1 + .../tika-parser-xmp-commons-2.9.2.jar.sha1 | 1 - .../tika-parser-xmp-commons-3.2.2.jar.sha1 | 1 + .../tika-parser-zip-commons-2.9.2.jar.sha1 | 1 - .../tika-parser-zip-commons-3.2.2.jar.sha1 | 1 + ...ka-parsers-standard-package-2.9.2.jar.sha1 | 1 - ...ka-parsers-standard-package-3.2.2.jar.sha1 | 1 + .../ingest/attachment/TikaImpl.java | 51 ++++++- .../main/resources/fonts/Roboto-Regular.ttf | Bin 0 -> 146004 bytes .../ingest/attachment/test/.checksums | 22 +-- plugins/ingestion-kafka/build.gradle | 1 + .../licenses/commons-compress-1.26.1.jar.sha1 | 1 - .../licenses/commons-compress-1.28.0.jar.sha1 | 1 + 51 files changed, 375 insertions(+), 35 deletions(-) delete mode 100644 distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 create mode 100644 distribution/tools/plugin-cli/licenses/commons-compress-1.28.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/Roboto-OFL.txt delete mode 100644 plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-compress-1.28.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/fontbox-3.0.5.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/jsoup-1.20.1.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/jsoup-LICENSE.txt rename plugins/ingest-attachment/licenses/{tagsoup-NOTICE.txt => jsoup-NOTICE.txt} (100%) delete mode 100644 
plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/pdfbox-3.0.5.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/pdfbox-io-3.0.5.jar.sha1 rename plugins/ingest-attachment/licenses/{tagsoup-LICENSE.txt => pdfbox-io-LICENSE.txt} (60%) create mode 100644 plugins/ingest-attachment/licenses/pdfbox-io-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-core-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-apple-module-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-html-module-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-microsoft-module-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-pdf-module-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-text-module-3.2.2.jar.sha1 delete mode 100644 
plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-xml-module-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-xmp-commons-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parser-zip-commons-3.2.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-3.2.2.jar.sha1 create mode 100644 plugins/ingest-attachment/src/main/resources/fonts/Roboto-Regular.ttf delete mode 100644 plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-compress-1.28.0.jar.sha1 diff --git a/.gitattributes b/.gitattributes index 47b4a52e5726e..c19ea2202f725 100644 --- a/.gitattributes +++ b/.gitattributes @@ -10,5 +10,6 @@ *.bcfks binary *.crt binary *.p12 binary +*.ttf binary *.txt text=auto CHANGELOG.md merge=union diff --git a/CHANGELOG.md b/CHANGELOG.md index 106e8b36c3531..900771e5bbd99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.auth:google-auth-library-oauth2-http` from 1.37.1 to 1.38.0 ([#19144](https://github.com/opensearch-project/OpenSearch/pull/19144)) - Bump `com.squareup.okio:okio` from 3.15.0 to 3.16.0 ([#19146](https://github.com/opensearch-project/OpenSearch/pull/19146)) - Bump Slf4j from 1.7.36 to 2.0.17 ([#19136](https://github.com/opensearch-project/OpenSearch/pull/19136)) +- Bump `org.apache.tika` from 2.9.2 to 3.2.2 ([#19125](https://github.com/opensearch-project/OpenSearch/pull/19125)) +- Bump 
`org.apache.commons:commons-compress` from 1.26.1 to 1.28.0 ([#19125](https://github.com/opensearch-project/OpenSearch/pull/19125)) ### Deprecated diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 8beb17bb8bf9a..41f80eb39a81f 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -81,5 +81,10 @@ thirdPartyAudit.ignoreMissingClasses( 'org.tukaani.xz.XZOutputStream', 'org.apache.commons.codec.digest.PureJavaCrc32C', 'org.apache.commons.codec.digest.XXHash32', - 'org.apache.commons.lang3.reflect.FieldUtils' + 'org.apache.commons.lang3.reflect.FieldUtils', + 'org.apache.commons.lang3.ArrayFill', + 'org.apache.commons.lang3.ArrayUtils', + 'org.apache.commons.lang3.StringUtils', + 'org.apache.commons.lang3.SystemProperties', + 'org.apache.commons.lang3.function.Suppliers' ) diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 deleted file mode 100644 index 912bda85de18a..0000000000000 --- a/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -44331c1130c370e726a2e1a3e6fba6d2558ef04a \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.28.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..5edae62aeeb5d --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-compress-1.28.0.jar.sha1 @@ -0,0 +1 @@ +e482f2c7a88dac3c497e96aa420b6a769f59c8d7 \ No newline at end of file diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 40441dba894bb..7d0e2d31f0baf 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -50,7 +50,7 @@ httpasyncclient = "4.1.5" commonslogging = "1.2" commonscodec = "1.18.0" commonslang = "3.18.0" -commonscompress = "1.26.1" 
+commonscompress = "1.28.0" commonsio = "2.16.0" # plugin dependencies aws = "2.30.31" diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index f6a5f104cac79..0a6306be7daac 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,8 +38,8 @@ opensearchplugin { } versions << [ - 'tika' : '2.9.2', - 'pdfbox': '2.0.31', + 'tika' : '3.2.2', + 'pdfbox': '3.0.5', 'poi' : '5.4.1', 'mime4j': '0.8.11' ] @@ -75,10 +75,11 @@ dependencies { // external parser libraries // HTML - api 'org.ccil.cowan.tagsoup:tagsoup:1.2.1' + api 'org.jsoup:jsoup:1.20.1' // Adobe PDF api "org.apache.pdfbox:pdfbox:${versions.pdfbox}" api "org.apache.pdfbox:fontbox:${versions.pdfbox}" + api "org.apache.pdfbox:pdfbox-io:${versions.pdfbox}" api "org.apache.pdfbox:jempbox:1.8.17" api "commons-logging:commons-logging:${versions.commonslogging}" // OpenOffice @@ -121,6 +122,7 @@ forbiddenPatterns { exclude '**/*.pdf' exclude '**/*.epub' exclude '**/*.vsdx' + exclude '**/*.ttf' } thirdPartyAudit { diff --git a/plugins/ingest-attachment/licenses/Roboto-OFL.txt b/plugins/ingest-attachment/licenses/Roboto-OFL.txt new file mode 100644 index 0000000000000..65a3057b1f24b --- /dev/null +++ b/plugins/ingest-attachment/licenses/Roboto-OFL.txt @@ -0,0 +1,93 @@ +Copyright 2011 The Roboto Project Authors (https://github.com/googlefonts/roboto-classic) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +https://openfontlicense.org + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. 
+ +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 deleted file mode 100644 index 912bda85de18a..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -44331c1130c370e726a2e1a3e6fba6d2558ef04a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.28.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..5edae62aeeb5d --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.28.0.jar.sha1 @@ -0,0 +1 @@ +e482f2c7a88dac3c497e96aa420b6a769f59c8d7 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 deleted file mode 100644 index d45d45a66e072..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96999ecdb7324bf718b88724818fa62f81286c36 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-3.0.5.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-3.0.5.jar.sha1 new file mode 100644 index 0000000000000..241eda72e6dae --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-3.0.5.jar.sha1 
@@ -0,0 +1 @@ +b4a068e1dba2b9832a108cdf6e9a3249680e3ce8 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/jsoup-1.20.1.jar.sha1 b/plugins/ingest-attachment/licenses/jsoup-1.20.1.jar.sha1 new file mode 100644 index 0000000000000..9a2329562aae0 --- /dev/null +++ b/plugins/ingest-attachment/licenses/jsoup-1.20.1.jar.sha1 @@ -0,0 +1 @@ +769377896610be1736f8d6d51fc52a6042d1ce82 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/jsoup-LICENSE.txt b/plugins/ingest-attachment/licenses/jsoup-LICENSE.txt new file mode 100644 index 0000000000000..e4bf2be9fb7f2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/jsoup-LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2009-2025 Jonathan Hedley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/plugins/ingest-attachment/licenses/tagsoup-NOTICE.txt b/plugins/ingest-attachment/licenses/jsoup-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/tagsoup-NOTICE.txt rename to plugins/ingest-attachment/licenses/jsoup-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 deleted file mode 100644 index fa256ed9a65d2..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29b25053099bc30784a766ccb821417e06f4b8a1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-3.0.5.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-3.0.5.jar.sha1 new file mode 100644 index 0000000000000..6a6fad5245aa2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-3.0.5.jar.sha1 @@ -0,0 +1 @@ +c34109061c3a0d85d871d9edc469ac0682f81856 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-io-3.0.5.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-io-3.0.5.jar.sha1 new file mode 100644 index 0000000000000..e70c851dbd9c2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-io-3.0.5.jar.sha1 @@ -0,0 +1 @@ +402151a8d1aa427ea879cc7160e9227e9f5088ba \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tagsoup-LICENSE.txt b/plugins/ingest-attachment/licenses/pdfbox-io-LICENSE.txt similarity index 60% rename from plugins/ingest-attachment/licenses/tagsoup-LICENSE.txt rename to plugins/ingest-attachment/licenses/pdfbox-io-LICENSE.txt index 261eeb9e9f8b2..97553f24a432a 100644 --- a/plugins/ingest-attachment/licenses/tagsoup-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/pdfbox-io-LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,3 +200,145 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +EXTERNAL COMPONENTS + +Apache PDFBox includes a number of components with separate copyright notices +and license terms. Your use of these components is subject to the terms and +conditions of the following licenses. + +Contributions made to the original PDFBox and FontBox projects: + + Copyright (c) 2002-2007, www.pdfbox.org + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of pdfbox; nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. 
+ +Adobe Font Metrics (AFM) for PDF Core 14 Fonts + + This file and the 14 PostScript(R) AFM files it accompanies may be used, + copied, and distributed for any purpose and without charge, with or without + modification, provided that all copyright notices are retained; that the + AFM files are not distributed without this file; that all modifications + to this file or any of the AFM files are prominently noted in the modified + file(s); and that this paragraph is not modified. Adobe Systems has no + responsibility or obligation to support the use of the AFM files. + +CMaps for PDF Fonts (http://opensource.adobe.com/wiki/display/cmap/Downloads) + + Copyright 1990-2009 Adobe Systems Incorporated. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + Neither the name of Adobe Systems Incorporated nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + +PaDaF PDF/A preflight (http://sourceforge.net/projects/padaf) + + Copyright 2010 Atos Worldline SAS + + Licensed by Atos Worldline SAS under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + Atos Worldline SAS licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +OSXAdapter + + Version: 2.0 + + Disclaimer: IMPORTANT: This Apple software is supplied to you by + Apple Inc. ("Apple") in consideration of your agreement to the + following terms, and your use, installation, modification or + redistribution of this Apple software constitutes acceptance of these + terms. If you do not agree with these terms, please do not use, + install, modify or redistribute this Apple software. 
+ + In consideration of your agreement to abide by the following terms, and + subject to these terms, Apple grants you a personal, non-exclusive + license, under Apple's copyrights in this original Apple software (the + "Apple Software"), to use, reproduce, modify and redistribute the Apple + Software, with or without modifications, in source and/or binary forms; + provided that if you redistribute the Apple Software in its entirety and + without modifications, you must retain this notice and the following + text and disclaimers in all such redistributions of the Apple Software. + Neither the name, trademarks, service marks or logos of Apple Inc. + may be used to endorse or promote products derived from the Apple + Software without specific prior written permission from Apple. Except + as expressly stated in this notice, no other rights or licenses, express + or implied, are granted by Apple herein, including but not limited to + any patent rights that may be infringed by your derivative works or by + other works in which the Apple Software may be incorporated. + + The Apple Software is provided by Apple on an "AS IS" basis. APPLE + MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION + THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS + FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND + OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS. + + IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL + OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, + MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED + AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE), + STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ + Copyright (C) 2003-2007 Apple, Inc., All Rights Reserved diff --git a/plugins/ingest-attachment/licenses/pdfbox-io-NOTICE.txt b/plugins/ingest-attachment/licenses/pdfbox-io-NOTICE.txt new file mode 100644 index 0000000000000..3c85708256104 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-io-NOTICE.txt @@ -0,0 +1,22 @@ +Apache PDFBox +Copyright 2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Based on source code originally developed in the PDFBox and +FontBox projects. + +Copyright (c) 2002-2007, www.pdfbox.org + +Based on source code originally developed in the PaDaF project. +Copyright (c) 2010 Atos Worldline SAS + +Includes the Adobe Glyph List +Copyright 1997, 1998, 2002, 2007, 2010 Adobe Systems Incorporated. + +Includes the Zapf Dingbats Glyph List +Copyright 2002, 2010 Adobe Systems Incorporated. + +Includes OSXAdapter +Copyright (C) 2003-2007 Apple, Inc., All Rights Reserved diff --git a/plugins/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 b/plugins/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 deleted file mode 100644 index 5d227b11a0fa6..0000000000000 --- a/plugins/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5584627487e984c03456266d3f8802eb85a9ce97 diff --git a/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 deleted file mode 100644 index 80635a63d29fe..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -796a21391780339e3d4862626339b49df170024e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..01df6be02361e --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-3.2.2.jar.sha1 @@ -0,0 +1 @@ 
+f1f16ecac7a81e145051f906927ea6b58ce7e914 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 deleted file mode 100644 index a4bb6d48c6a08..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a48a287e464b456a85c79f318d7bad7db201518 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..b692ab8befa3b --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-3.2.2.jar.sha1 @@ -0,0 +1 @@ +3ee2907773fe2aaa1013829e00cd62778d6a2ff9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 deleted file mode 100644 index dbaee880d1251..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -758dac27c246c51b019562bab7e266d2da6a6e01 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-apple-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-apple-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..7ef86ac18757b --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-apple-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +fde21727740a39beead899c9ca6e642f92d86e3a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 deleted file mode 100644 index b4806746301ef..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -47f6a4c46b92616d14e82cd7ad4d05cb43077b83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-html-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-html-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..351a9d6963000 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-html-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +e6acd314da558703977a681661c215f3ef92dbbd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 deleted file mode 100644 index da1ae42bac652..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -235a20823c02c699ce3d57f3d6b9550db05d91a9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..bcc475b3f4c1d --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +41ff68abccde91ab17d7b181eb7a5fccf16e8b5c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 deleted file mode 100644 index 7ceed9e1643b8..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7688a4220d07c32b505230479f957cd495c0bef2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..a7ac03630fe9c --- /dev/null +++ 
b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +d4078f950ca55c5235cdfcad744235242f9edc05 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 deleted file mode 100644 index e780c1b92d525..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d0f0e3f6eff184040402094f4fabbb3c5c7d09f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-pdf-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..c9baba749d403 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +a972d70ef0762b460c048c5e0e8a46c46bb170aa \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 deleted file mode 100644 index 6e56fcffc5f88..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3a93e538ba6cb4066aba96d629febf181ec9f92 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-text-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-text-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..c84219d17252b --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-text-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +a19be47ecca1a061349dc2d019ab6f2741ff1dee \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 deleted file mode 100644 index 27062077b92bf..0000000000000 --- 
a/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ff707716c0c4748ffeb21996aefa8d269b3eab5b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-xml-module-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xml-module-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..e63b0f71f2d19 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-xml-module-3.2.2.jar.sha1 @@ -0,0 +1 @@ +9dd2f1c52ab2663600e82dae3a8003ce6ede372f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 deleted file mode 100644 index 396e2655b14db..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69104107ff85194df5acf682178128771863e442 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..98b09c1785d78 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-3.2.2.jar.sha1 @@ -0,0 +1 @@ +f1dfa02a2c672153013d44501e0c21d5682aa822 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 deleted file mode 100644 index bda62033e4e8c..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2fcea85a56f93a5c0cb81f3d6dd8673f3d81c598 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-zip-commons-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..ac860449a84dd --- /dev/null 
+++ b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-3.2.2.jar.sha1 @@ -0,0 +1 @@ +d46b71ea5697f575c3febfd7343e5d8b2c338bd5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 deleted file mode 100644 index bb76974b6344e..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8408deb51fa617ef4e912b4d161712e695d3a29 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-3.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..f6e9d188908cd --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-3.2.2.jar.sha1 @@ -0,0 +1 @@ +c91fb85f5ee46e2c1f1e3399b04efb9d1ff85485 \ No newline at end of file diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index d999d20537485..068f1ae5d6d78 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -32,6 +32,16 @@ package org.opensearch.ingest.attachment; +import org.apache.fontbox.FontBoxFont; +import org.apache.fontbox.ttf.TTFParser; +import org.apache.fontbox.ttf.TrueTypeFont; +import org.apache.pdfbox.io.RandomAccessReadBuffer; +import org.apache.pdfbox.pdmodel.font.CIDFontMapping; +import org.apache.pdfbox.pdmodel.font.FontMapper; +import org.apache.pdfbox.pdmodel.font.FontMappers; +import org.apache.pdfbox.pdmodel.font.FontMapping; +import org.apache.pdfbox.pdmodel.font.PDCIDSystemInfo; +import org.apache.pdfbox.pdmodel.font.PDFontDescriptor; import 
org.apache.tika.Tika; import org.apache.tika.exception.TikaException; import org.apache.tika.metadata.Metadata; @@ -47,6 +57,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.io.UncheckedIOException; import java.lang.reflect.ReflectPermission; import java.net.URISyntaxException; @@ -75,6 +86,44 @@ */ final class TikaImpl { + static { + /* + * Stop PDFBox from consulting the OS for fonts at all, use classpath instead with dummy fonts because font + * does not matter for ingestion + */ + FontMappers.set(new FontMapper() { + @Override + public FontMapping getTrueTypeFont(String baseFont, PDFontDescriptor fd) { + try (InputStream in = TikaImpl.class.getResourceAsStream("/fonts/Roboto-Regular.ttf")) { + if (in == null) return new FontMapping<>(null, true); + byte[] bytes = in.readAllBytes(); + TrueTypeFont ttf = new TTFParser().parse(new RandomAccessReadBuffer(bytes)); + return new FontMapping<>(ttf, true); + } catch (IOException e) { + return new FontMapping<>(null, true); + } + } + + @Override + public FontMapping getFontBoxFont(String baseFont, PDFontDescriptor fd) { + try (InputStream in = TikaImpl.class.getResourceAsStream("/fonts/Roboto-Regular.ttf")) { + if (in == null) return new FontMapping<>(null, true); + byte[] bytes = in.readAllBytes(); + TrueTypeFont ttf = new TTFParser().parse(new RandomAccessReadBuffer(bytes)); + return new FontMapping<>(ttf, true); + } catch (IOException e) { + return new FontMapping<>(null, true); + } + } + + @Override + public CIDFontMapping getCIDFont(String baseFont, PDFontDescriptor fd, PDCIDSystemInfo cid) { + // No CID substitutions from the OS either; signal "fallback only". 
+ return new CIDFontMapping(null, null, true); + } + }); + } + /** Exclude some formats */ private static final Set EXCLUDES = new HashSet<>( Arrays.asList( @@ -91,7 +140,7 @@ final class TikaImpl { /** subset of parsers for types we support */ private static final Parser PARSERS[] = new Parser[] { // documents - new org.apache.tika.parser.html.HtmlParser(), + new org.apache.tika.parser.html.JSoupParser(), new org.apache.tika.parser.pdf.PDFParser(), new org.apache.tika.parser.txt.TXTParser(), new org.apache.tika.parser.microsoft.rtf.RTFParser(), diff --git a/plugins/ingest-attachment/src/main/resources/fonts/Roboto-Regular.ttf b/plugins/ingest-attachment/src/main/resources/fonts/Roboto-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..7e3bb2f8ce7ae5b69e9f32c1481a06f16ebcfe71 GIT binary patch literal 146004 zcmb@v2VfL8(?2{ad+uON$H1LAY>MfWC3h031oSW{sOP-O*@UYa+3X zh(|PQ->#F#SC?)Obx$XXINhvM=Z4Rg-K;X~U-`jO6UuosEl$l6d1-Zm@3U3hJ z!)#QOh+V*akGR3H!_EZ9eof?goX9dgZp+e`D`8HtMx;G_fhYUa6b@s6(D@gNDY%Zd=Yu5EUFoLlX&~^;&DO`-s;9~S1orU{8E=C{YV)Q8~+5dvRg!?rv zMwh@ZM%QpLx`~U?ZTbc7U3v!h1ujM=NU1^Mad?6@pus6^3hL#o!i)ObJ$+ zg#xe4s=%$rs==+z>cDNl8pCbIn!|0y+Q99=I=~G_|75H?12m%v@l!2v-6Toq>oxGLTgpThlI zdxi1Bb$B(?I;_U0)}b{_>yYX_tb?n6Y8_PFv<|FZ z);gei0c-y%rnO%s)7rPvr`A4|O>6IprZv8TX^pFpWbIXGogS~~^zuyzbKtsR1s ztnEWgYrBxi*0#k>Ya4s0wY5FT+RAQPTiUx=Tj0KVG1J=2W?GxtMp&B!nbyWZBdm?A zrnO0;Y(^|iPFo<0~F@GXQ5e_O2E{=iUNW^xX!0uL*0QoQT9~MXLRtZX`CrT zLkmEvokNWpY1KH#v@b0f>s8kv50wCYz>nU6Zcm2JJ4NT|bGilIy<|CAE*8v6unMdO z3uB#FPu7>c!zQtXYzf=U9P9ym#!Mc_tMf>HoWIXMdnR9+<_p*&G{`Lq1p z;6|>ZRJ2~v$wfC6eN^-bI6yXUo4+l$Ex*lX3kC-&*=o8tFv_;ow!@YL4ji!^e}x0x zTpY-ijRW!E00RdYIPe@CfFPK^n>W~F^Idk^JOkOd1h@$J67V_TQ^3c7bAb2Fe5QwK zy!`X!UG9bZmM~L$ziiCvz4-3Mw=X_?ap1+S7du~Ud9nUQ0$5v+v=qMZMj3xSfBF0w z(etB#RKQNa=I5K>t_Cay%tfk3fH2@ah;F{PdE(}fo2zf~n=5awx;En4$ZJDk(+#>d z@LK!i>@kxGZeemX1;h=Y_S4#B29Dh`V@Xr!a! 
zm^dy@h?8_yoQC~z4wlGy^!yL#L;46d-6!H*Wz`8&d_Z4`59w?4-;c#7uu(41Mfyg3 zE=hbr-@>B*4w~(2`d(as&G!TSC@zZ6=(6}md@H^a-@}smLHr0?@QS!Bexf_D|1#)T zafR+d$375O=^;I$$Mi&8qu<2O;yV2 z%tP6Wa-9@Xviz(7E655-Lt3P#^pI;=8CI5+W93-|Wg|g%RZ^BCY$tD471m>QRs&jh zlk}0ka*13j{bUZgS#FVQSZmfsu9B-^U-`>ia+zGty0ES+LKc>RvWP4ytxa^VJrSn9ZN3&JrE(s_DNm~qvGdy=nl!G(KKh_Tm& z`(UoL#)bQ4!~Iw>InEWHgR04fF1%nCZBP)tvYIm-_PVz$%x7I&ruhihYE`gE<7)l z6k}X?J}N3ATzGyeCfqa^Ae+eN3NJ{-`6CxzhzfIeS%DP7UHr$~qbMwNCp{q)0KMeI zOOP-0loKyW-h6-yFGcxyxC<{$#V`+WrU|72ypRholht0VEEU2$!P<%oYx-5ON%rGBXA zztt^^8~^y!>a*{ZlQnsQ9N8*=sczTpmJgAs-{q zHnHIP-zDzvA+~H%_@>w_8r*e$Q>yq3hPfJsTNi} z)ogSaVuw&4txHC^o^@-Ka=PvNk~9C2WTU38bT;Y)tPk=UtNUVa#Pvt6w%2lqN62X8 zue8(%;EEoVwiC6W*6`c8%Fmv&s#lz*Y9!n;NYzC@Jrdzcrs-?0&VZ%D?2s||aG{u1f$xP zYq{44Ubnmpde`xu?S0HAk58men$I`B(l^w1u@A&)sxA1?@Va-9yy6VNR)=e(A@F?o~n`R41CZ*V>{f0_K-@;@zLDPS*fyuhagsbFxy=z>QJepV<) zp|XXV6&hA(R-to+zAW@}p~rzTFg$Q`;Hkm|3U?^Hw@B+Edy3pBTBGRvqQ6)RS<6{t ztaGfptT${GZR>)9gIWcx4oWZ9qS&TlSL~(i?d+@VY4&gIKNY9qfyG-DA5%QF_!q^W z2m1#X4=x)V5j;A0VeppV%fUB;pNE8oObba2`7-2QiDD&2mq;y{tK`IzmrMDS8eQs# z(q&5bDSf8&?a*4GiJ{laR4g;G%x7g@maSHHR@wKo|RTsx==Z|a@)$&EAOs+sY?DTU8>Bj@T5Q>XWOdR=-oDV2uVf`qr3MGk?wcHAmK5RrAAI9<`d*npo?@+9hiDt^IBt|2hNf z9H{f8Zo#@k>YlA9>P6Q(RPSN^()Hh}|8ZEkumufdg9#1(XgH$bca2InifOc}(Tm2> zO{mG%ranzaH9g$)&t@f?HElM$+016^npbH)qxt6MCz}7&qE3s=E#7N!v!$(Nx0XX% zPH8#6<))TDwF+$2rPZQVTU#A!b-Q(e)?Hg~Y~$HxXq%~RmbZDo&C|AiZEbDi+wN%l zW4k)-*0j6czI6M#?c2AX)&6w*uiM{h|5pd!4&6HJ=z zxL^4A@Ee`OI&bWJw@a-qGrN4zHCNY;U5|D>-}S4mPyhLf;1S*tfe~dQnnuJ#OpI6_ zu`l9S#QPDCBLgDqL`Fo$M~;eI5Y;fMPt?q)^-(*b_C=*dor(G=>g%Ws1?H1Cl zeYc6-9Nms|d$-%i-7a*y-0fDk``!MGX3^f!xuXk52S>M#9vOWu`g!;8?lIldy1(qv zzQ>v#cY2oU*}Lban4&TLVm^%Ju`Od`Vu#1hie2A}_bS}0U9Xs4bK?reb&5-kyAq!( zzES+x`04SR;*Z9kkAKm-X7A;_|LoJL&(l6n`#$Jbt>3_Y3;TW2zi9u3{df2OYe3xr zy$8HMkPR$2u;;+J1CI~9J*d>6HiO0wT0ZFE;EIE14o(~Va!Ao3ZHM$9vUGJMg4 z`fGIe(X&S%9sOubnK9kREF5!U%+s-z#?~7#QGCwPyFhw3UAGR>-)FsyglyiPu_m=PQ`cLdT04N&n7jU)OFILNv9_F 
zn4C5x$CL(BMo!r}<(sLwrgoZ|I`!wNFQ%287C&vyG{>}i(<@Jpo<3{(h3S{4-43-fEw zpFMxk{O1co7K~i5W5Kh9r5A#|3x_W}x=1W)w5Z>rZHo>s`e@PRMGqFs#RV3ZTijrA z_~O2cCoEpDc=O@|i{D@T!{U2O*pfU;N-U|hr0tT}C8L(iTC#S@-X&+2Tv&2z$)8Jo zm)e$ATiRl2^wObArz~B*bm!6|OFvn9b?KvJ#yZq&fTq}yNsJ5cTis%(XS4>&4Y{kwMM^}8Z;>wCgD~*)}Rt{eI z!^(TBXjSf2!K-SlYPqWWs$r|9uUfe(an*@cU#z;m>i5-Ns|&9#zq-NdPOJN@9#psvHh%5cwR6{QT${S~?AmYF-dX!%UCwpI)>U2CVqN!j!`4k*w_;u5x)bZZSa*Hh zAM3r>7hYd}eS`Jk>-(;sxPIaKt?LhO@Yv96!{`mOH>};TXTzBd7dG79@N}c!M%%_p z8=Gv5*f?FH*_&9==|HaFWGwRzCy$(xsM-o80)i`SOITgq)|uqAv;pDp9IBy3r~<^EQ- zHTTw#tu?o{*&4HTs-rII@+qG?vw|i_axIK7#_3bUTM{ggtefsvVx8K_S=MJA8MR!!((P&4P9RqfZ z*^#hg!;X|4=XPA$@$-&5J09(Lx|8qp-Wjm7(9U8zOYf|-v)0arJ6rDTv2(=E89P_) zOxk&3=a)Ob-+684FFPOZ{A-urF59lkyPEEb+%box1zn?oW4LOe~OSODvgKC$U{(*TkNQ zBNHbkE=b&zxIgiH;`fOeNtBc)sYFt(q=rc?le#4JPa2*yJ!xf9a?+Wk3rV+=o+kSx z=S?n>Trs&xa);#Z$-|PTC9g={ot%<7D8h2-1G_xJEUdH0msQ+H33J#F^H>>0Ud z)}DoXw(dE+=i@zB_B`5a?9IP7bZ`B=o%Z(KJ9O{Zy>s_&+`E78`Muxoy}Q@k7qBm6 zU#)#@_r>lTy>HIG#rwAJJF@SSeOLB9c32#R9AzC19N~_Bj){&{jsuPl96vf9q{x&4 zDP>Z^Qo>V)rz}X>l5!~J!;~LW?x(WUys0Hq>!h|zjY}Pqnvl9a^=RrhslTK?-|xS_ z`2HIETkr3=f5iTo`&aMZv;Xw|3;S>He|8|pfzSiZ4n!Rod|>i{r3ZE#IC9{V13w*j zbkK6J(7|#C8ypNj*!SRsgYyq=I=KJf`GemdynE0*l>1PLL$wdJJrsLr)S;P&wjMfk z=)*%l9=d;+AI^KY9HKg ziXE$VtmUyD$3`5Rd2G$GJ;zQTyKpT1*z@E5$BQ4YalG~MnB$|4&pN*5_}=619{=X} zo#QW01e^#tQS(Hr6FpCiJTdFU+7tUuym#W;6W34NKk@9O$I09$ttUfIRz6w#WTTU< zPIfxk?PT1^fhR|voOp8D$+;(&o?Lr!-^q7RUObt8^1;bJPx+q8bE?Ftnx`6^YH_N= zsmN1KHcW@>eHJ~?>T+?^jD`ZoxXMYuQR@93Y-Z& zQ}0a2Gd<1>KQsNz$}@>)4xM@T%r{{zSFc*RV%E$V)2B_HGI`QFZ@)Eh!uWAx$BZ5| za>VdqLx&6=G;l!wetrA&j*sgV8`HB#_vmg>kr7?HbPn&-p?$lyZCbZ#*`j%~rX{WZ z-d<%`ijQ|Ad!ydoWy?^Cw+~RCvSpaV(#YYV10C%`ZFq$k5!9+=? 
zLk2fd5F4Km=Z=U(7I+HJB8yg??5#RPMcA4o#OS9G68_4aGroq(QOCJ32X7P};bH~&(0SYZwdfY^XuHip?GqB>Q%D3Oo3OwXrV*pj+(^_U((dRLY7eqU^hQ=G zSY8bZk7)$V*Nrh-Gn8OU^QT^bxUTjz=E9;P9JZJ~kkXSZrWIkUhv28JT9DDXdc4gOC@ehADP7yF*GCo82iY z0);wkv%@1&Ipd9D@M5+&;v&*)&O}GT@SZ3xjXT5qb-I)gRS;r0Vx%)BOvfNhBFxhnHrbiFq${;2V*FLi5p1|1 zunsgk|G*_QWSF-R<{9P{=8GK|XbTmTijboW+m{&a_ho)8Fa>#{1_(}LlT*CH0zoZu zat0m6>n1Oz!X{^ifpV2BTSh3-S@F(UHRv1_vEP@Fzjl$Lq53OZrb!BK8*0xI>kbiU zz$PiIZD6MoE_~!rnX39+{FSPr|S!$i|)$ zYYB-7O^9$PAycAd_pb#(6~Qgy>`h}-3Un0ww?I%_7h8;@S4=2Sn}5>;MBCzGRcgwY z!bNZjlfkSW)dQz2z7B7D?}iQ^d&A6_`c&T;V^J|4_J$6Y&&lT|_9nIh{S)Hsy&!X8 z?IZdI_KA!|-i|PPtV7xx2Bt`Ai2hW7p%P6}sBI`}(+X18F0_3&bW2rzTS7u(TS}M= ziH(a@_l<+l(-T}#_Qs7>H+4VJ#FpR)i;asxiYAddaoI8;A)$#q))o(Sf$D-+o$PA! zL6j;bJSrl=H{Kp^2T#Mo5@J#9KwDg7U_xY^<}t_tX;ik1k)@AZ>WHge9~{>Q4mv`w z7<(^gfa;I0hxL6upbt`I53{#a#evwCifZ@?E$vO>kwQUiyhA|lf^6~F5ut>H+G|bm zkBN{)W+EHfRA-*xUt39vx;&p_g*v5+IF^?&xhs zaIS=a1Y6An=&9~du`(pCOAH1A>_BL0(@h-)nin-tv&d{GH98aupG9wiu^}~wW=$hIc6Y7cl z{{B>g=YLliL_xQv!%GvQv{8tsBJ7`w!X^mSKrK}P@VK)YEmc(vFdBV2At5dnHdXf= zs)zc9Qoa|tLds6c=HzO-CmUr8c<<<8y=ym?*k#w!sCMofAH~)Q?z{$>5qz^ zfkJc9oo1Sffv(g})`7psPs_P0T6YYUnR;$)Xe6+v3Sy9|sRCC|@^ST2-`BLhD|=^~ zdc7KH&&*5>^!Cgg)m;knh50K34U`yS^9Q?X>81-oO>m!3D}{N4xDr4j{ZY+Y2?;)S z=a6Do>i-B)7-@5oBB7mM4RE}TrcX%l^m}uR=j-YH^aHB%qm}(KeU*!=J9sy8_%u@X z3mVYjrSwoawC~&Jl(<1Ev^kM&IMhL?maDTm2gw48_h2+00mp}GWu_9k`PN5wX=iq? zhlWSIjY1Ts&Vd>JSfxJ)=?PL0s92|E&VimWG&Is_a=oo&W2){_Q$uaG{;;zeF<4s| zD1qCc4UpP1L@T)j7(D%BF*HLDDZLX}0Jc;|We>yVxBJ_$`?-#DB4l?;TqleHGB~16 zV9iL(B+|@Z3M+jD)^H5+fbayH%|92S5^MpOS~zB?t|(n`b{&RM+!EqSrD`xEG$Fy6 zPFdl;JfT%5FhR}HylVt{gQms9TzuR9H=w zvdW#3S?*Yrt0+it7o0|Y-&m{?Io3_N`d?QyjW8P9orK5`SJ?^C2~n6J2BlF^RlJKH zb)TcKQsbJSrHUXTF%NIoTz^dMu>){_+KfGKbL6iyR!*WJ#*frrKBYda5B1|GXeR%V zN{A6uSSCwSYS^n%82iWCP;R6j2}lC;0Q3Y@1*`$g0JH@R2gI`nG=rs3DVd9! 
z%XnHP%hN3E>uTjuo(3B=sEB+{yN#+e&IqI3avoq1?nlxD;~?$kW$CaSKvfL`;q7R* zMWsc!F@?q&zS4Mmgoj)RkLlsc1o^;Co}A zPMBCnl8>jFGKvPvcW9ohK;7ju)S(`A=O0jAgvH8!G@m`91?-M_Qs&V3IuA*6s%7U;HI#m2grLlJ{AN)vukiys#a=_1H45w+p=SsY@ zl{={$>eF9Vp`0v(mdSMLCL*b~r3FQcWuW;C^_D|ulBFfWlV~CK?N5`;>kEpT71oNS6LXnDp@+j?; z@6tTWyVOpm&^vMoHA7y}avd#~A5c$YE%paJqjho=tp)GxyfoR3xwHjomUwKY{vM4f z7UgX=W>F7*)Vyv?qCRpdMH`F2&&Sw{D9r=12E~c9lqeR^0!6plG(m0wG%_z)no}zaQK-?ICVDQRN}k)Xr*0b6GY*i$ z@S&|1NtHbsQ6S{8y`sxVqzZ;7wXu|?aN`o~GbYeVk6BoDI!z8s0OhjWMH+*)BEMxG zCFn3@H%bnsUf|;f5e>dypmLTV$|wCP0$*ILmz@BcsEl!+Hb{I~q{77aRMPki^*f3C zDC%b!PwOokQJ+7ksHGaxMF84Tyk$20c<`g2c^mhAE$G{p8+c}p`9kI6;YI5#WoU}* zYX0C+hSH3`(3iial90EY9=`OE#g|T4zM`eZ&$QCZpu-+tLNQ)926; z2WW|BMezFxEdk$#gQr8l=UTEfmG&r0RgCUrHICCtxq|u`!{863>G0=RDo}lS(!8wm zj-s{3OR6C6gZKMUCgcU}w_9eQ45gRq(FWBIy^}+th;zkhUx;L$~aFqR+ z%7`EuCiCEnMZBwb`y93p#~yFB;~Dyvcm?2F07}d$|qI{aNK4xQT+I<70^Lwda32fjy*fKzk8O6A-7KnFw&|u>awC34LklLRQOS+UYq4 zZF?7ZDC7g<5Bir?+9?$JD#&O~-Hmo=*L3K?�egE!tu*v~K}gWeK5h%eT<6pQ9}X zpua7m0meeK63uq{FLRa~t&-p_h{Go@P zuo`|G>e5_>QVto8ewr8Y{jj(7Z5l00KwsIYG2|yf9tZv1F_s*s79QM)pJ#-B6e@*z)tPCI#2a1@)mQbp8OooJ;0vcz7%WZ2aS(u z494OT@-2$>Xh;o)Cc~{|Ir+36f+64KU2K}59e6I(+(_5B9zu8XH*%IjSZKxlMb_ttZ@K9#G@18n@Xq^DjMqqaFs)IM^t)waro)a=VneL0@;18zDan z&}Qp^!}dXZtQ>~EJ)XMCZ>S$+iCJoq7rq5_%UA^^XWDjzEok(hUfQy;-owD(=gR>GX9wnjnNm#WT+o`3O`{j1vI zAAQ)s=(F>|uS2NETKJ#BPlcZW|4Z;QKYL_8g`WufG|wsA#bH~GXXj&dpT}g>Gf9t zx*p{&1tbDK1T6+&mcUq`$Ap74O^pw3(DW%9U#C-zEpAX_Ms`qqaYObxWWz0Lyi#>i zpvEy5=zjCSoM$O$uHlQ#O~!KbsZq%sWArhf^Ht_r9gjJl?jL9;C3}$J!%Ai?ug(3G zjHq@sFwawZL$|TA`<1N^`%K&Tu;caoMfGhpPr}?x={J;hL)rMc&%@U?zUq^hUnm(> zW$HZD+!N!2Dqq?0ne?f2NDul_U!hCc{JscRdI0n(+O&RBeO}q`iVoL&(=|6Nt@MFV zwm$j?`r!hlm-L(yw!TW^mgU!dw`?;tS66d*8|Ly#_rs=BGs&q#{)YKb`^<}DI)4YwZCJjEPJ@xuf28Z{Tu?eUr(boU{*UemTP=6(|3 zel9a_HD6OSxP3JjRl3AIhj9Ct6RCNN;=AI*-+auaz}tRm4yETgn7_O0`)1$#@Xg_G z(C4q!&;6Xce*f&7_hdaY?VsWm@6?zKe;e9*=Ii=1+db@W^WALsP9C{=<(_N*oBz7r zTm(Iexw(6e{!hMJw*KaCh7N~Z>p7>3|EkX@`E}2i)VO3A<^=_a6C&Dt0si^OlIGWL 
zc~bpCV0}dQ)j9LyhHo$KI|TYjQ3xT(|W&{xzVth$9S-LTAnwr$i8N0 z`NI4|$H9J6`d;f$t>0nO!@gIxex}}5a{#52UGoJ!_5j!8gtGIYztvnog}cWBgxNFm zS9Yu_12(+QSIrsJc=b7amA4v;RQ{NYXxm%Gt1y(IZE|J%E1O@<8`3bxg3YL(N4aV& zaDz_o>RXw81mm768~p-SBqmMXELmVG+qh}#a5gM%{D?tpSlzv4M@wwrSVeMoZc@=whRoj_vTlcDeA5tq_YZnoYdCQqy5h0((O& zIC};gEpUQKPV%Q*6hOHt59LLg{Pk~LvCbJ#}>hW*2 z$e!NgU(!qDt!j?dh0HSka|wZQl}5&~wsCQThq0#dgZlSnVevzT4raCbjEIe66%Y{1 zN^7^McJmDyGI}uc$GJy?n74MNa_OaZ|3u^ndNdRsWeij9t&xM{hSAlLBP&#-OK>aE z1-O;zGq_dg1GrV`UAWcgINa)VXyoW#Bgrvx^steXI7;QPP1nWAg(4a+s@(wX=F+ae zc5`Z1ood6tRpsKFcDPC&nTsd6HC|Y|7VUay*U+w~c9o=Y@*=FqK-QG+#PRJaLWGtv zNEq@A)UFR)buNnHzADR_xA2vTg0>XqqSq7tOtv8Nj^j&6Z?>E!5Wdr4E7(f5imhgA*xGC$gnq{G=c=uytP z6vQ)%cZ&1qdu$oQm-6h2`tBb&)JL7&c{8%GDY#?ki|#OvuUYcQWVwgWmAm9_oYIiQ zC(FIypgPweANZFCeYFTm%!`#94k`+%zKeU%z~gy;;(Rb4OrCrwA4gs|dt}n?uG$(jbR^$fGy%n1ru!2djKRKX-CUQ|tX|E__rrDo2CPv7oen7B!kS(1Sei{j=gX zD4UG-oXh88+}#DLa5j&tmGI>>PVJh_=WulflfXA>t@!k8u|@HvaZeu0V|Xtf$EWbA zd>Wt5XYiSP7E<|A2`WjURGF$$Q+ykR^>BRW7EYbBU0QW z6Enj&dV$3#X4sA5Mz9fLlt7GyU*zBL+dN$i5F;~FA{=>xGr}&4XO(WLMAa;0{BC3z zmyBJ;3S*uTWrQK8Jn|0YY@3`a`^he{kn|My#95IlCW|p*kch&m4MlO1gExPGvd-dk ziGKKUs}3*5{n=0K6PC(mvoUNC&ZUZAZCP=8N?+j2t+()97S19wCWxEZihH8tyJR5$+yXm9MA7hE7W6zr#(Gz2GMCGjMldEnB5T z-SGC$jWx@}nrC87HKyeO>!m0|G|(a0d9er~iC8T|?&3GNyW~8$d-&&YcSADrz^3pN z_jSzADn>liG0t3n0-h+Zs&KS;8JuiqL%+o{@)W|7MFkx{8Ez8)8163EmMZs8;O+)M zQt|dfS1-8B!&?p&7NWw0O{c}V4d@mAI4$7>?vtgG?>z#&5-AGl6soON-`M>c zCC5~Hyz^C*_JfB>lNdFuChY^-6yTDC^rK;#x z9M~m0!rjA9!gX`n(H;VaYrVZlbIMHwo*=D&Jag_h7v% zE8puXM1<*(?6R{EA&GK6+$8Y??k>DBP|qHKn^}9DN}*z|sTlD<$2fU-8F-?+qQY@* zR27VjC8?ORmGRaOw2AUMg&F~pxC8DktS+hiHJ9X%ke0;Ex+_h&N0icacejYD{~i&n zTf_={H#Cu3ijLzx8LPo6MW9ZhMt0q+@GebZuhVo?J%M+;it4^_lXw!`U9vaaJvb%=r#j4{)5=$3R8QI?xQdZV zu3wjU)uzc@_2xvpk5`oLfV)e^!QI1m!gbbD$^ISyDB$ST$W3wi_1u&sBx7!-a_a;) ziEn|s3$t96+g7;lTD+%1@TSQvf2xl6O5A3 zChnMi6i;=KpR$mZUFN>f!*m)of~tH3=tfN_nV3Emkm1e(3fsVKi%at7*dJiv6rNwO zl5khtB`)Kk4^%o;mP*@}KS%ss9qKBd{YZEDJl=-4=B;r4VM7eCCVR8U98=UKpo0 
z7UfoM!}*QHaC&2L9?Wy#Ogw*{iwE%ByfZJsOY%~@w2>R9I+n#3*X4OdUYRcztwkHr zR_Wm?ox+8DgfGC1#5` zI7x0UPEwnXv(y%fMMe>^80X3@!?|k9#R{xzCNF->|;$2mMKZ!H#-H&*=rd#JP14WyW!`s6I0ar$tp@RdDiC4OWZQVGUVh z)|9nmZCDgviBq+*o#%zG>yPsj{3JicPxCYUUH%^Iw{uzNdu5&N#lPTR@e8o;zT=np zkNhWomH*6dV0M_!@9+$M54PVU{)GR||HPc}InMsVK~};L9ylA&2j_m}#F<}taMD)+ zQAiYqwP?favbYF=!diP&g~NK z;@r@);+!}y-WNZKE8?oSCVm#z#SL*&+!D9lr)P-_@vFEm9*W1}H|Z^XapqNFX_uvN z9^q=aMsAQ>U?<}=Q{AIIp%b6dQ+!c0kKJW=*e@)d-DbDgO?HD_XFva+`ZVVPrx&14 z^To~{t$H)JID0Yru(S7O?~QpfdofO4{fFNBrXE`XeYO&MZB<^4*WlGfOVR58+<*Uv z-Yeeuf9}8Q-_V0~pH^05+4?XqsC%)Y^q}tPO8@yn1ERk} z3+8&Ye`ogYtiJseW0p6@s_g4=fv`l1U~CJ~bI(#3t3oldl*I^D9wSploK{g8V_j8@ zb=9c`&ibf@F|Upu^VG>6Q!t`Ur+KuH7UNtyoK=Ujkq_Y1<3n_q(g2u}i zTl$W^Cw-C}T_&6xM_1_@{Y=;C2Hm7vbeq!Y7o6=@8E3gw$ElRHaV}*e)`Ydd`M<3( zql#oH>@YjdPT_>z6*x6-P1f0dd=uZyxA3ie8{f`%@SS`Y-<|!ezpQiq_?P@^%oM-n z-}4`Es^=AcjbGm~Z-{rsJ1l5Q9G5?MK!T;jV_zV6LC;M?Bg++J@Z=CCw184f> z#z}tpML`iLiU_L+!pt%l=lPYwd46R@c~MbR7F9)cQB%|wbwzyyG5e&Tt9WHpE}bIv&{YKG(T}jpX2udP8M~Y%ZC$2KNnx%j7^-zCobR= zzE@7*!|8kX!~^k2Jds|~2WQ?DlO<(2oOZWfZpIwBxbF8@_l2I`&S?%8qc~;b6~fcWRY4D~^*9Hw-7Y*BjV-#1N{c-3r<*sa+eAs&x~U ztMZ*%NadjJun?_7j4t^qJP-a6Cp#-!6gmpp-WzTWUDjEOfsb`Sb2-wJIUCSLVXywF zaQ&y&$SmZ8Ia&bDz)oe;*$g(5&BEM#4ohGQ*h2BU_ycFxaip-|Vo2))*B_U%^KyZ% z09+i>oJvxi`G@s2BHT2WE+SGBg18>fS(0<)X%fhWoa#HuEn4wwO$ z1wg$)4+*TsqkKZ?2%y}i4JCn>`CBVqdPS?8a~<+|i|m-)=s$gmx)XKY|EfImqBF=` zW8O4>apDNQ;|jfF{$~D~?MbyB`L{pwhC9f-?#8nDYHb+5JfIx&H{~iH*Aw$Tu!n%p z%-QA^9exaV*mtM@e9g1wZroos-*<)l2hV(Hp7;;Ze~U78?UDO^HwOIHE6LRAz5X*F zC@(9WGxW`_`Ph8)=9vFJ6m=Q*_tZ`*UJZA*jJv(uvF>p9{XhBU-PfOdHKpk~?e*2r z|3B{B<(l`v*^Jk7%;MqSM%@(94Ws&}L*7FpryOb*Mk2 z_#r}mMYzM&cOVd^z&JP8Uqhqdg`?Z*R5NxygByu zP}P%FYrxHtyZ}YN^AB|V%b$4*J^gaz{pr_x>ML=N z%?sv1^DFe08|GKe81q2(mNl&^)chg)Q<>=w{yp?xL(qc%%b)q7J9YM!Hq+gq*?sin zF;3diqxGM89&gDr|6U$PO|p$QW(^gk`UP~un9O7<>~8kE{|!fb=rZ8n&C18!`?B(K z^IP@5fBO4>)5(9YhjTo2_v?SRKirb=y6u3R{|DP4tAAu+|5W2QhI6cfC_7SFX3CCK 
z>x3M0s47^WtAVvaf2^l8!<*F>xC&u4r8QOvJ79&dFjfe=z|M=p6$Il6oV|G*?Uz$`D6=^GGNtJ0EW=d6QJ7!DOX$NLZHEAbi zO|@wkW=>6LH|F=9u=}Zp=tak|+BSgBV3lnsPBot_rsGue*&=~H!dlr<`V=#udpObj z0oL_D$4uxEeIcH}7XA{p_)EGdYhv~J5`L8+jtSXYj=)c@Opp^;fP7oN&G4n2oW%0r z_Zk+ly!fqxLo86H$uwq@N98dVBu~f_%r4K!_gHaxPM%{WSHW+rN$kyp!)W+0eYSj zz<$7Q^5n#N@UK(=>%#Z&CER^>p8~N`{1Ce!l$007%JE~In*AI54a=8G;ti}Zn*@1> z#9OiM+zPmocppf74E7}8eL7q<56cT#--K!VX1*CANgTTdF#q3=y$T2LMkohX1&?9n z`#AaoM}Ihn-3#Z@D?G3Q_!(B*Kj&ZI9sQU5OXTns{|YI;=6IWe6~PO@FY=4Pzv15i z|CWD?oWE0Vx3HS*=ddxPJAdyC(K zo6ghW-a&8VScABW^cg$@_^B}GZ#s&D7keLJ`A+qre$_QdLD1+wTK z?um8IO2Ab=_r!{472vA3dt&Xg8gSL;J+TT}1Gwt>o>&j91zh$2Tr8^`ymJ)EylrBdc?pA{9=4fs7dK|(>fmnNAkEfJ=$))v6 zF0Eg3LBBi!Rlng~wXdG{`{{YVpLi~wqZM9=7g!H?3GE}HeQJ`BweUVu%G!9tBxD`D z=ajOptP8vzc2`PSU)Bd6Cc}W^_)p*sWkcYNWFz2>Wn`WmDkIWHaE+@wK0n zEo2MeEwQ6q%2u)!@Yb?5@HVmy@V40HEAhKD?SQwJ*nuHCsyDVWT)o4^9)~V)BV+{J zDD3y;GFnE%?ScKjBhNN93qDRAByj{7=8_A81Uh8IPej21n`mAWx(VpISToY#_j_q$Ee*3axC^E zFgZ@`U%;>Ej0X-)jr0>Gc7MpX)LsUqwegz-@8Au-Md@zfljUUKQzhP*$?0-B+?jGF z+}UzA+yt2bcdnd^-2n6CJnXxeFXsbaAQu2%C>H`(d)0mQnoVB0U+%~L$^#Oo>&t`k zAUJkd;@vmKgQK9~m^=o0PGMhxMV`f1PF$W>Z@T3d@(VommHZ0s*Yazm{7!yH{_=bI zJ-$G~sDN*}f0RFh+RO4X6_h{8pHSizymu}ruga^yui@QvE{qx1u`}xi_DST2X3cRjfWdi9pU4ygiLLya8% zdgSo<+sIK=j~ss4Mh<~DKus||HNzEvF{B07-<0LzsYek{J&IWLD3VuOF}d}~;iE^6 zVtVB8ag7|xqRFc*8k-&=g7nznt49T2t?LVE{q6;e<|#D(GuSmnTv}e)HC|fFduzKU zhqh(>wSMPXzvsZJ?{?_o9eBI#)bbu$%lm09FQDa*<98lTK-U{u*L!MR@6_@>TFY~- z<%QPrTx)ruwLI5aUT7_!Lu>f}t>trQEgzt@d=9PU1GJXUp|yMftg36EM`?KjE6dk` zD_w73jrk^UrST1{Hs1!W^uB?0=U;#;?Qg&qdyHB<;ZHDD{RX>?X}gSRyNqeOjA^?J zKgt2U@1gCo0@^OisqM0y+Ahnf?XsM(%dmG#j}4w!x6T1vjS!w#!S)BPewoHoj}R6; zHdyqiV9|D5Zf(cq)^=QOt?T`@uJ_lv-e2o_53S`rw0`%{n%zU|bPuh~J+kR>Kdr}o zv>x~Suk<+H>p+LL7OgS1DQ)hp^|+VT-QGgkfL^cxCu7(56j*}!vuav{wb1&SYke)WzUEqA3$3rY*4IMoYp(UR(E6HdeJ!-U=2~A1*#SEooLbwEov`yk zYTa$f&e#hfwFWn2SJ@S~(&L82kIMm9+T4)cWH;bSryH_6_L@tr*$w>mTujV25YuQ@EdTI^JwT2b)kURu>V0R-{X;_Q0yP+}9;Ct6V{DRuM*oXSQd>^qN 
z$Pb`HKa?L*0cC^3g$=I0@xbvTlT8=e}`3?4x>*YzvCBUkGzu&QMh+=cU7CTjZfGg}j{E9!nDS_31g`)z1_jFBxdLblT*WG9S@5f~G@VJz&4(GTx% z)VPOHPrZeDgq<2wN7<2<; z&n>kx7GupF{v_M`Oy_Qefb73c=GwL3e4DA>VAjighxw+r?O8ixobS_zX6|uu?rvEM zeXQS2LjUTwZjg)@(6F_kP3uE*HiDLH294MX+OHinT}NoRaA>wJ&}b3RWKqyy(a>By zps`|LiS~g7+7FiJ09c%ZU}+A4g*gnCq~O*X)OOIizOr&?E0*-u?kJ z#>dbUUqCB-4V~~Uw7_N9U{_#!U4z9X*nRUi_Q1R+)?!tO0mGg&xQE$qSOHb{4{`M9 zujWmp!+s|7F24W$Mf5dqily+^nh(V~;Onsps8Zc?<@OM%9wF5|q3}Z21Xi@WnOA5Hxck8T6gH&=>yJKwD6FS; zHy^X<<|8%(FbgmnFvonvKQ|xqFU-fH9^f~??`VfV08h49HUkf1I_|aXY3>ak08(f;6@&5fcZr;te7_kv;edMbOywNQ_JA5Hq-I8+aDbB z$J=gyyzTbK+iri9(ovgq)CTr8;0_=I za1ZbR@W@OTzhhnF55QB{1s%k>h27c+nQn z4$vOZ0nic93D6JFA20v_85NLG0T~4qmqEp4P;nVlTm}^vLB$PFaYJ51+3Gu%9H`w! z@TZ%(4O%4}9D4}vKLmds>$W(9yIZ(Rr=956`OvTPQ80F&lzaIn=F`~qYyJf3y$k8R z3+es;*n97QD6ahvbk5A|E_JC3f&v1{Djk+ymkt6-u?q+&U;)9kw}`z)jlH+Dt0@{g zdSgk9i5fLAxkil|6HB}j6H_#ava|1V&h8=_bFcUJ{(KKTJF~Mhr+m-1f4}EUGqATA z^xX^$ZU!80bNw(+z#y&^WjU^`L0N}uy9~X-A6&p6T)-b(z#m+Anc*m(kFwU#n_q{r z9%Tc{MwCq`n^E=}u3^>fHGV(J0hA_`gD8hk{t2vkALTI05tO4SCr~~{`2^)8$|;o7 z*a_rQlrt!2QOi|O^h}47UOV?$1wrNL>!ZFOvX`*V+xL` zIHuv4j$;OnnK)+Qn2lo&j@@v~#W4@Zd>p&u*aOD`9E%Li;0n#)3eDgO&EN{n;0n#) z3eDgO&EVC|;ML9G)y?45&EVC|;ML9G)y?1-&EOc#;26!|+s)t_&EiXjyJ92Cbd;GW zvr*=v%tu)WZn6kv3CbR{k2wdIX$HS<7LVZf<2YhoVZ}H@Zry?0x8y*`-hyNC%HxJ4wkWiGh;TQd2ZehdhsMSk31q8k8Rsid|Ul&eSh=aj^?eHqseXWX`2bq_PY5T z9O$V(6V%5)b4HRdp3VF9;Jy=r6JvrYe+)QmV#b0 zXbd`hrkn3GBpJ^PM^PX0V9XI~%<4tdhYW+Ze=L@cD7!IkjyOC0L#!P!c;qF}@1|e; zke{*v87%JstLzOsaDAEK0CHN8(Sq|Gn5*~b8T~`N{2rqyj?O*4-vTBW$3efdYxH{C zzyE{>ZSOOlKMR$=-tjlB+<4BNt(kSt`|ZW9{TKS6kx6PDuWZfxeIHr=A3w2kVcjAN z{HeUx|CaT72)Xaakn_%LMe!C^JKRNv^snL{;3J48;opDG)^tMt@or$y9{iJI{lOt{ zocHn120c55zS4JKi`;4alf6)0Cyh2bU=(uB7WwQ7c%zM(o?C=1h*iCg-NY2g=6wSm z#BBoBD`7qP+t4F-3A=E8udo-__X+zDFFPO{z1%r!%`xyUJ%w>6gM8=xxQD{X2pt83ixq>51%rqcaN!oBEOh)bHhxPsav5;rSNQb)0EW<&6tTkjbPsc5{204E@2UN)QLgeU4cc^lk+6Y-I(Kk2#`F?(G@Nqvh^(=1IJErXUQx; zkLG%B16J&4lihs)7{IWcj)*%2nLP_SwFP$o*?5N`y*~h-ISy;?l;Jw}Dfs3YaJRFN 
z?zT)jH*4hwp!pw0{?QS`OWcQs8t#N)4)?L43QujsQ^WAoC_GgLE#NTd_&D^`k5Lbg zRzAW#bj{lVA0 zIqdhqAxE1-OWy<2=SjGTOh7TCbqiA6P$5pm(3(+DVjC&{IxBdjM9T zh0AE+GFrHd7A~WO%jnyEwDJ>Lxq?=HLMuO^mCI=5GFrI|uM@?zc4*@Y+W6Kqr?=p6^!X6;_z~JUfiZr9drqR9g64DDa20Ky#r%GW`TZL0ToDezoAN#^vm=HJXzyFJ zcNLJofHslwfT!+(&r-~zGx;&bfLX-#xeM@~cS;J^)H+GJK9-D3yHz8rh5A_G9G7(Nk#3z=LP+vEl0U zN4Uw-6#P-h`laIIBB$4#ee6qu$vOqz%v6jCL=FQvCojn`m@J8TSr^cRy}Os}fx@AQvm&l>I< zvrr{iGx)|V92^06^Pk}p!#n14dPHXB`&5TE{z-K>IvVETIx@lW`8vuWdWYc^b_c^K zc9qT}FpFLN+Yf3X{q2wR`5R2H1pfT>?@31KUw@-l3=_az{)1*~xb!#A-D6MRZvC~@ z`ZQkwzrr6lz;pfpQ&Z+YZJVw}fp^S=&w#xn)~!G?(6uSgvjz?08Ld2H@fe$VetS=M z0UQZ4hQe$dhU2C=Hs1%^7Pc<_J=F$g@VPKHKlP%==VhNi|5?~h8z>i|s{jnU4St3u zaQb%0`ag~D%#t#^ggYivXhB}W-eM~OuwKAiAH|IS%OCQ@skLXM056IKPjbTH&BPz& zLFSxhXT18jcA3Cz|#wc-qvdt(h3Y<{{DW3$Poe5$iX z?0sHFyU`AO>gH#@{J(kDSQWGo{5?&36nFm5f42YUC;#R_M)xLp^S{&4-&ESVGVRIl zG_(EhN8Tzg|HADJcb??)8*4L31UpJ~)Tdy&$~+#_g8F@lKU6;6?d7t!dA>ayN8E@z3C1eg zP^_bkXkAI0hL!O>NpGy8Eg_|(>?s@F&A2myKv$fe7CsfuJhR@^xZ3ox@U?ISYfb4& z^rzRM-!nya4I&X`q8w{bZLkVeDcXtlqJ!utI*HDrN^}w3u%A)~(L?kUy+l7zEe41| zSf?5yhKgZgxTq1AiYvucSh2nyD_1vR?W!P?VRbWIlero2kV7}3S#Tod9I%#X{_G4*bbh)E0Rgln=+QXACzk#r%7S5Yj)1=mS1gDH zV#&4yS>D8zxZ=DWX~#KXR@|^>v^#N!-=RHekLw=91D~G66E%1dFPwW5Z+!X?A6y9_ z$O6X~Vlyys#=d78%kB$SNl!i6r4Og&M`@DMJnAqylwT zlWIIQj0{67!^v=5sVDWg(m)ze4_zB;%`*B#mhEl9^7=%U?QOxby)Bqm!411{C}FeN z@yJoejvUzi3j27taCX?2Lyb>=y#K@oJ97l!*FZjy?t_b6Ti51bGL)*V7VCOJk=hyhWB3wbX*j3>wd=b}#Yn%$$ik*G1cfxge7`_$0 zMb`3n!gn~oA>6?An*wt4u%E&$_-k$p$i~LL3g6?`JHj1gmmz-z=l6trCSQ!mGRH(r zICe-8@ef%>GEv640_)}QODIGIu3O+e1_G$S8lN^IRy2beY>}s=6qUHgUc{!$(tjGt2sBrEgy5QVhbjMSm6?}T)Z547*j2G_r#V)0IE2xOw96&*Ss9!Cr z;r-|$cENd95$jGwe-S&kfwls0Js3OQD?wo)Xg5?0#d(+*hVyVS9OoKQgYyV60$Hb# zVkCU|(PA`ej)Av>2Q@B*M`^jZ9KWs*SK!xG;wt>QMcjh1(ACY>pidd+4jQ$=9^!T~ z%q(^xu;*;B6M+MEj&_zgbfQjyW54LBaP-YFg0at8df!E_+P3I1n=PfB}sjOy$e)wwXLdF3I@Rd z44`-{XBh0lFqmgp>%_2D#jw_uVJ*+FR$y2wGOV>@SnJ5J){bGVBlx=;>ZG_UXSge8 zxGON+6&b!dFnqOV*l7>$9|Z3V#ZfDUqddb=D~6*y!%-`SqefhGhICkod_IbW){qiw 
zvDf%|aXoyW8^jH`z6ozzkTL9Zh2#)81;b7^hMn#VFWZB^Ia3Rpo26Q=ny#9x8mk(j zN>$l7ZFHRLkZM2LzQ`^{*-hEbcBOrheUWV*{j@K#skFXnUu1pII>G9M)oYfIEZ18u zwfN2AM~mYY-4rnjANfsre*p2C!bdh&RwlC%FUYQ;XA_a1RB3Ts;UiqMT&iqm@gw!3 z3R~@q=sB%dSZ=x=E?i{a@vCi~xE?DkjNg|d7t4a5sBFjg<-3a$6+Wo%N4~q|Qn3=> zQ9FJO6f4a&itG8lwD8^OKi1-Mnx(1mL9LbM!cV0A;eD(R0@JpF(-LkLU}D21oAcBl zMXA*R! zzUd3_O<#a-`U3pW7vP7!06+8vL>y=g{729X^|{eri=Y*E!*3)4Jxxa`#P>2B8{oIS ziDwxk#c;mKpBowhieU3b@mM+7&lOELr!9MU}%?ApR|F5wGlg@c%UFE zg}J`gtj&dWe&<*lzE7;(wL4CxnOpP>p0@*j}n0MK>QYr z^9aK|;Tmw^8)Tb(3v9Z9<4weTZsT|l$A|Fs|BmtqF&=`fGuYAKLOa2Qc0&8M7M(Fp z7Zi7V?||ZgqBdLrN7@OFbOD;OH8f;vXvWsyPCI4jkIcq!PloiZb=eT?`%MtmP5y^oPJV+76Uc{6%_AH8lyukWMB_tC@q=-qwv zj`rw2dUPK>x{n?;qeu7Aqh|EtK6=oM9yFtMx;vj7bZZTIoQX0EWe&<*lzGT6hC~9D zJph%_EN04w)X<f>g|k4!Cv?dK{8qfl(*X&mTEgjLVqM`w+Bjq~ZtQ36vL6E=kAxm;7GP<^#|s9ru&G zUcd)*`T%tL0Cf5Qbou~!`#*r=P~4_m=RVGVM)?(m?m6@T##4heA2n_c)>-B9CI` zV7@r-j1rBq3hi2O=P}C{fKjn18Ne3g-HEoyPhN`S7Vt|C-0>avC3J@iI9`Hp>N3ue z*~ndkSN|T4zu-v`MFUwG3#v@RF$2c}91C&16vt|mdK6@-37;W{{R*OMG&|BB#Si+HTrb73 z8l{%CdJC=IHMM#Jt=_`;Xv??I@~vlDzJW2{1lHe1wCW+UU@2zA8O}m7eh;bm4W!}c zkcK~?=FNcj=QO4a53Dr`auqq`8a?%-8`C7tk}N#^H#Gpys0MfxbafPTbrf`U6n$+5 zjvWPU9R+P26=jA`(9dQ-s2LDy79;T1&*f+<4mPbFAaxr$v=WfIjUDftVB0F;eNw`@ zwMLI_p-0&54p@E<-lku0|3f_aCnDW+Y$EhDYQegJLu)uCpm`hP{T`kjB_MhmJ--F& zyMx}}0tMay3~s`&qXaj%gYQNOc-#g&Zo_M%1WaxNCf~zfqXd_?gWYKjkBJ@T`Zhcy zN{r$*Ab%Sk2_-xccJM&h!P}sOMP@C+LWY;X8Y8;}s=WgoxQ!9s0_EPpjNC@gFChbg z9-E=J-9v=wCrEZZW`savQ$U~VfLRKLYz+nG#o)UZr5m)md}y!(4evvD`Ubj_1J{vj z56V9eUiBp?^#YEU0h_BhM^-*y^)q1c8!+oPz~X0M)^C8r&%mq)z^dOc-d`}@Uw~Z? 
zFy3E)(+@CPzhaC(1G9d^7=ObU?*qGj19ts}S^E{U_A6$MQq<2F%Y8`eOBe?-1Thm| z0oq@opI-p77tmL#aehxKA)f{_$$Wun(D9gOd-VA__XXzdOPpW8In}DJ;{03CBs5yg zuM?BRe`4fR`dVPVm}d}2H(YCvqX&u?iZ@CuuBG6Zfn%28Tg>xy%=5SC&2{wTTgu)jF*8!hzxJ-u86q_$&rmlfgev464ET*#MN6gss zWy@zwwwUD##cs1qaRW@LT+yJ%5tvOXUyPW3wpGTY;kR`BX2g1m@pt|rZ%T1b8Sbe> zeS;y7j2I6O3v%ljmP0O><^KZ~on-KOKCHg^ zTUc3QT<(x49l^QHvN+ao7w?*2`OZPg8)68$!9r^ zN)+?hLvS93qCqi^`!JK&g{|E6JmSq4P%eXh+HzVWMx6W`j5znVqokbKh!OXJ5hslp zAyMhy79*&2aFS%;dRq?u?5Im2d&_X&GqT}tIr|mq0eHpzRtViPoNR?qTdq$v8Or%> z4etPJ?gML10&A!xa3b8_yu)R16nc!?nILH7c2DFLh2rz^8Gy&&HC{)IzD8bOEL-2bj zj$t^45cY;bCKX!bWzkTU@bi-Tpw!E)kY zIdQO@@B`}Rpc`o&+SB^_jc}Hp- zd>y<|oN!KvFL6LvN3hh7Z6C!Sk8;(I<>%{n#PNmfL$)(2>QRAgCo8j~qGTnH7Rp8& z3oOwspyNL%ouH(opn#+#)HhFb;yZynWazQYZtM<^*G6d0grg#r;Oh|S%|3~X zXFXZxSuf}2O6y3;EA(^Fe0XTbftJ2#Gh$z@12#dnRVq0vOH1qmrQ;lF&m3YyG_-^} zXhOp(FnDhi;vI<%og%#vf^NS=x{0WChs8Lod<-=>_c3fw6ATK3Sqt>@=L4!E456Z< zd_9A2A{V9YAS&YGECTF(Mdy&nNO_2@j__Vy&hGAlvkc9i(S$f?9AjhIiKA;QJ;%7{ zJfM&WKRuJT{&|FDUF&u z^ z;YXPv`v`W2FV-()ak(5In-iWZi%;(w?yBr4lj*{3DP+Tshua?y*Erx`XjrjvFfJ}C zWAFX^qUmU8?;|5ITsl1R0{1u(H5r;cyQFMZcGj%Y(mB}+e6ypXGJSlrq9U_=`M4)P z&CX#zW#Ol2{B%YiJB0sO%*`*JTU0c+IKPk{Hs%KxX*5N_`60y`O>s*>_ab_=$avV? 
zJs4MuLb`|0dxZSQi&8}$xG_i975^$b&ZTmlw9dYcoShGmyN8AbI5@;R=@jk)1|J>} z9ugiN;^0bWH5y}#U?XG`2qH7jEx>`fRLGQ=QwoeEI;Xh6{)0mNC`8EG%gGrfJ>v?x z1a=SS-@B$iojW#dz-uFhuj?7!FJAg4JJl&^YI)C1FXg`B*CWx=$};!s5wCoq>#;!S zvM!)cUVKGVzsR8SA#p7y_}BU_o8L{hp{Un_;>`Mzz^@|9ySZeI&B|Z=@q|G;S0!1t zvn(tec5r;j!ijX8Lqs1=V)SS6o&efAfoTNIIYmmUX++MAkd}~vQ*qyH$)8^-9}bRU z@t)j?3jwKPx*Z*(qFt3dj|q1K+A93~cz$-}>VIUZ<0n)lB~?v`S7-fWbtRu^b(vHU zs~zfzK^t#NpB+{nmd@RIHMBv!gIL0BMEz>iA8o4NNzUp|Q91LxT%k_l9f3YP$R*mv z#nF+EHa0N6Iw^JNxJY9I1y)~68>QcOs2hT|z@>u*i01B=z>(@5(nDzz>hHi;3cY3T z!N1_FwX!7&QB>NWxD zA1BWJSJsoh($@Ml1EilzNk<7v9gnwuEJc36KE@U7_bTwT9Iu!=p|L+f*&*e)pms5$ z#hou`5^Udnmu!1{MeIb!>>*u6L>;*pfT_rFA!kCvs(>qW#F2=#mlO}}80dOs#emc4 zIe`NzqE^#q{P6x3CBGl7VZP7~$d0zbE2Vb){?)5-#}aVpv4~Gvb5^?rOEBjEaMN*Z zSn`tin3#B4Dnn9ILV~qgTCbB~h5(L-dM$S$w8e!aqhesO;Go~Ki7%J!{PPQW5Jo!+ zbYTs;aK#=U;Qs+~FGrb9ZsY}&2Vl~a;A6o8rYs4J%To{>B7y~9AE3X3v!_7uU$oYa z-!`o0qLBsuDdV>d?X|d}hyOv>z_dP5snr>sT!T~lM5b3~_y~E|c0XL1=3lzcMzudrcnPm*p`uB^-Sj;P1(h1^@1j7zcu(3NcVV*edtL-`Xr z1nA2mJM)*Cqy@Z#?7*XJ@$B$9xo-aPp#iyrVx)|D(BV3 zW{=6to!w{BrxPl6jM2`R{!*gUkK)ZhtiFDP);nNjslV2xeFsl3Ac2R6qb4MztF6w~ z))J6KPr^eepP}LlG6dsc?DTov@%2?jGErij0m^1f%$MFx=FF^^njF*6yZgAbriRnA zXPy|L8NPko#*GX14GqYc@{d6S-)zioN}JNF*QB((1!aOJWn@-QU0h;AUj4q>(*1Lz zr_^S2sZSj-Bc*!dh?uCFRTX)<va_Tb+zI)@gzZ>Qp;)zFEI3u*;-^s?8%4 zVs-C~>APW4zST~f>@nR_t1~U$Q7d6BN z_nw%SF}815IK`b==$VIX7xrmrueDVMw)61`6gW{AXrctFa8PQ{s2Jv}m=JI63MFw+ z0+sTNSU~O(j!~V2WPAH%Cq`?63#y{EgEKmHOdXV-KdYC#Qkg$$aY5b2>ae&`|EM3k zb97wN8e%WowPE0<6V(?gR#ZoX^_!TJGGSnJ?u0DTu&FAfXx>}>Yxc~|tJr#?y3bnD z<^Cet=kAyzKR{DXDVN|4DWXnbRKoP46cxzbJ5u!1zy2ZLzvTzn&LNKu$l8JHh}=kw z)(*I84?komr{%o0_O^+Mor6>=F)~P|^APEIs1$sCNtn~-Pinga^%xwZtzuJ|mQ>eEBlb-hy}qP&V^vt3Zd?77o%InOg=>G` z>$I-_>LXR(3|c!RN>eg9CuL$~Y*??__~I=yd-PoRPQM}h7IYu{kMs3uBme#7n-zHf z$Sm;qTI>qs$2lTErQjS54~0><-W3w1C$6h;<(e=GGi`4}sWTh<0Z$aM_a)-LE?OHR zD3B>?A;SyAAVNYIUnwRk64{w3BFV@6?v}GcxW0R%5YBIw?i?qRMtv#WRE{Rl7_g@; z-VJgYIa<`pQ0|p090lHIlt2`1I*>vli3;guDk5~(#|ph$w(}=n#>aS~IwH*;bBVN9jh1H79^}G1Fb?z8 
z5qmmf#t7-u$=_P%;9%>*)H2F29W+$<`X=$wPk6TiUyZDrsa|cZ6yEC~@y<&Q&JPV6 zm>D-jOPct)gQKSIF3Ztrqy0XJiHwSgiHeLNYs)89r-T$&q-P9{^6!&5a%=CBx5o5e zH8VleC`~XMHRxfT6fZv~TZPPqA)H$4=qQtA6*=iD1`oC_E%oq7=o$*aO*uq}1|MMY zj8_0{pzudKX#}|mx1yNf#AH#q2~Epr67bPcj!{vaDF1MjnQwJ9J)IFq5oV~U54O{$$7)61hw z8(p3sL4Gf3T3Z@cGQEKJ*I&pf33E)YU)FQQqYq|h<}W#=U%K+~NAt4$JL!S~i#OdM zK5xH9e13SVr02$)(se`Th&;7N(%{VOqRBl2l1FbDG<4U(N=!JYXPt zf^$*qvE{mIokfMh%GMTvYa1KhLf|10EQ}n8%E{PRK-CCSuryeDYM8rJNjffhe#8e% zC9g;x!c;!!o0hY(kVl_0J1=C~qXNGD#*Gxewt?l%MQd*0b!f}2s~@jc6F)ztjUBJ+ z(7wHrwM8e6wuZKhhMl+Cn_4=Q(RoAZ@MioSs3v$vKlvfr+hpAFJEg@u=}|m6BXLT( zbn_Uw+OTtyw&c~i?owVI$tsza9yhsCy2q38U575sA9s=8wk~F9PC-@3qaS5Eg9a|G z?*B@Ot-i|!?U;hXvD$SpmR0M^<{qk`e4g$nbP@ES2BlfKsq6(^Cr)Mw_UL$>mywvrd4NVyRX03F9WG@EVl)Uq+wCUJ} z8TU5zoATkD%#8zg>o@kLw^+W{X~ZOo-MG;{VhuHf-1+bD|oFe$j>vybeeru=+z<>&NI|&+%5)*1$!E zifnYi4nP~Dpmnk`?3m!IAI<-yH+dKIBR6j37o$ePO~kBS#jFKTSOxg|%ZxL}CN3QF z2HDDLqjLv4i>VAu75&TXMLS>wSKE(0_v)-~UhS8^^76}Xemm9im~H-|hN|V|A@L*D zl-4w6J4rkEf9adM49Y3pO~|VwUnSh`(ov_T^_bqH+x+8`M!j@mLAGz03&~theU*SG z-lk@TCwhoo90h8g@j3=f=3_C^9L7pvNohbVZ~4LVm^kpLkVGM8_A6nu`+=zAS$#Hp z;XWb^BTT>=f6O$ExR352_9^ajqB@Jcjg7)mN8LKWo)GFJ0s@&83A45<*c=^uZuacY z#vV(b&^>QLCfOz`9{;)IL`}&*7nh8UNttFEjf}NU`AG+@yEy}f;iTqRfjso%L&Kj!uJM`hycN8xuX6&T)nx@Rrfzi18tx zUTdkAcW@ANa>{Th6`&3n@>Fy^g|&`Ct14yIrDRq`YrmQ`?W+yFd#(R!+N`hE6(5PL zURK)wm718Cp|A8WTUr$%$V*<6TK1Kf?<3-CC9e@-e|h6Y|6wfw#YxobONo3Z@g()cuM@s)V#=E_K zbL`lgeEf#&#-iMrg_H&BPEK2Rq6RZH!eAwnWAp;#MF_PdJF2J}5+GOsRXOM)fB`kZ ztMDF9@`8{MmJ{7{X*v1#QvkWRmI7pHS-)3?03b{IQGmQDeLbFd^IyL(SWI|%3UFx$ zx$3KRZ0~L<*w|QGa~<5;S?I6~fua|>O&N|+HS}f%FGQK*X&L7YicoDM-@SHYT;Zyr zQBpildJmbOnfh|Q^vyA$SlW1!)W~)Y{&3OY*GF|wZOECN+w-N2M?dn*iGCdPQocXDdxjC~ES-5P&1b5UK*>=bqJwEmO#jET*jeP~qGhnof-jjNiL zS+y{SG-!Hg<2*a|sGixY*OK80Q%>rNrsgCKOApS~M!EU+9zLg6za`btP)F)=hi7`n z3;`Tr2#Tu!M=E1L-&k2g*`bk@nRGhJJirh64-fN8AFnfV3dZdhV&vQLtcuxU4svVo zQARP)Trk=vY>jd_lNm4EJ-YPgmzV#v;7HF^gR55c1U0K)yIfay`8DBEi(0olJ9{~N 
zmps*$L_O-J+7d9IlUoo=NIi?TbX~=G266Qz{8aMJ=lWYKq%CAyyoDNb1K0SmRj8$1k*FybkU_NJ-XBs_ZasVDc&vU-&_M zskSk(#ZNfcGJ2e_V0=+a3B-Un=h2lVvW^T>mPw!R*78PbZS8?;5tdts$+@5cUsicL z@YY9;NOR?ltN$FoT5$kl>Iz?8tg$b)N?l>9FKV!K9_hJE3u4Qmx9t%szb03e2{ivL}>S*I9+LU!IlYtz?+;C0Lu3PaNTWYVVV0>1mC=M; zc+h^kup*{*eO1l6VaYZ&$-~xGS8p61bI>!jwnt`7c96Aw&ZGgk!_zv-xku;E^M0za zCzlsx%|1M?Wc=YdSv{7XennNdxGp)guqOKF%xOi5wak-L3aH-$)YVkp!6)ae(22ka z=4>|lD}mz>tkM6`(%_R)8?t3PW#f*Hnz(mdeEhh*6Q>@o!-bUkZoNjOd2~n{H8W~p zV)^S;y;pu%=U?~X%HB2aHKf(1q>Y3rnKi#5jp7IOqyF3mVz%C*=wc0+DRhJZj0q9+ z2lVHQ@Dnw8ATv}ZgV}8IL`O1Xn-`)FZr8U<9J!&Qdd=`88|&oZYikB>7@2s`JF_;o zWL&nhO;A=%W**$m=Vif&X`|NJ@CaRq7pBFpjKCCcCV({dF@-?RkJjI&Cvvy~Ga0lt}Vt-{yA==C=G z6(Dq7(Jiv2sn_Na9aFrlRFUrSy#jCR_la+h*-~s_u}UU}^iS0-T7=oDka`KfVBAjd z;U+M>f%#FeTy?k6lqhZl?h4GM=vmu zJohqC?mFX3?o?tp@Ge#goxtgM1}>An3u)nG_VD7I@fw)3nN2U}6Yc0Nl?9ChLJx+Q zXJ(8n3fc|OA5gYpxIUi$xK~X`T=fhp%PF}HW!j}3XTM)zE67bvouwc{ z;6Z zS(b9Z%!&amJRrH%(6Z#!(MCHf>!8p>>LUGN_dY3zm0l_*Ek-~|agsv%@uTm^;KwI; z@pU~`Wpgl4NpIGQ&ED3jFhU0qF+NB8umGCO1mL0wX%P+|wnJA(KG?^{)fzwrC7Pzt zfqHcCVXQzS{BM7 z4LulJla*Me^%e%TY!Krl8{>qREUDIq@SpUmrW5Yr*YCB)+~9%BtM#$`$pKYe{d4e! 
zAtTP2`t7E*b>#ojV`gQcH_z)W&~As%U-3>TZh7ZPH~xZmbf4--v`K9``~4jptaWNR zlcY@eJYi2J0fa!mQQ&}c8M#$SdgH#4y8YAA)2Hp#>Gn-aZ}Q6@ou4-o~{SQnircd9eYxs*$l=PXCa?xSh&w*08aR2Fk8s(6v)S;`bvOC2gTJs{TEy{j- zOMP6!rt0c-by^#1ZQX{N!JF#i(a*Zvl1bg1twXvE&CDB-C%pGO76yP8 z%z!K!QXHnOhpx)-iC@k7=>qv}E4Q?iL4db%7L$(3pxN2vNmeL~dJ>Rn5B5TE?dqO|7)MKCmBKb^DiZvV=>%w8rSCTb_kp!Y9OSxof&k49);2YC zO&W?*w27o5spUs87UT9k_?~-y(BQ43d(0@)GYuuK54pXEPqVl@~(uv7m5PPq&($#iHg^BOAj6=hd_u*rPFopGE#W`WY z4yr@>V@#*fkEg6aIjlhQpjdp|39X?ym?4B%-MfED+DPgyJbLsMsg*W?;f*Ep^>_7G z$VzD#-wB&*09GziuCNpJI>93XdT@17!3Jz6*V$P>MPbmQ6;q3u83SsyQ9X&qkl@mJ zp9Gzdy4!{24e#cXt|S(|isZhDE=!18=qyRsAe?&a)ups&q>Z?e2;TW=;`){YAEx6) zbHFpC0SLR%u2DL6(AqMaQBG|}ITrRMQ$h4xgHhPsa*uzgj~K~+Jhn*hv3m7t!K)=_ zuCQwjOe$Du(o%ktJP~rfGsa`(VQFR6p@X9nfh8D@=m@+z%%g&E65Q-?%teON{H&-b zCk4|y;aG{J4j-DX#E(~x>tDEI(a1q5>vpK;u1g6l&0VykP(47NsOx-W(z(H}mQQS~ zJ2!Oyfx^#zQ9Z6B5&*X1I;vii*GuIU}F$yRR(={+v1@4TGo;;@MP0Kt;v&#j5iOb>|&>z~YbpFKFb zTS|CXR5@U=Kw3qr={pJGnXuVS6xd%XB;@K@nxUj|LeLz>D29s|-v(uyyh>@6sCwKi z%MVOS3Jip@Cm@5*8rdb-{RDo45U#V#G-z~R|2M->sFngBX z9XSSanT&Y3$aFTgwmi=gdRWZJ?7gj^*_*UDxzLOXJ`(DxF*(QV9TbN={#~?`mg&n8 z$UfjgVq)I)?dsq%`sCCZ2P%)I)h4A5Pu@VzZykLa$LOBb z!6{Rqnh!PHk_`}l26n2U3x!~<&Cm|uhGng*9OkIxqaZk?Kfx5y7^Xl`CB*nm(R{S`}o7NbI(ZI@gW4BoqHBs z+YWH;#Apy}qnVu-q;;_dN-G>yDhnm`P&qi;=qw#tGj%9h0Mut`W~Wi|(dboYDlZ6u z-+}tWghMUw3u!IylOd%3Us8`r(tR@0W1^JL*Bs@)oiatbI(hOH!KLMGp=ZmDXrZUR zcgrKZ6`ALjgCpdddSknX&=3-reDxZ2??8QXpu8c!si{f&1@GE>LvO`D#4`gV2XO-O zQA4m#fTz}}I2ns>14<*qLhZb5dZc?eJ1`-tVQd=Q3_;lc>MbT<7=Q5|N1Ft0o9FpM z#iNsai{@9?y)`Zg89@zeu8eKUnmcId>=duOc|#lC9iN;$=5^^_aczWG_XWf2-yVy* zcGs65T-C2BVbHkv@;O;P1#_$F-W{(^XxLagHlidhBr87J%QIu(tkU8+6KYJv$W6LvarCxZ%p8O)M-6p$`e8| zVxv4gv&!d`b{p3-7%-|5f98LZoq%+7rJh1dOFI=<7WgN4N1N=w&TY=C%r+d^VKy6O z15D;ySF;%>aCnD$ZlmWGsc#Y z2M%Ino_bR*aOb}e3sRpn;i8Kp4UUMuKArq{;q21gYlf}s)ob0bHG7s0EJ^Jj9Na&( zq)bm3eSGz5Ny)2UF?ty_;y5A8mJ#ydh!G!ROOR!f;rIx?_l6CX?~WQZ>fOrq8wT}G z9g{mcZ%k_M?`Pe9yS)7E+p}ihd~4vqw{FhfMC?zG9C=#0ziE?n|BOy|hS*d2xs2Z< 
z*aO>K@orG}yT^F(ii_>KM0%j_E;@YP!`#A>*ftTPaityF@>-^iMtZBU_3! zIeLs?Dj6>v7Q7k%cie|01f=6pH!Dq6Fy*DnC=nlSIVL1G3Eo?`=1NHWd zW41j(ZE5gpM{3)9wQtu^1P3N|U2Sb6Ba3b9>~e~objdkv@@Z-xn|ms%r;Hb`NuJ-QbE)h8y z-ChJ?2)9_~Dg?`Dp2|}>xk9kMLiRemU_3WNILc1}M%h94x7+JvbJR^_sPrkceh{Zo zXqf`p(F}0_6cEjf%+VPcy7Y8iM#kvO$@x)HIXO{L`QkC;bJPE0GBU9>=PK@qOn z_}r|G*|W>~H>QX5j^81)Z>%MD{;82Jp-FkM*<-WfhUP?;lteBCbdvcc{5rJe2L2ML zwdv4~Z~xaxcjAaR&L`V<6Nwrbb_O<1J%oEsGJcdoYd-QlXB^&7L4)C zB%Vh0g~+Bi9lbNU_MTmqH?CKQ&ZB)23xfNN(z@$24jiEVzDIgqcXUyAb+2^dT{At? zJ$mq5e)%Im+Rt=!A_Y>{hEwjNp)=#c81xm+T?QhGh2BES!9JU=j2wAo6LH`zCVV+( z_Ls={!`tlnS~5Y%fouVWTY`-|EmM}q88gb`_)9FMJQ?zt8!hFN?TGL=aSr>ets0$p z4kk@_IK!Y(8ig^1+E>k_HM{_;XW7IJROgM%RRre-`DDdJTIKZ#$&dCN?i}dr?dOE> z6gP=qM}EQ>x65FOo_G51aXZvD5IzY}~zDP=n^^8W#I~bkT~S zQ_^@m6;dyW85HJb(u*i?f<|Nff} z6egC(#FQr%CeQ=6PU32~HNpg8FSznDDDVoh3_C;8>(~WEwp8|xs3zA9m-*G4nk>i8 zmljCrh~qD?I|^{eNp{C_tb?||9oG&2!kr58jo``dXuusm$_}B9<>LmY1@#=9kuz8mI3Q!> z>wWugA3tE-tT^BNzGQ^iG{O2+?60Eei+L2$dmGLk`vJ7$!nEx;S62&#vy%e8)-EBgakGyUA|qeccleCJH|K z`TWQFPO|ooZUINickGbnu3jx`N5jnerK?x-BM@{1jYs;DDAspXS%X;a^FT9KkRNBKR0?h)1QX2_pk1cfjkJJVzrH#@tl3Y70F>zL{bcZDgEf{fh(!`@9R`r;ip7~M%D40e!hshn0?*TuH zZ~G_<_}ndsu03^vp*^Gi2mxgd%EA>!!C_ub)*6=ptf&i`F?6W$B>h0_kR^TVwpFUb z)tN(vW)6C{A!_^}cYohmncXV_=@tJd|M1??8I=)Y-^c$D5uvS*$zCzKV0dA7O?01v z{%%p^rur z?wXn5+?f-)x@Nd$*m=RbnU|Dg4meZS4Xh}ZUEPYaPvXvM^AH`7naV5?N{7f-HtLJU zym?{{1-i17NE!+>#gd>sG1nlA1>OrQ&*(NH;Xu;J4dr9s8?-=r+-Gb;(16USkptsN zdHXT@r|LEp#g6>P2>$w{e>SGl#U8b^XrU^JVWd@;&Q4kV*d zXioODOJ^1*jjM=DtXtN5@J?cJc#?;-bTFyI>}PRrV<*moIHMYz(|$K6 zta}xxkM8P;aQhQxov4jzg@`GfG(cPkS23u?oy;-INb^y!KX8D=g=)2-!O6)jQtMhQ zYHv-zin1m~m&S}TB#t4Hz;TE*LJu;gMHzWXYlO5FaDNTO2w&-kPU2VTIb%qsF+IKU zZ$dI+)1#v@t-kMz1ZD(o1>!PYnC?R}-w@jku$C8EHWd~NQC$fdTNXyX~6C5j9CJj z@HTk@Gzvu0=h#sL2dl}92pMNT%4acJX;ZsZjSd?Qkgt!$vf z*;<*>pD2@lqM1VkQZJr2ifrDa6=DK%aF2=ukBN&HFP3&ZlZQ)Vr@fdD8nIJzY#FFU zC#+YKF^J6cS9C|)>Z?XSY!nJ>q&!>VA#)s;B~!QZaBl(=&=}0^*(6NU|NEy7`^Z9f zsc;s!=xI;#Wvzivw!9Tb$BLn{kum|n)VCCsjEQY;U5OYKNbldyH-p^1VU 
z4an1Hwo19;&nDI8QEN+g_bI!3I zORK%2Je3Zqc7cfg-5#|&Fj-gE9iP5vuYUXh+k`%GnEqPJIK zpIBszbTWm4<@~5)i@JwaZLV+8H>@vESZ$EY{fiR?F1IGJLtOQ2coMNLTww{F+PBpg zs?oZK1PA+tchT^=u4+FEoyyVC#my4yGXy{o`o05J1_37kD;mmR+C&OgsHk9y(G>@# z5LdK{CI}<^Kt(?#EIPAhg^pjIzcMX#MUP(dDl`$5^Llk(k&?DNfAX2-1qI7K6@PA< zsxSM<)?%ZLT`bZxrL%Q!jEuFj*{D$V+4S{H5<2tiO?@6+G-F4bT>c7-ii2$T=d^eu zy;|#p)wx*-NYF_ravBh|2+s*S_-J_QlwBi7?U|C2GG(_;w|h$J!A?01 z`S}ewojP?JnV;8?+ma+FYfU>%4q?JlhkUwNh!NVDUf9 zJ4IdN2UsK0E}tQB2j57I$=lfq7ZqQAI)6!(R|M9)WGDj?z2ADPaP`nmS)E**G#>HY z{SYO7{Fss1a?pL>L{YX#hwLdE%cU||$beKGs0Vux$TrAhkUQfIjTsucOfK)*mBY^d zx=4EpJ|?dmV$EJx7RifxG3!En_-4;-Q+t$l6`r;)_ARcCG};$5%N+K_q%3M*$YZAM z8#qThQSbXdSQu23`9VA=zK0QbAqxOeZ$GH|*Z{}ARgVEY;V(A!{6%ybvGaqPDWn~(Mu9vYbyOlK9ZYu^Uez|<%G?QG`t zek@jRGNffwHx}<-RXs9L8vb;;ww!)`B+>_=;4Kbq1( zOfGe1=fDC`Gs}mu1EXC8OWDK@uoVi&_~rCB7{3(U^qZq}eG{s>byer-a_d%x=Y%(j z1&kYxJpt2%al_Sa*)MfZ81`~OV8P476B#!|pSFv)1f(#-58=EYd*O>jXtba%yl@)* z!c;q{eANoyNGIOzkF_gTtbTyBUj4Z}5M}40zgcACWgI=q7O?&Q9-)wtL_uhSB#DC-=Fp8C*%r#056^8 zF2Ry6%Ed&ru|pQM26_6ndSiHQg1DDg%L|d8hc^@KaTOaKtc^{xhfP)tHDlU3rYFQ! zBo3I&J_e;|G%2*mrp1-TOza(38acXeU{*$ONLCgdS-8}lECc^ip>_+qMwW%Zl+`Mj zy2dDpDVv6}P!iECpnJG)fOmwQ(k(x}us%{38{07>KhV32lf^_kkBYLWVU?&Aepew` zb_aciyhby`%28|Qp6w`Ww8i#6t`vII&xLS3c(>8v<;uXwu*ej$VJ0;H!IZd)L``Z$ ztiONkl-SCIfRwnTF8*<{JJvZP5~2 zk8Nztl_&ZUc-Od{>AqH9bqac|AMwOlZ7{UV$9>&pcSHsEdJAI59MPAeg=O;5@P63u zb>_K7*xBM*-bLT^=W1UM<*pxvM;!EQYy< zXZYu3SO6X^YVoE)F8k30ONG!#H$egQuv%Gel13uiq;uaAY30Ln;!R>DJtW}CL?0t< zm7EM?+WMn2+CWerW5)D{<|roN4v0UI_3~u2X0?k08k#-z!hxuFN8`5wiF(3x;?Kei z`8=FaOCQ-YPHt|t6j;c_peW^j#^j~KM9bPV**2_7-V6QbU@6wz2R_OiW|d7N7SbPn z&qP?k4NU%ZaD(xT8|;E56bj2j2EL1zrszcGk2AU%pcrEbVk*}3ZKUha(9@2v5=Z0A zxH=kk6MdmH%tIR+8Be1>M&*U=y#endvZJCkL`@x~y#D41zIp=LA@TBweERCg7i3)? 
zU1X+7Xyc;-EJQ)=HIU(SpAw^A6SarXm5fh$HcdKQ3hI=-kZ)5ss&5gJUP$v00#>c+ z{NhRnTa|@(o3yTeLdpRJM(YA`_~d}Jt#IH^Nzvzcrl z9pF(_>HBl;y)%>Ed+(Fpd+&`@QV1cW0U?znLqg~T2uSac01^oTBB(S46A+0A>RPs` zt7}Q<#?&oOUt5%82&gQvUSBv)p$RSNQ#~w~ygp7pvvLzM=&dMM+`S zs4!VQD~TIyEwkFmbxpWb18c#y81u-+JTkTOFq0uS#)L-OP>*gz(>G8UifyKpDSd?x z1A8IaPqfU4L`4Xh6NUWxiF$60=CvO3x#-$6JUlG77#`L{+iAjgax=J`ofrogA&i!! zY!{4UWo?a$B0aUMi;IDwFSUB#X7t3qVjO0*MIwSP;h87|Zz(7Th5+rIWmpir7a#uQ zx_1xG+(XFI&#d{?!SY?=gn0JE`qSm1bvx^xTXVW1q<#;v9zR#S`;BGC{aJH{%XH&G$Kppnrq}7-u-4$5nRGY9;Dvh1=)XoG2SCri0xt6pf zDCz>HLG4N@nUc+o&PS+!<%syVv(9Ti7_PdW(7I=q^NP`h;$^J5=DW*xH_X`c-e%53 z^WD5dtZXOXFZlP8o=B{ZKw?oDCx{-e_=y@$-XyNu6lc4 zY2Lua*7S}dfAK@|#hvryuiX1Ry%&}xB`v$q+tYt*dztZe<25(GU8COd)zdXKPk*&T zz53gmYfQGAlx@GIc^XHVp5^&K;a&>Qf(@*(G}O`=2P+~I5}Yjz)xNsyZp;|vVu+71 zvz5QEP6iHYZJi7Pk~$Z(&UBw2+cw-(w5mSSU*9mXY2Up0N7`e@+%no`C7- zwdNJHWx8>ZM~rNp%?!+ywyue8A^DMxcMn$t&t7w^p={~Pn{o@cz5JMU;l`%8 z#DxPn>3Lf^(=wMFCLd0G6CCeuEO1+Fj67#7tKfX8R2>ofg(B>T6Dq)5Qca>{J}Q-y zQ>Zg(K2_l~o zwF)krPimfCQoK5Y1n-?0R~~E?UY)b^>Rfm!n75*4u!^M34NdmSgOj2&&(puCExkaU z?=5^kXG5-|^AqO&?n!=D!J%PxVMl)Rm`(Ti&Jxrrb=&m4vd&uWP|~cKZBw|ZB{8vO zQ=v`j%GNyNq}I)NCt%t0e?^w0D>^d47b$a3PUd-=r~swf2&G@rTR|vEyq;n(NsNeX z>{P%Wxx~G+9vWTNAbZ6K&iLcE%evz|v*x9gF3otcXx&IzXf&+8SUPMNuDX8PSLZ%s@9hI29_@?$ZW~=&i0?t7*p6= z>=#(RBp;!akQx4-2?2I^mkzO?e-*sU7dvE~s&CB&BJ)9VFH+YCBU@`iRb_067CeMy zJ0cXMV#)J}*b;CLzN=guDQt_e@JaGtm*1O_Q0mp_8er>|5nVm2FW)_O#eDG)?>zB^ z{M=HpvgX*SrqJvN4}-=OdyDh0f7(ahMK%caM>c5v zkxqzf8rji$83gX&k~Bv=oyP4TQ9+qgy^%OIRMVFHkCsL8olnfC{>M@3b1d%4ai8XM z^sPP8d*_TWGk<=9LqhXo)c3jqR7<@^(C>k<jza9RBrVqmR8i+#C_nJp3+x{Pp1lVS~v_pX-i^sotk9 z^u067zohp-wMtceptr=I+B!Gx{*x=v+RF+$$*J}-<2I9p12@K|77b(~-~G2Sv6lM$ z0i{U{3I;ft4{(bUpdAayigrl_286&1oSR&xHbq)C%XnwOGnVrXb;g!J{kA2{q|ptc zB0Sv$_|U+?9K{dbCMVX3qp>p+d`-w16DJQleQTw{Fu*oxR=T_R&Jt7i02e(MePWu( zt0q1aij2rJcEPE>cTT&j%n2D1^qgnS;_Zm5PoO0sJLF1A8%^R8{xtT<1sV!sd#>iD zrta<$az`cxY1)x=29a4lc?juwghbZH_juj$1b!r?Ze%4egF;8#$sE7k!FxubZ_CCM 
z@1m8nw(nj%wlO!e|3YU-ZN*!=x34@~Q_`Q;IdgF`NhtMfa|v~F&4`Vkcf2pH{NOL; zNA?+*8AMfthn2>=_C!=K%|^XSuz`iCg`y5Kb`8tOPBBe^OrzA_zdrz$u{022I@o{Zr|UGvE%fIDMmq?mI7% zd1GAu{TqJpAmh{1wjDII2T?UQ$mkGy)?EA)s6(N$Sqi5?qX4g#;^R};v&w}Fo5Gfv zXPw`Zy>etu&$GSp(XEH)H>1?a5Uj;22c=EzS?(_Btp(ZhlAQ_ilD=4bV<4mA@Voui z>X)|_WUjrmn0R}ktSN56#zN~^hnHk#b*)dDaK+pfh+FvIV{SpnfOgcEGw01)j73rD zDe+Hw@~{N=?Io)$NancYW!E~+MR%0aBFy69CB>`qNPMYhqpQD-TV~9W54uv8ofC>` z&aTSnNM%*N)jONrtb;0MMyeZ}%RQrGRW3dbMxL90v5l1j(;cGnZ6$WdP8KW+r+T%& zA{<^^E3F)$>&0koxlRCX11<$5t0)}8CcH)?L7 zcCCASR|)SOWD>CT`8J#RFYYRzUcWYDQ)^;k>n8G=RFeUu3;wr^j%bywf!x5H(hv4l zn;N2}pa-aj|LsH{sTp++g-T=Y7hDxm&=D`hA2=}aZgjl0nZbH@%fx|p?949oUimkk z??P9=Olc3!W6vHgQH)kugV+X^6{@KwqwKpzU1IVB@;hhvaM=+zTq51#mRwjqXYaxo zAzsr}(w*%#WA(wpO+>yT`SF*!7XSLmx~PuRy%?`b^pI0d$Kulga=AU~WjstVBF(U3 z7AKVnELJdOH2h987L=0a7|m2ks90P_O81O;XEzpPAN>C8nZNDJj8~V>PssP=gPWdS z@$~DniSLX>d2SvUGAesPY{~AMJ)LhpQB!yP*NeNZABh`??B9|Jy{ur>wZ^hHI32 z#=d`^+x~}Rm7Fk&z^G`q=6pwZLI2^Rjf7jCbofeV@4F}Jau0kxLjLsQHyD38GDaNO z_$lBUM#R>@z%SZWp^nyW3$+fgMpHLKYot)^%7^ogfElmN1!Crl30NrP8`eDpB^0$3viw@5WtKT|X)41=gmei%ET1X)$p0C}Qle(da8;&iFviB~_ zvS>KDz)Z&9MGB_SDSrvv!UlEduoBGl^<8~N>dt<;s&nI`ICEAS)^xztGD24X6@r&k?D{iH}Q1QRCC<#HQ)Y<<4(GGn#hGgQ)8D>%y0(i#u0SG z1I<5+TKYb6-pMosF~~$y$}>z^4r82T=u^#$N*;-dr~(AVJVL9Z^Y>E`{mVTnQaIiY z6_RgwCHdG#RxBy5+*m_NsU*}lF~HGSD4j^VkvNk#b__`L4K2~qQc+)dl)X=i5I;0T zsj23fy^E44IsJHt5>sGFdw=!=)O6>^>9oYl0`C4xegPV#~Vhm&0v_jNEpTGU|CQK1q~8I@r~k=q6gN6ANz7u%E~J%xymIu zZuz_0Q&umtxz4?L=K#0mDQgd_wzu~-pIDqU=wH;HR<|zG!kuR}J=`)lt5n;h*lRp= za712sj?;Ixx8l`)fuPZ>Sa&sWF;gqB+Cee#La{*Uzo}6~)l|aMOt| z`c21yq1Qz*^qk|XmlZAFGlLI4azq@_0_p8KczY5^Z}YXDJ(Y*o)kPNWp)xKUA&La7 z7{zD^W)W(4cl}@+8^GM_so|mV)TSd)|Btk4lK+D@f%_vWCRqSZ6Xq>t@PPu{!D;RR zUy;7+?n?K>`mGI-zOkioUe*fkZ<;P%k8D5skvett<)xa>#z|euGYs*1`K&UH#aP< z!Wxb*8%s+aE{?=MSRMtXW)}N@#9W0r6F<6+3+@k}8y^Q(TOZ5ne4_2@%DIm(PQ`?N z$X}L~wImPoNrvC(U96)u^BiHP?TMYj+}zH|nNtJ%ipgT;%mgQ+s25U_0`=?_ST?xFE5+y(J>Ne&Ler!=EgfxG~QC^3EW6y?afhfrY)<6Q=eS2D5J-=f9-0 
zk*MsPcu+3G=+u*l{7`)b;napass^Y?vcQRkOj=BL2LHS<)VWuzFD2Zf7^z*$AXu&OF8N;V|}qS7H1C&(daZKCt(KJ6y?o zv6>%ar(e8(rsZz4yqEh3qZk$c>^;QBI=4{|Rb z*R12cu4}5tT@*gCxie=6RV6!QWJKu0p{3Dqyha4fwB_(H+w)5Uzk6|aONPl2r`D(c zbrS322)}(aTPcb^LHl7~hBoHxqTym*VViIcM_j%lr+^58{{$8c3|S7pqrrj@&`sR&Q@leYL3}gO3Y;OAGlZ*-!U5 zr6klMNwI>zb&7_iLqTLNM^A3yvnLmAO3vKTRlccYylB(K`MuXRd%Sd4{i1`V0c9&nPjXjQBrnR%TAY>AnLBrXOKep0p{5xF^W!SQ>$lIL z*xz9bk9L6uvX&KO%uA(K0bYsGHLEn2%M=_Cv;^sFQ@UpUcI>t}q3{f9(Db+_;`((` zBw%eMAVJD2LP6F4*3uKTw2a`3*HhCH7Ceq_5X->O$~$@xBkplGp;Xhvr;ck94_ z6J4~p!Y(+g6q=J11%;HeT ztLx_-RVR1cT;GUqWWkRgD40+6tT-YcV&{f>BZ0b!0E!?0@{<)^NLztZjMM8GCD(E+ z`H;o4J0})Uo^?!fm((3QrsG?5wc$t--oO(n=LPSxrCb+6OJcfId!%-n*7|{ynV~bG z8i6X}gX8HN=QCy-*08WQ=g=oh`0jBy>czP3e(+ic(_vEw@Y-9)$!iGNfF93*Jt2w( zu7tVE^-=j|h@n`FL`or)#$rN{PX630*2CP`0F@d_rS2+-|1nbd!&v3xOuBb8kbPhU(8y!*L2F z)!D~KDA1e{k6bsKv1diYj+!91)VfuLUE61vUN`Ok;>2Sg94sw8@XP+QU$5c4hlj<} z3-=V3?zq~vXYLbgszXcmHtqUoW&MTQ2X`I(@qGOnYL>_@BkSlRHa{hzSrL#2mLbS` zHSw}X*b?P0_9$DDG-9nvOsy=s@>CJgMqB?EvMO0R&Y6lQ<(<>;L>~sQiW7hx(kvJn zGX2SrqAVUKEWpOvpt=qIc%UH2Wco%%_7JZ$P)a=l0N$g{K)S2IOkma_llrJ2qz*H8 zIYcf0OL1Y{@aa{59wxsU7yo{ABxhSQ`WRjohusgYwIB+Y7fcQW|WMPsq0^9f|!;ktGhv+6Hoc;q%(vC zIg&ZXW3@Jv6zAYGh7uR9&8pnn5@tA*v+iPhQCGr8ttWeubWS1*M?>$mFBCVePo3f| zQkXX`PIfc3DBSkhiJF>GVs)*{+SD^qbj=^Q*m5t{VILaqRDGy!u=a4ZO@S2bD6q*~ zUJL&tCxdw+_`w>aHASe}=ZIr~pPw(mf2PJ}3bl)~vnlfhGmTHGkAlQy+P@B~6*599 z!IebGL-EMb!)LR2gd-l6i{stX8`8cRB_|i2=uP%5Qs?K2)ior)uqh&=aCt%N>Eu&)CloNM=c1e$_!9wzB!=Ei-^cm?TbbFM|wd|p$g~yFa%#N z-b2c0P%t<{o#Pb5%;$8;9tq8nIgWrGoeQAS)KLMfz|AFkFK}1YbCADes?}<&nBX%D+z98QEnw%xaNBK~(N&J)Z zhs4f7yxB*dBfsJ1iK$OoiZ)yq&6I~v?#Pqmx-Mm&lU2d1_$=aJ&gdaS^+;EaOGvN^ z4@N1HsUa|(aPoBQ?GbvU)=7hdF-4EW`Mjq`!Upo>++TaKu-Ye}wm7vpX|(&LuG;<0 zF_Dd1#oxcyuy1}uQrF?ymRtR!$(?yc%~7HA3SZ>pYE^cK|BR5J!q9m;i~Ndv3u78* z_-A|PwqzD8D-X;a>T2&kOHD&Nx}K_M(ppT;Ne(;zFzKgY&IV^q(0 z^VZld>xsYMqu>MdDgW0R1$SMILggsGMsruxIK4~S97J^=hxxD-pIW3q{R1+@VW@XT ziU&|F)j-lx83Ft^EtRj=e9t{Ms`+%xN$DC@yeK!`Hz3L1(G>J0#R4=xEE^ptQ$5M^ 
z9vQ8B8t6OW{L9NQ9%^2Z8;JZ9@KpuHTr%XfkW18Vp?q}#jSB-%mWl=t9(RXyT@YRR zMYjvlqx_w*ANWlAI`{tRR*Y!hhGnXdM;0Hl^Ump#ULQ^+X#p#uk~Au=CmXe~IGHb84hM?QZwY(C_5EAm{- z&1Xg=8>%Ce>MEIfmbN}gO4HEJO7f0zDsu+#rz^^Y5}<{S^F|~At^kA*%vrP%imKL? z9b*nQOT(R5dU;Fuygl{BOUuIa^~1`R6c??kOK}?oBIK;t#Nt2$qu`>1?0G59cb=!Z zLs8-4l28Nv(30Mw`aSa^#vBvsvlEMgjSPZ{lCm2U90``$fCgp?17?jVl_<{kSl{WMTKHS!SY$blUWolp$=SwW0(t)DDK=< z9{o=}l`KcZmxXm$UnMMKnlnatB#g-nXpS!`yuq8bd$kyEM3veYrtKdYCEtzpTb$UA zfMqRz?9QzrQAN^*@VsX1*Gobo_MYLbn_B7X6L{Mt98s-?pN2iRM2OGR11tv5)MjAt zFO4pbUA!j0|7z1{bn}edHNDb?il#%>?v}G(*g_J;SF5|CEZrntA*Z<#Q?+{>2Z>z4 za4C7fl9<%PG2?AxWb5edm!v66)21u1M|@e)fhWn~SDV!C4XAdHX;Uw&6v%j}cCYBb z8r*4_e73X<{4x6Udn34jTU=)}tkdqD1VQrRviWN3Nhm>sm>=n%`%&~JXz0BD`MHs> z?_XCU0^$w&{kAzzESRyr5iy&I+W4CAu-X*79pxNrF!mJv-cE*wDU)mU z)AW1qd*eoqV%&Ov|9U!Z^(c4ifnhl#c6hN%7@Zf{=uE$V$jEr-N7)sR81o?m;ysbm z#D9*MBt>V!E~0Y=_1yxzPo_Uj0Af69JA3Nl5;}ceE9&4?ralv@RdaetojK>1UT;^Q zTb7)%>}>b4mpjg)2MH35`AgAt$qkdh#DkU1=Q|Oy%n1Seu=LgXB%RIhx=6lwt_1YY2AmHAcJ6bX|#s zg{390FHm?org2K1W~R|o!Nc?|=A59~JZhSy5I<>tU3~3z@-BIGRQ$epez1Ra4k;6# zox5v(WK7FJPS8xyqs||XDh~=QjT{P}y(}kZX>}Oz!T*dZxX-}Hsn0tQ-XY|GgogM6 zz|2~vwlV-lu>{TsM#bVzU?w!q#F|7h-w2_+=0WgQy1~597%f(ab}=>0HVsH}6Wtz$ z%uDh$>&Y$6=TpiU6tf=**2}$ul1+|g%OVa-cRYY{RRLAPoUyx|Qq59UBuBSS0un?D z{s%(#WzGBKD#cklEAN}cSzAU?vd;n~dj>_xcFtJLHG>H_{SSaO9w$Vm*oYmKq6o-c zvXlxvkzu63WPLwf(hoMLpS zM5Z%lZOtMFJPRaDY8G_1Oy7n8khm#3LyzDvS`8+QWIu$bj&UPUYglr|!=}UoUr9EJ z0Zp-7j9r|yDA49oVvdd`A5DP|$ATvHn)Uw$51M;TLa8_+KF+jd9x@QVG?_vH=|@i{ zYd1pACA2}PS2}ML`5yZ6DJr-p4iAZfd{`@>IN9TfH7;R3pioe&NuaB~+6ol-) z0VPO4mfeEWMWr^*G?m)aMS{FZJbyB_d+#dm^LS$Wo>{Z@btJvw8dn&c)0x8U9A9*8 z82}gKANNQq4@+N&Sk|3STZWq>Bb$d?1dltPrZ=X$#rIUvAGckpx$i2K)oX!D^&FEY zVbV+uF;hb}Q;BN-uQLU|=f)01HLNcxTHg>o<`9zNlUN(WC5$$nY0qC(6LG*PGTYxT zJJK1n>RY?LHaHkxxDPbGvE?yNVe@9tA6K6&n|jqLraTsNz55a9V?tpAJ;;&TuEqvH zHZgE;bTlv|YJ*9&no=y|1QLycc`=nmmwW2YPMaq?KQ+$}u;Zf>zNAN!$$edavaRLJ z0)@@(bt&^>q84UxYd{{n?8Up^C`M4DOKp`{5XP|QyPOwLCx-Z7Sx=*ybzti?mRutr-32ud_;pV7S}A=BVn3(`zrUY_E?@JK 
z#6t5c)vVEg=F;d-;iCK{rr-$FIuQUR0~t^K0_K=C0Cv!f4u_GJ8<@&baKO?ESm1(j zC#*F!&{+8e4SG@hHSU6#c#AV$K{~|@`EpLUt4StTUy@thM*B@HkJCN`jjEq~GD7nq zr@~ki$JvY$xX8Yvnu0 z)sHpLl65a~8Vzh(0IF+95}XU;Lqk5|0s@zfLiRI!`2p{VC?$JO>c5XnbWiKtMtI^rmhyDZGwihdiD754oMNWq=*3aJF!NJSX)z#C}(idqAmc+>m^*SmQ z(a{W@8l9Q$zsbsL#w;)`8kz=fj5XsD%<|Xaa+31#o)3q}?+9TEaqbZI-Lk_vqxcBH2$8Q2Zod;Qm) z5x$-1v>)ewf&CdIUmW~!@pZaC&U>cpkGSmK{*3d1gM-`vrSjV)KYg#1v>$>p0eeN? zm1fxZvTBv&R2Od#PtTwrrEg%MW0>|`sawT5G*n+-A@Pp;-&NW`?fGRY)lc3<;dc`* zRioVTpSqV^4Q!N8>8ZJM>nH9jFDntV`P0gcpvzQQo-82M%Ffs*HB}Z6sGpUQk)E5@ zpJNN{Atn4%ZBcIuGqV`pkkp_PYR#13I_fv?MO;~A!WOB^4B(SRg=kW{r=_m6cYuU9 zpY6{kI*xLfUd7mN-_I;WxU*?l)ZftLqmL?0z$ji=5@6WXZ^OjyaJe+oAhCskBmOb#& zc%lE>ORptCwNLb>rS_d{+P%HLFWLo7EyQ1M+ueO`SxVZHqqSkBnUimQpvf;`ZkmTr zetXV#)bXJV`i>CqU$QtizA@i#v2SjuTa2HbIO3JvlwPnDtzW0TwtY~Nw|}y~l~8y3 zv>^(oEzx0ZvKrAAR870aSs3}XhP$e!9_7?lfbW=gzfK0EB6UiKUwspj$S_TN6${alz)Y$}RH}^az>whH%6n zwdpbWt6I1<+Lv$&^4jru^-~>Yo)+dFW{aL$9Dk=Xq32Aep|zozi%Hw@t~k64MMsE) z-eV2BhV})rFm!KKIku`)hBBL0dW`QohUp) zX6uX~J+4+6T+o)D)m9Ls&>U3+6|@UQQ=4v}`v$N;h9au_GBf+CBPQO7oVy~2+F?}h zM*IVraW~+5wxpJhiG`lr63GC(+Jc3rSv;L8HptMkRkC;~WIXNq#yv(j_mZ~!U}EGE z;b$eT+%LXDY8v;83bKUvYhIf`PCF*Y+*t}s;5CtA95IiBV1)Isz-dM&B3m>TurW{@ z$+Zvx6ztKGWdiD|lvslSI+oE9r^316&QbQ(a$8d?KMzCk{pG*5Yoe3j`?3-2x#ZY!d+d@=?)oEgWdRJ;SPRw^SR6fEreHW>cOkN`XS zeHAUn0gGpR@TV2J+5pO$J2H)+7>PX$V6mTv<`48b)(+%kjq&HbP^-X??qv;p`-^HH)4Y_ei+(o)3(#74Rli2?x zPm{o`&`#w{b`H{H3uF0LM zM_>P0n>uQPB9i;pEJ|!`TO|$^xO)zVW{b}z)x~IS+)1Oi^)l}Q<2Ny}W8bXGJ?)8) zd!^5g3NDOubBilfyt~FH%qJ?w!fR2xPq>d*72}(us%!EoN>vZ1t)H*CHKQ}%3wAJh zMl7f1acuW#H5Whh#)9GDIQ*!&r9RyqORZyi1GSmGkh#yr(@%j4R>MXl>o>yTuj+`J{$8 zIE19i7q7wkMaAkCvN*;!*M}`+*C5px;ZR6*qxSgMUsKyXV&UP1#AD0l>cPQ?rbEr* zr}#aBn(kkJP0e|?)W&q;K|g{M#C#%3wAlq$j)g{H9jVes`8aUzl|k-$dH{rRGSw!C z(WG%{5hWT#BBQOlwWo3lb=~ueJi=QNCcef85b=dmhLaTZXt#-CsJ(qv+yBLN!b_eL z`FZv}ank!3PJJH*-iM}1z$ftk`##jCGEU!z7cZVt{uJkmJgdEZ&9?uOgN2hV5eLw- z^QO!lb_k*iHWtRb+FK^`B6M3Qmt$Sd7g%en0cj#XaM_{gK=LEm1MsTUt~)mG%;tv3 
zlKpS5JpT6qa-Eb6byjSu4R=ebQeVEl=}2PNp=<58AN%6zxl2AefZpc0Yfd#x6m9FR z2+>S%fPX`raUF|0Pz@Wiur@bRs)5AdLFi&mn+1(5Fu66YL<&wcI*lZ%H=*jO8--D4 z(>k+gVU&C3q6?$zhbjlBbUoD8L*;Dk%0MSgLUX8kAe(o~lOdV)&YL33mhyC7uJq>ofXl)UfHiF=m#$)!`Di@(bx-fxSaW)sh=C;mjd^Y{R9-!m&ui!YNMXI7jc z9cNaZAv465Gb`}?i6-IXU6p(Slw*Bafh-6D2zcc3G6-HOKr-B|RWa@=+YdE=JV`hw zazgI4ntatx=QhzLx4kTAN0_B4^<+~w0DwzV6?FQq3FS(iV*i4D~KmA$UnZB*0T*};cW-;RdPvwmc~J0 z*T$ggq6}E-oibS(f<<(iQU?Pv?KjJ5zwy(4n||l?-=^O={VAHI-Q5k1-CaS^QGr~E z8b7PM17jis$E3^nG3e&KA2ru?cSimZ$DLL-=P|OG}w=Qj&N>`-A3l z-4DOi{pqUq2jM&2pB}ge>lGl*BW?0qkT?{pissKcqs9u+g{%l$(4MCXkTOv*a%*pE zrFRtiop{@#QIcGR4k&yKiLtd*YEEo6TwcF#lhN{e@_{iayzO8tUX;&j;7D5Lo%RM! zyq-noHk$8C<8L;Y?@v!O5ua7?CS>M>&sL+Ajf3XMQkCFlppScGjJW zd7OA9hoUQ8K&JikN_)Xa_A&5ORI(8gM$(B=W+tq;dlkAR2o6{?LzTpo5K>zy3y2Je zq@Oz?9uzLiU%}_Tl|CmMx%(ITIey*b=ZNletF)h+^!Tcww>rukQjHu89ige%+8P@h zNVP@KQQo0+|NrUgRp0iJT)q5UDH4GG?E=korPwUU{~?@Q#w*Y@$HELsx}>4sgDj*I zAn0%mDK+V21_#p3iEk!di^Sb=Q#zkNbse40zgW`y2mW2Lt%>Xt-+8bBI;ep5=ksL# zTRr$zB4ysGRx&=6aG~A~kSuU8pxvipGTlOg#*pSh-G34P8yJ@RkmPrWFNh!guf~0o z0I_6)xMd+(!WI0427Z$`v7GD}*3%1h&9oyIY6f99^2lFgjkB?VjCEng03F9y!E5ZY zCVgBf19a$@q85w?a$yfXo+0m>_6&uZHC}t*8GOdrB_zJd`$T{8zIa1Qf5S5_h@T28 zv9EHREXWohy;xdI!2x1Tys(079+tqXkTUzE+9<$?Gkl;Y-rxSo;gZC@^SxYYXQm5@ zY&wKy?5v_=;@gp;{qJvD_vW^|y>Tsjs}~$@jixgf|G>Wt%g72@oHWyj5JOuB)RDpl zqmW=UB?N8+I1xZV@Zllj4wc#}>c@i@5-W0<=S&`keEtjj5-W6xgQ9P(zCqIWXhjdt?rZ zdNOUqgNh^}9(Vmsx=Zf#kh?C32joD>5m3yEzv5Yf=Ut8VT(4!H2KGE1gRf_w=FK6| zq%lCp$Fci&wLU`X(+^3t1tz+ok$}EzhJd>@1Ru~->q((8Dg4c3VF+?tDQzI@gvQ*c z&mFm!${6B#9Ayr^`}cERUm)es#+h8y?JG9-_q5^%+~ys zJA7S8*E}Ykb|%mOf;6ABb5F|84GmEbCf<}=ODkZWo;ZtF1#X8-zG}+*P8}7&{VCP; z4sK$Jt4tcAU<2@S0S;l{gPnp_Y=oh`O?Jq0c z_p@dH+<*64YwI=5-e)%c@l5^k?A1@tdrtkzuF{!%Zm9oLxou%gT=UkMU#|Ye{!-Wn zi;x$81c;mW-UyyF`bNy$V~!KN7IFn(DNTDJs7I4m{>%FaD8ai>7dUfCUfz^lX3bk&ud>%!HcZC21*L2ddf=TZ)5J=qB4%d!##=N`QEr%m1*`o2WgP zzhUQ+P)Nf`IpL4H6zufYNOy66y#Fpp=16fwv5%czeehcBx4yBl9x%q`W=50jj^_XA zcpaHm>BN6Ck)x>BRy)0e@=UO}@9ryk;$^Pl%W_*5g+Z@W${)ZRj1A;a1(ZrNfu&F} 
zqSBr+kTK02y_adpeW6Jt7GvbQ(X}Wf5)#1DZr+!4iB%Z{PlH&8T>M-% zPg#{_tV*4bh`WB&u3fTpmm?rcwepXjtkGiqK&U>+^9BaiKt;9|fUf2!Fgd`g(Mi z(0Dg^BcTOrwkQs?Wo~a>?#9DvgrU(2SH{yfNcF)eSwnsB$4e%5j*pYq)U-JGUcs6e zhY=8+y8D&<8Y+PFfpmoYCb=^q&dv%aCj&2(f`%CAoJ}-@11>0O8g$8N7u1Kf2)XC! zpm{nJa3f_Xa#7{_i`D$;{eK!RE#H4rz46V>SrgH`zW2KpLbtgpIx-^^qk&P9g!2i;W~Mjgod1y)&lS?a>aq)eD%0Q~|yVGnP17uU- zgxcEZOXXLzCY<4PB0 zI+KW|!TBt!h=R^M%7 ztxxmk4WNJ$`)1-dn@PtI zGW*a1st+O{Le(VAzLCb)4n%ESH_vj9ZG}3cX>~m1x}lRg( zA)R&}2>yZ&WZ*B~Nq6zj!7JWwzqt8{n8Jp^XfksH zQ#A^dB?dwl@xP9^XO#S73>_0(qMS`lvy9QTK{TbvF9E1|m zDcL#hwiM3PouGlF1P4O(lj%bV8TQ($PR;^+$tGknCWugaga5^hH3#CLp zA+%>QW5_h9uALQiG*1>2-ao4aoanHUEY<)ZW%tM*m*uU$*jD+}W2K`x{S^(5&S{#wlbdS*NOD>MV?uX0oNS?EvnI{y^A?vH08$q~9mtsTn1c%p&0{L)wv$r?{%$WX^XU=1L`lAMYf4OZL|qk8>t)DeD=gA6jy>@JY2LU zFKF(e2Ag`6?b zNYjXLp)KvWT(VtU+%}MuG|)!suWLS&ur0WMY~P+gFOnmArnaVI7OrUb(EHNECN{DC zaY8bdm8!}r%T$q_-O)R))qxw&zx$PQ>;V4*)_EAOH*q|=eQ{m(DNDS4QzFaG#pqz!-3_{g-jQAHuWw+(4 z5M_6H-qD}q3OAgtYkRy(hoxGz`e=Pk-?hhbq$;2d^J0tpXZ#~(VQFakl7r%ZgqQE6 zrPfE2*ycTR=D#_=;mw6xf4(V`%YPEP5)FNXqNKP1(+?8SSEWMq%OpHQUmb63!jwR~ zBui;z5>b~v0}cWCE!l{?5~Tnzh%Lc6;2&bp;H=K~{`V!X+}Newn(C3axKuny7A_rK zmF5y((S^pYh+EiwmDF8qvEoFdzh_2pVr?oS{T?n?90E8lZ{^Y13-*_B%ZB6B6>&8g zp2Jbi2N%RPmZgQ685P?`+QE(sxt(3%mOV z;VZ}pXH<$o^GZ%ia>T0|`}=!D7^3UAR#1e8u$PtOKbjg%dQVl7@01o5)07f}-W(R*Rw3S25Tng6&-c!4%i`p6-ouR45Z{>CiUc=osN3@nfjCON z`(|b|Wqb3XwOO89kED^Y1HSoT5hWq}3wA9CpP3qJj=&1Qt-mI!aN<|co>_RJu<%$( z*Qw4J;t)SKtspeht1Y8v257fTe4nRToUZWBQp8k~__&lnBOGXVH#b6H?c?33fs5b{ z)p2w?N;C9y7Pqvccu&T98N#16Vrp!E;lWifgFK3C`}kPjj@0Ch9pX2{@`>MeJI7UY zX7YMU-of77#QfydB4Zc(Yc7t8)FqsKO<9Xupe?s-D6yxkq$hQ#_Qg%H4Kvfj%!~{P zJg-7W-Kq9Xensi__D(f(90rQ}(#4wnbKcC%3!KvsJw$b-W#TPZ3KZil0@yythK+S| zb;S-Q2N-B~kD>qK-KdcON5SOQ#pn{Duy1tt9`aTwuGhWQTjCGzRswvqwNm^7$}L4f z!Omq1`htz6v9I@*BWVT>wl6!{$U|$%=X1(h@mgdQyA>W;5_Etnrx7#LLd}SZgKf&S z<*qJmuuF(Cx3tL4GOLR&^#9lNx5`34qHgRKtWZMQ zANo@F*Jn`WNfO#q7z;HNw%m`6!u?LOj+;Gh8w?S12PX8aPweajg2GI8Q)n 
zt+;b0q$t)UcHyq-dyFIh7;^IvJ>Nck#dEso&#ki>uMt&T3(`YaA~DmJl*>O(4PhWVeyu3Ps?vFR%REvo?Cs6m4b`v-c$%uJP|K0I$t(ih%wR@Q zHzRc?P*RYlrd>}(Fvri;-1HseE{@p-rl?|P(e?Ad`hnwQ_8urNSyL0i&4;V-eP$YH z7cJJ6giT*H;#;*HTEY@&Z^z&@3!w253qu6ry`5QWCoB?`gsiWVwx6ezwuKnHG%d(% zt>ojl(Y5a#Dl0qm-rB{_r6;8Bo;7FZ{K&}pJLlA{%j7s`+bfnTj@$IH_1vdE-#`=_ zK7VR%V8B*)&)n4~YsLT6J+&qmOv#;01BTwa};y!32${r14X;APnb z?OAS~dA;k2|BT4k6>;%>2^~ALXARzH_iw*(V3vpLE=QNl?k!pV*;~3ZA#J=MS95W$ zX~zcC?O0hEIn&G%t$$rhR@l7h8+T7P5RTvJCm)U5;J{|Jbi@Lh*M;5C)f_zny&KZ?KJ@MzFOODlnHw6>xN}bXvFhu!Puy%; z{j0+j`8$42GGAWy>h^-7J#Vev{oU^Ca}L$5I$EGAAE+7S&6c^Q&dZ2v$n^!lRoBSE zu(*yVn&xb;53h)uKQN=|P)k(f3#)QFQ-F>YJv-eKT6L&6L;ft&s$m~f?O8xP1=je9GTHud+rMFemBO;wxfBac*-^e9%se6Yc!IiFOfnKkTUuLKs=9umWm<-g!AdVQE)%2Co8qP;2W3zoM9y;BX;qS<;3T zL3!z0OIKnhp@I#`giC^vmuX)8WcjFA>FY#C0eAy3}m$l63 zqM01cyh6fhZ<{Hz11$OE>uD!SK9?r^!&9l-)`sTg?5jL|?RqZj6zg-$@3KQ@KiHOduEpic%BG2#{g5%aQHh}B1GKW#{L3q$$@!W&pXoG{-~ z07L50`MT(H*t3R5x#9^={*wt$`MLj`Q0YU>tt)YUHHKK85Z>ZnZY*`yJ zeEd=^>nbb5n+ppw{rnQbjcrZPCSHyrD(J!N3%*MTGmLndeNVT{6ABfov8mgek|s5A zVDF$}o5HDp7d`qO`A@iqpto@*dVB%QHfO`!xf}Dgdgew)=XiML#6;zIa?SWt)vAn) zm9y^u6PcO0y0UgdE@?>kfIQJ&#M3OH@@)Bj&wmysk2>nkaOo>KzvVM zW`VqUIT9%J(ANmrX%bSsDvIz(5s%{B#XIMSs*lKbh#&GVh>M6Z84&x#4zj@*&#A_9 zOtsGe&(YJPOhgKm^buQYf+{U*Wi6WbNj}F*9LAMs?l5;%eomp}04>9= z)Zm^|>`H?6N+qs5BOk*(@TjnR67Z>$?4Bg;m15lU1bs4xhzq+XpWDanXIH={fL{eP zNNy+hrM!gkSDI1iCEJEg(Zb?fN!`Dom6f<52pA|9^5vnlsZ!(%D;ruTAHnp6CX>(+gPkKn20WG5L> z6iU3yTtP8BrPL3QIE^SU|2%4A`0~1emd&7b=Ian~O2RaXZr zTOU`Kx-7D79DIY$6wf^J;4`&6Uh8F4#kla~2z{tuk@Qf%hN@Wp2lJx9`e! 
z%Xx=;%AI;_=O1_j3n*L8ZRLKHkAdRs(V5!@DHDdAIW=ihq*w{%h7p!hpIjVG{H8mu zUZ>Yu1pDMIOfBwBJ?)him{ja7mua^0ZsJx_zM~~RWmCH-kdN~!ywes@2~a9-;D!lk zf>MdN3s+Vk&;9S@B(8jhE4y(8neW^gavoQHr@fLQZsZQrD_orRN;K}dN^2IIOes%@4#jccyPjg?A zR?O#dnQX!VGc39L7I#WkgeylTuOKf&_A;&v(<^lMHGd`}ToYpRklto2v$Ri=-fZWQ zPCI4iBa406#=Dpgf3?@S$D{Y`O3Q)gI(<5AYIj_tv{a(`iX72C%}mNu)yhD&=jik> zb_7WoOjxJ=Qhbu3(qvK>jr|ygZ8R?j!R^HS=)yhJov7 zYav(bLpB&mRj`tWmeP3xycopN28sj*9V6@JYat(1k1n~EY?g`o**GK|el1hs)Nogs^e#aYBnonbJQ6k>P6p%}or0w{o{M8H2|BRqufOi;^Y`!T86PFfPrOxI z`xYOodGY4xWCxS5Om!v~HB`Paf5u}Kx4)F1gO9iWXFZ%8yjgkY&6rpVeST1B9F4{% zpunf^{#DPM>CDlP=StZup#t9(bk*cMu%a&bcMwHKctGN2Yz-~Zn~x`!C=9|E1nTe;&&%JsT_%6_b{W~4aejTq^Wv|s zM6D3tiesVr_<8vweyMu$`$-zO*M)CpwfvB4BL*i1P2yF`mn4j>!rh3#w zfauMCMBW4ELGrNt3FhzcfWN>LuoUK2dRBVo1_1DsEUFX4AL=JH_jG4& zbE-a2Gu?#S#)Xvh&j}rKiYbW_Um4?H)ck6cBSP+$wf?av$2L7VWyZG^IDmDy%EMWbK^Lu$r}MjWPF6jqxT6v!f@qGKy=1ATar{d1amC~t@L#E;l05r--totTo-IZrMQM@oKe9Q|9HgLBYgacnASR&GvIB?z1T%9%EohGq z=EWp3r8lM@E8SzDt4(n#N&z4&EwQ%5ANe1HC+dSanda~0U#(lS)3>X||Ac(_+!tMQ z-+P|?^`&5qRj^P1qy2fVb!E=-f{L|y!!57xSU$c;^QsX84HsnYcfS!F+5DtDDRhVu z1m2Vw8uLh^!zsWK0O?~j2n-m+?x32gVra+?1&hE3B~j$P>s-#uWrsf45HdIPYW?2% zk=tl90!^u87ih|=o)jl8*Bk2X&f4_G0Jrplcya^8G9qi({NXz*}qcsqO6$6S|L+F~WERK`22iyq?CE z`>JDGM#@&Tc=9^uaN_s9y}v)N@#GSYWcSa?SyxV5D|WxOcH3($n<+lKfGz-kk*|l1 z;12I)7OII65lB~s=H$eM8mnF9CV6=_COA#hOM+Gx+HY8U*1sAC0qg*+`Y+WBOm?^U zDRzM+)Kbo6Yx;^=vsPz8i=VYJV{2wkNQ$dlYDi8xF{9=HQ|#~8`QMUMS(mJyteKli zsdk^;lO0tSQ5HgV`&k;_pQ5)D8inQC6qe{NV0GEv&W?B)L2y%@kFr=@irCb?L%e6y zy}&TGp8E8-x0;6Lhx;NFkuTw!ax}IHFW)?z+V@lw_tore zjgbLmJ^6kMKm6H9-MQY;UsMP9CHh!8#?Fk@*k-jC`eHwhK{(u!UkAUS9ooaNALap} z!TucYXyFB9ywpifGo5Vgt^njz2fgDtHG%Vy&1 zq4FnHrAdj!D&OLy#1fTw^)1cWvA4J~E&$T(xAPVruB<$~aNfd0m6eBnHZQ3rBBCZ~ zUSdsTWDPNbWL|V<5B#}1g>I^wHX|Mz3pzG}CIy5LeE|s^21w>c4_)|_0a+^rUYIbG z(8kSq2Iw6D!37(h%4W|f{?o0I+p4>E?d;NA*f(b{KS;KsMs$ma5Q?ZKJ2Zdh>`TR1 z@_H#o9D*F!3%{o`G$Y$o0}oqU!No-B2(z3nly)<9%%4>y(cEz>76DTF=5TT$tMUI9K9 z%JL@`-2GlrCVyT~K~whO_R)LE33?CieV%y+=Hre$`ElHlAk*V^vOA~~h~9CUJq@K< 
z+NV`xWDUDF3Am`zy>8ljYp0HkRcnP;G6IO~e(*{n+sj`jvxstqkKF?OK3QQLI|OO) z4hJLEle%n@kr&~@(R>LhNP)wj4B%j`VI<{D1oK={&B=JZ(409faZ$B%Ld?7?)g_D0 z4O9jC&s;K8qrSK*EoIrW{e|6mUS7G~1x4NYUUHx84qhzGsM^FR)0Hj$Q+S5S804AI=-`yzvIf9Tr_cd3Iw(oK zEUk7xtZ6C+vY>=#?tH=jV#0(IHA#(Jz2<1+lEFcR&*0$1Y5R!_JGi&GU+=`sPH|iL zzdYz?SZJ67hpDcDtB2)qMGH-t-_mUIO~g|3�PJ}HG?Q&#wR5zLv9qnRi zs4mLL2$16TED;l5rbq5sR<90pk4NMl7fd+0Hp7EgMI{TA2$WKC49X#_U2q|po0rvp zp`-oDzLGFwezAfP*ugo{To#NstpG=R#Y9jxyi9~=c444 z#`anVfBU)Z^HNe4?JUvWcYxiuUwdE4&P6FH^V;Xy`#aROH>M;n+6ij8dsdvm?Ld{D ztIX`OqZL6(^moV=VKwQLCoUj%f)W2ctn~<)BdYv`egpH= zFqeL>01%qPC*cmCN-e<4;Ps7&wgHwn6fCVRLRDH11^>vuEF>Wt(n{u}Bvj<5tqdU{ zE7S5T5>jeP(gs2w`4{f>mm(6l05=!k%*2->;#Dqg0bHi&i67}jRc7Kfb}^7c#tknU zl0DA6bxVV&GE4l!W1M`oAcPNVQx zRBvO0YWd)uZRFK$Najl+Xt6`aDes(nx0cM3{T-f5g9}1GKyYczDD_lm0}!)v@>{&U z|6W{tDJd^*X?7{KW#Nkv{;jZv4?CaA6j?rp_s7VPH2m8*S!S%h!7>#phR6L@&Bs|kQQfA>~y3xWiq&odhx%`9MzC!$t9f)CJTtx)N zr4fH0nLo5g)c$!o5p}S?m9_M$LAJKi1gHeT9^l`USxmAM`qJnR^N5ug8Lc?F$Suv) zz&gqyp(5~G%`;evx(mzmw&b}5Ezg-;96D1yy+OHviK^M^JY>1X#d8wzXGHbAEK zqEolB3ewKqPdjzGcoN+rUvzQ>A3pbouIsef|3h;Ivw!qbPk(_&Ek4e05_G-8Sbd7P zOg%qXdk-MLTcS|R$;sFl{hjUZg?1@RVib@@R~n2QwwjiegVwO_5BL3$b>bDGnf*!4 zs7rTHLaG1g%t7v^=JKs6zMrh_OiB*qqv+sYE;F+Vp9>mcG$VRH=>8F?r$SMouwXW2 zm~5deEY&d~JavBk978q9%1c$MeS!?tE@rSZtqimss@eL($VF`ZZi`i>eiR^4VJl$`c~yu z)wuY$a7Q%bS4TZ!Tx=taiA_#89A09KXIoqK?gi0tE&JxId$IBVG56+iRbAH}_}%B6 zdoPm=GRdSO0y2ZlAoDzffFn2pDxeI4Gft?9voX#_j2g{sCXIrbY^Ek@o8+Zw``V^y z`b&Cm=TqTIvpyY@c!0BTy_@ALcP_xh4>VcoUYUVH7e*R`E?Ok{}H(eC$+N+Omr9qON<=bC;&h%BKz(U+&vCYw&1#o{x=! 
z_*llCmG)EelHFb7iW_n=J7-O;J6<1An-ot+_Ls*P$oJEwGZ;FNp= zcHLaH)BL6JB4nCwnUEY%KLUyYMd5r+#Jd0)xc^gtYR;92SDi3CVlY(05Ofdl4Ocfmh!J>p&Q5B-jQw*F23J$`3~Nb&R@72tw(fxI*h(HZ{S zgf$d`^V)23!`lpi;D5spZk}!)c&`?kl`6-;eR7@g1z1v)NP|&L_Vp8EEiZEBVJ8S@zYz4|$Gqkoil+uGVJ(QrB32mLyf8v219Aeb9_J32bd z_qY9}uBCF;^7^FckOa$*j<8j!_`mx3FJ#Y;os<>D6v@G{KYC|eY;4?WcGS_q9F{VE znYwLL(~>Qlo0e>@E6&O+F3HL)Qto4ZU0v$`eE$cQ`gTXh+bXW&cbH~&-R%1Qd+D1S zH*U=9kTSmiUK!Wb)iwC#;2>Lb$<94vSjDPCn7vz@*vsSJ?qJMdjP$$Mbf3U2(>EXO*_#-h85dXP?VnPvUVH2@ zOQE9Jolq2FbGJ?G1^udlr;GA>$ohelE`2tbLoL}LV6S{`0Ts`_!uV{BqX zoM&cgNkL!Xj(Kx7=SIy)eQNXOR^}9%9q$pHUYcCEsvu=qQG8up{6UPg9!K+6vOD6# z0h#U!Y9)~~-ei!RV9Gyb7=ju^oZ_eT9h!HN1!ZQ2RrJ*ZzhaB%X$Wa{@|G^LJFABN zF7<(8$^bh&=ZQow464^~81eiQR{_?j0m28GYP9BfrM{wVvu1BEF4{h8_O{}_iD}`u z4KOhyJbc;&_TKK!!$n1hJH>z0e_Y6^4GF2uxsWp>G;{{p6*Gpumrp9EP|F98+->~$ zAQofi5VUcmM?SL1UOb2cKZ5dXoG#8ne$^1a2tX(9O;0pM1ZCGo7cEVk+EkiQmlq<( znW_)9Y<^_=f`RSJPcNOCy7%t`uJ@-l@2h^hptUG4uQNBZGe01$G^8PAZPoJUmbPDQ zE86k$0DZ^hgWp)%Ig$Q&n!nrV{**K2`FB$h7UyKlXp)N zCEwfyvhqtyiMv|6D`|^wzUA-o#jKUbXRm&xJ-y>ZUEPV583WT=3JRN3Qd)`%no}Q0 zY2IHu?H?EN)D_*@nu*=VnStjc%_h7`4wd^XC(lc6Oj%vE{Fx=~mzoQE9;&a|@z>{?K;IV7_d4bjK$7KeYb$xV z*c+uDeX%gPU{`+>Dh}^K5-2Krqq9yEbC!H47pa852l=lkT=(pvIsN;audKFvcFdMV zki2h+bHn z-Iy}4tbfhi!>uWi3y&^6b!zR|`5~EIC+F54Tb0|N+EQ53k{p=7C{21GYI=NPWq3$Q zZ2f`hp*8EPVw-AW!&`P!I0m<9LD4RguUBF zc|5{JSQvUCh`fpsI?_rv$qP!6nf%URAA6#-HlTg+toHbUC9myS|LpwWyt;Vx>ak{R~JnkkhniHowE9&M;Ox;Sx3LsOFazoco7gL!?*0m!)BbEY1NBpf0Dm1v9)l==qL z%j~bpft&5}Q;Yf#Sd$PN`J4PWXmp3Y;CR{BWn(aWSla1aE0Xr%72YD)1+56ijSO+g zkEbp_xnTauwrQztC+07BU`6V{_|#b`s41N=VcM*ul$oi1(&>x6*Egl7Z@S)lvGkn9dy7xB5;G5}yt_!%8+hazgl+yW} zb6O0DxJS{a*Z}+_|6%ZgoHh8bHL~5RqQU<@4cAZ8>yCuv;6sN>e^FnJA+K*VyI3$C(HuuVXqrMepb zI%xTW1j%HuwhYQtj6kw)XSP$AVr!HQz>9i%!doOE2?@mbe`e48*o2P|zCHet`U~|} zzhy^M$h=y{p0m7Uxy-Jqc~Xgl!&RX@W&9dg(d!7cg6I*M={Qz#W{!#jvr$Z}2R)5M zA7ee(;0kv-Ko)sV%!L0G%-;IfmPqxxjc48r-GsZ2Ij>(;f3fM0-=%L<2cwvWy!xMm z&C1@v6Y78C-w((Qv;zlss9(rO{t5X&gjT81c;|H4EQ4F9OQB&QAfoFqwO=e}nQy3x 
z`_zwFdeuG^eJ|u@j_0WQTYMQD>K^)=Vvir)4NjLGTug=@L?tssr+%^_$pjqv9!$Jk zHPG|^#@=_=T*=!oy?T8vX!>s3b93fAw@r2!{BA~De0&>DRtfQ1gKPm_$|$-#PlarT z9+(3eTF(UtK9pa)wBo8%Fc_}vC87`$WT*!FoBTV-eKN9Ic6LEd2{B>g+?=3VeiF+s zc9D|-VgF&bK^}3mohzeTnNGol3^FtBM9^Lf>r_LK8V;jc#recWDQ3Wwf@(7ydp|%Mt2UYnnxl5 z*XEe}F7{aSh0fHxEwdAmKwzP@?a2u%=lpFz`cVD(t1MdC8#QO!j2YYKj`KMdKQ}pJ zUi_c~RFVo^0xr)PN>FBjZ*)UEiuNetUSVN&cG5)mjo70?*^6c;4uUVvUuG z`XRDBX{w!r3nd3k1ft!e0*6#!ZIj9W(4V9}we4{ib64hK(kt~zU%RL93Qf9rO)adM zwZ-LVi7PF+n+c2g&c=Mv&)|94#>mW8nIKWtGhNE;e#=8>NLojGL2ey9gpGAbZpC_n zU&b}~z6n|kkK1M1N{)6$2ed&XHHM65Y%EC@^cbngSX?ilMmH`$xybHeJ1(kD_gH>* z%IB(5(bo@*9rAwKX%?fpg2rJ7!7JdGcLP7|RHw@(SyFmPV0hsM9fovaLEdfI6VXrl z`|tF9L6tI-|3qhkCv`%yH#*pu?3f@-=vO-6{E>)HXl;!)nQTk5jsTpz*2p zRo7Yvl9uF7n;Xlj<&?oU+D_M&oLH4Nb4tuy;6sp=RS9~Z6`F{1--T6yFC*qeD@B~Z zi>v4|CCfN4Y?K~2BqTc`Ne6>TZx#d7bbesJ{Ih;(_wnPBM2I*tjMxAfOn?lA(wc>Y zh8n$`H%=zcMi+ILQCo0yCq#;PB*y#4D+yl9F7N5OzPBv5_r*1bUSIBX#XhsUZu+{a z;OM!VDmHYE!^4kgwLP&ssbv4V+iH4$cA#Ybg_g8M$q7pj&z}LiDIwnD%L8J?;Okgt z=bed;`*^MnH`j+1^RRMem?Qruw_mz$`H9qP>Cu*I=-4rdK~)1|?vSheJ0p3$WT+Di5!ooNtpeuPKaqEIr$vy6|FGZBxYL!ey{MU_HqPwe@u1abDzXc-@YRo1Lu- zHcV{B*kmfM9a*UpNNPg1T2gWheq2mP(;mxM+m#Tj7`3|lBCh9^Ti1WuS=FBJDh%D$N^bh zWbAnmXU!81chhrq#O$HMOB92jk}IbLSA$e;2x`nLw#Ez^{7m(n1Y z=(`xR#4eWX?0aN#DRkAsofvHk%O!)~N^e_MOFtR>78+b~JARz~0R}ifSriy>eE-_x zG`2J1nGl0aRN?>leN*6|(}Yf9`J$x_Evxzml)Y$pV^wsrArslLw=^uyh;)w~V`D4Z znVg*+%{_LKgDfbc)g3OT7^Y9lRLgG-UYExVz9-qL(^^@j`gH4YtnqhfzV)V;<&FL9 z3cIo&bS}hMXu?=jbwc;YY(4Ncd(U`>P)z=(aQ!*W6!zdpJok}!@*g)_DE^^)q@;bM z9kbD0y{6{yTivs}e|)&Q`tXmtyU*3up6j05eQw5#bKTJc%D8E(FLri5y(T?v_0yf5 zPpwWnnYDYxialA`dsnR3nZ=xiu9t`|bdDhF&(DA?J<2v<;*IfjH2C?AA$DT!WP9Pi zkU(*Oi5frr3?r4(#fZn2{@#CHbl>#muEPHCS-rE|H=M0f|B4ez{qghawvzjPelJT> zf821gC2`@k?JS88FB?{2^o`I_lr4!f1ZTQN#Mwp0+D#cdHpInc>eLuJlb`U-X%YB3 za5r+4)9C3QNnQAbKS{L{NS>!l3_B4@&0YRcaMgwxwHvB}C*yt0<&Sht%k6z-{rXq7 z<)(ICXq~$G>Rc4N+@q_*0^&0WKCEBPLG_a8gwxqF)r z&JI2{E`C+5n8PXv%fcvE5*`~7nZ}&9yynUdo 
z@lto$iq!Ze2kV#2y01AoX*p@PDahyD1eaXkSw&Dy>y(fX-*7(`(KEr<4mWuD=BItD z*(l=jF`ivk5(9aR7o0*y0rs?3rO7+eu_*&?icf6yn(72OBrKi3c$>KMdSi0igGY2ecY;k{9 zQU@2P{-pNW!cCAQyobv*ppc~5mv*ZsS>v3JI0%tq*sY#WaBEAn1Zo*iB+C)jqO0N&Fb6nA0?cb()u+QAAMz>YhkpE<>aTl1$g|c_F1AncW z#yTr(T@l^aN^*JNKggA8b7rgktYBtqY}Zz37xuFy54NQ=U4PEfEB)$tN_}GOHZt=Q z7VW5NJv*08z~jBLAyR!p`3pvc2LcZ_Ak*2~&(GG=&d%4()(#JDb7zEWc+5-O%i{On zDZ7MQ&bPe$J^hNaF*u`QZd9Fd%EXNL0OK_2H+Xy*Pk;9h3|_kibyvLWgHs(9e#e4l zY4+*H_qBW>9fuw%g#Yp(^vHOe*c2zl<%jfi zcf?8U-EWPPj_B!tvLf7;)s!%FFe;d}vy6vHaa3^MI1(XL$@$>EIqk7YU3J;5S^Zfn zPt2KrZe3o^>b}npc0b;hP~7{-{K6C4r}rE4J96i2%nrzF%-$!3cZF3Xpijl*;*^rr zH4%YjYpe4*s>1W4s#j#p++00nV%gS(*_D&StCM3(V#bl)A!%F0IZZwhBGUs;?;SIe2B>rd6VTX%R}m~qvU6} zrg=j724fmIAE{sB7?&|UC>V0Kw~-`%Dv+wtMsMW35@Sbb8|9m^7Y-dpd+SmEQaMvVo-bnj*X#Ef(q1z8g03?>eT zCxmJSLhxm``h*E%>l}jRm^s#E%oz6`1y9O2J3BbIy4rF^@kQcY{Aj%}md-7*%&a4t zy2qYBAd684uhFQtaZTf#EoA(cd{*8h59s~vG}g`q-Zq78v@BIMojkYdX_5W0WtZ79 zuBMmAb_+F)W3cHuNuXLXtfuE(Xn~eSy%TY9iLw_RmM2lY6SF8VjzmCzT@amJI1h^Fqz7hCnY!EH)~1#!6osjE6TbrHI^OV7S#T-re{&(4Y?Zn zo5_PyCf;~P&D&(JjZu#F@C;36p$3F$X*Dj%+_;<2sI*;etL!)KzSTcOWOOY>Mk6C< zQe?C`Ov1Bw>>pe`{rYa)MDP>gFD-H&KszD8peSBG9z9+>+MroY;3$H9Mt=%~)BkhB zor@@HG|1EOmrB&PUuRkD6SddL(Zx-6bd>8?Cnv4wYhv#Vs9$YqL&qGJkN5Znr2Hmu zLt&`q^_;z~sA|*9NIGs?XC>7{kRG+izv$!_Q8AnV)PNe_Fk9VzC^yaMqCMz*D(^qoDl{iO}&$w~!W5+r>IoR5|noM4H zV^EW?AK?7_CT%dhSHTH>Qvf3~>iszC95)3#@`xqlKb__36YQ_DSfdd~$f)z2I_W>5 zl#ieqT152XHjOd&XtdM0r6~~2(ilmd_54!GAAWf7pSPZ2cP4_Qn9fQo|A}9SvteXA z11hko3oMQA$e#!lvZ3FD!q_;CWm*$(2DdDcV_=64AFk+E(k!1*=kc{{yEs7QZp$T+ zU>bFZDtHsqV00fbYuNH))))~hBT{Lo@38iAq7;X_e;^;zI^W6YCtD)fjht}Zayqm{ z6k*he3{jlF8taq?QRhIm24hj<>?a2Vq>LeL0_Ngh)=Mw9xU0hn(SOqy*;sdY@bz7V zaSiv?)g5Szn}6}E`*#2KcvaTghnq4Q@&n_S9ADi2_WmCtMpZwoleMxO+ zKG+h}ePwe_=EnXtDffT%c-{2VpWZ)V$GiKA`l1&fYV=?Eg^TFC%d~YG3&RB|1gd(V zP(pOfN+>NoihzMNcW?J-Pmf{-Fz6Z4z1f37i;pzK9*L^|}Gb(J({)VNyqm#YrcR82s`_Zc2)vxU@E8g|m`W^9$_sqy^O7dwsT$RwUXGU+; zJ&O}!JPtaAOn?P24<~dpXyp+z^ceNBx1T(!R)(C6;iMRLuqdt?KCG!~Mk?I&_P&y` 
zJuj@-0_x;_mhhs(9mQ*{R7E9wFF2r_D%vn7{+)O7gMv`d*e)kAwRkBDvAn>1w%^J1Nnbl_i?Vl> zn%u==)K}7$^kmJucV_6MvTX~qtAnHJ^D<{g(>Z62AG<^A(W*My2R29eHAh77Hy?3< zfH(Km-mVeQ&Kj3J#=QRCQ2&@7#Gi ztFZ4atMJh#zNQUKJj9czBeJO{%MQ2NxZq5*X*pCjcyIZ!N z@S+uRYnz8|aTxKBG>|$s&L+Y$wh6~Ew0iR7T0^6 z91lA=XKs37{R`_~+MMm;c*xm4V)nL~FU{49p}>j>syX(mJhOi_?F{kEi=qYP_+GY^`fIQW*l8q zy^#KMsXA$PZ0zi$>cm-bakH4ssMIm`8KQ;?d;ILy&?y zW!)(RCtb00Nc#~iqPQDHi$pYTt)-WpKbF~$kldI#hzgR0kF-?X+mJZ>=uwQDfr`$P zsOY4dL|&P8J_ZAx)wXdCpA3%uWGcn|xL>cuan8Wls$ISUuFgAL6T6(!JMxKF@pO7aYHthPmtFNzYaL{;-8~b#=qE{QW_p&xd;=|FN zhfg+w(R5Tvt{(P_cH{(QhlZw4D##7X50ye?RNK{dBo*||nYXhfx#NO7p*MNeoUWwO zprFht=_USgzENSb)AQF9m#r@-+q)of>A6lcM`5UOH%J$Fg|NeOsO;pG$h@u>mJB58 zf_nA>o2RDA?sL$>Z&*|J;^w+)+YTx z9V_nhh}xymf7ciwoi5u^Q?sL@Y)5VFjG6HrMY^bX0DJbZvtn4Z5 z^NSBm%=AkLOw9OINn>nmV@d9!goH&&0Wm&4F#(gN`uI#`Thr@fW9!ok7RAOcs+tht zoe(`f(mRP}Up(}^bQp7q#F~ut@v#eF5n={N$|7FIrC%%b)GCW{Zj7od#?pvP*87eP z8&U$(r-Y{YPe_l5$PQ#t(zK?Wnyyr=N&U9`xYma{oYus(PCuAfI5{*gWJ+exq&V-0 z$eBqwtINwamElHU?6Na$$)IXKZhJc1;kKuPrB}P{d0NSo+HfyRrr0(1udVBtF{pi% zy{Ney6yCU>YH_*5ShIOVd#^<6HifLQi3eZ&?RSm&d5ah4S9p5xbC|f}RpSCXblI%QQJ5g$P%Z%wf{7basgtknq!mrzt;W zSsKwV{nYZZbkMRwk6)=?c(eS^{GRCqwODb%J=3u`+2^7-JvP(3e!i(QFlgL3r_8LZ z$m(h*Cu4DVZ6L1Pp`h;$vrjFahJ)W|MlJc4(JE@Q5qdY(}fjEV%LSX5DnEMo8h z*4lCA=^tFb{QAbt*IUy&PR-x+qvic+D~gMjPfJ@-RJbB7c%Dz@q?CXswq99OuMnX+H~+BorDdzfjt}Cqe@h|xq@1sXuAlMRE_AZO~tI%&f1bE-XEixTkq#e z*3Q~SSJZ{Xy7ETaO~;zk9qiLvjxT9A)tWRgF{h;@V_r_UqkU-3f=u2>JHGsx=FHl| z9qjM!n}2??wrJ<|wIyq=?&8d$u$BRhB1$vkPi<&Ty>Nxqgpk$w3r z2>+9j-ZO|apmTkY4bmgx=#x4fZLBSKpx&tncSI>3520uZ(Mh^P=V3H5X_DB|u00v) z+gjA;uS;Vd{7pyAgPr4QJRW|b++n=;8{@rA8LOo3Q>mTP%eykDlg0e2_tqb5^!9OW zL)YZ(Qz~-2ynO2Gyth=W$|AcHv>L~QRuK!Ih<7e*Y$b|^BOFfc1BqOce4>`f#W6I( zpYhdHOOxRS;y7xG<=P4MI6|S)dCP2VCdy5HCf=EjCVR7u zgAqY}31eU$)LDu9v)UZA5io~|mMZm2`!!NSm?i#PrE%~%!jCz~ z-=NRC&MA#cGz&I*7X0AGC~%oa%p>0N+Y=tM*{%dNxkbvN9!{ol>Lkm+arPvpVYuJ2 z9I^vlq)db^qFg>Iw|u;vBKq67wrunv}G> z@}qusWN}}|w9z?3}glT1yslGF)e9iQN 
z?jp10#be>qlT&BK>_Iv5rlRVeno!6D)fZ7rMO2fc0(OkSBs(}T#3_x`FonAmRDuSE zQl%vI2VzvNh0@ZMFS3u2lA{xwaTG`Kzn5LoT z+wNK8X7oIyJIEtCr=AYan$R-IN7=t6wR?eY^|YnwYamTqm5-(0n|9+Qbv2B=Jl56D zFotRXH~e<$FUH~f!!2rrjT;;s>nRl+FS@sW-o3@ga%(E{EVO5RaYuAx`G*ZuG*dJ}xc}gd9Rj-U$AO0X#KBh>YxD%@~5^5s0%} zMS!|ECLt>&d{T5xXh}>&Zg!~uRP}GYk3VlbJT7{AOi)m4kb87>Y*1iq_8y8F&6OpV~??S_x|`T<&Vsy z-stOlhDoZ5=t;fuu3TX1<(Oc3n=@@}WYcE!2+?ryM}Y}d01HHuFkB0n{KjLCzRo78 zC-&`q%lHfR`_reH`AJn}66ji{{7L@8^d7KzL*I{a8Ecd2Z95txc;b!bKz#8Dt1$q^ zFSow<69_2Sk>c6d$6tT#G3Ly^SjoZ@yn`}>O_>XGCRN88|EhjJFu=?`sB-u6@bVi| z?-`aFn3almQ25vdrk`W%@u+^n9R{=WIEPF(S65#L2h(Ou$or>=kYX{s(MXtIbkD-r z=77M&A6Od6i$!ueBEByRD~y;rEk0yITv=FAcxZZB@Ps(yufFT+`)<%?S5i~&1B5qo27(5_FMVl4kFK_Qx*YIjR<7;X(dry6f z!fI5rcdPWexZ-YxCcWc+aVP18P>q(}_d1S0bgT4YPyAkb2L;Sd=ngO?)5XWv*ClZ5 zSQi&tAh)$eSHleQs!4v)Pl5g;To|iGuk94Een^FD>xTtQy1!uS+&S9{4(3*r=M-&b zF=^wy!c+atg)54J!g7O%kob7MjWT*92TwO=Pe;>q_n`QR!O`Q4ypN=`7u`m$qTk{l zF#ThQ;3i1N&~5ZdAg6Al^qV@sJi>oN-VOa`27dE|@d{w5+bI1e72mqRVGsx9zo`Js zX~N9AjS@_;bRW8n(r+>d1{!7~V&CJ@~q*PV8J{#eUP4|Jr@x$r;tp8saQ(~q2HpXgY0sy#KdtaV#y_n|8G?ylc# z%IFxC~L!WpYHT|g1Nlh(Dn9~ir$KSU)U#~m#9tcUGcr0KxVE(ArIYPxFNK!3 zY%iH}YIRO{xPcQ=MNUKBq;&K|^KzJ+t!MEPl1 zL4CD$KaQVaLRZlxxwrb&0}{&0a4J8%`{B3oP4t(7rS_nKdiL8PmG8b$o;Xpz%17~c zUN4}jc~M(K`$mM4%}kj%cn|xmB-?pvLs`YT{Qgxx+eS~l`NWsTsQZr{+s3YD%`Fa^ zIx`XRmX3k;xVm-u(e-m?g{!benZZdthnq@!A$_Q&hw>jUdpKDu7DaP5ZfQXD+>!bg z;dQ<4gC9Hrtc|U*tdv)(o7%4|uQ~S9bMe{wVs|7AZIVEpbaa~nR10DP@?)Eo&o8PKS$+_52v1x8}NO@bqjK%S6v5P$J zakbkUhGi}`--?$HR3)bFii=KaxVJKQ)$FMOnLK);J}*;kiwU<<7zxGj{Wk4d@Hdt- z*QA*iTqas}e!jke?(POUV4`Ai3gdX; z2Mf*@{1=ncI|Yiv#r21d=CmV)+v?_SD?FTCS(%+&bBpIu!Dt*DO;9mf9sCoDq}gVm2bglV~Qir0C@U>_%=568HqUDqkI!O zzM*Y_xAl&b&%6~46C$dPY>u{GgNojAM0#svumxrUghs#MB^#9^Ha7YVFdLgedu#Ec z9*tp!t?Hvnrg8?cLANUg_X!i??L*>`T;b6jsHQwp+PoDj-DjohX!=DWKm4^q2Q6I% zoyzwU`+fb1j`Wa<&Z5-D(y-vtmYnIU%Yv45y}G*fiFKI;n=UM9dwzRiO7s2CHfA5n z44T^dO2PgQ_cEJB_b;9rS+{d;-lj##F_rb<(!JLf6z{mYq6cNod3!$G|INNnE_SE- 
zPT1#4cS(nyS4-GZ+~akD{XYhtG(yA_#3Ns4L_t^l#R2NR73b&Zl;?<-5RZS;{y?SK z?v%+CTAfCgfqG7X5>rf4Q#e^1E3xm~ls)*o)R8_AR^DsUOqul#@`aBQZ_T_(H{#*LofxkN$Uqjz_ zU*5OxJJ^hV9A85X{yuaGvuwia7$yU{QV)LyJpWApX0=KRKBK$}fB#;c#pd915PfDQ z!$WVPGU0i&?l2f~tZz_``VJ(2H#%D0fP~AaNs7TPb#n>|8WRv;>w_nW6l`Kw+l~B! zG#81H@0nqvcxs6P$_u)}vxHQ;S{tEpPgA>D&KHH{T)ldA<}>&7zEYu{SoY=knvCjs zL8b8%Y7%qmr%qctr>1-1skYR{=em;GrLQec($~`dmlw2@s+BD)*fY3nHIKPyVAbUXOF!=0M_V zmz-?QaB#?IKGD?l(8~0Ipwf=g{3V4E_RdA?=azJo1R;3zU;iZqyRUld@brQmFRiXy zgO~6s4!(V#d(HmVY0*_n62H#hGCjR@AH)fLzdlD^vmCk{y&wI&yzpu*dYp)NYw5M1 zD6(Z>_9p(@2#J)L(_UzN%DDQKmFu2glR9n9<#n50T8>|2wUyMa%=7chUD=yBKW)MJ z#u-OmT^_pp)gv>OJky@jnw8VBvmmr!cY6+TY%Vwk0oOS;#$Qo9?7=fL9_Jv2fnS^+ zsAIgMpjGkS5)1{Y@xmCC3kJ>%-ppBhZei2$7KB1GT23sPe{OBgKyZ0y$?%hcE6aob z%NTjXAi212c;{e6!96dn!P|j53Z@@=YqxvVzSgvI?d`y6tp_-jYAtTLmm}H`g34p` z8;cIK?@YlfOO7Tq5JXU)(@85!_fQ;hNDSK~E_Xq7!>RPOPjyuMxFRY2!SaTUX%@Gv zHRl#%3VhK|EIFUKtd$u$N^=UfUv2N~-(FC#t-qrK|LqU0TwPk)T@?~qxvH$RyE0UA z@x(ey>?FtGJwx7g z?7!JFE@IEH{DIT5P5MggnZ6ZS4i4GNAHbgJN*kC`(IxhbTj83yB`YhY$jvwYVf?k_ zb8MPJ({r~z-`OrU%{$mM2gIgPE&1z9L9md41&U!bokC6($4=gFPutsBF6ha17i*sH zb!KpG=IRSgO(&P5{4aC)sU;2PS7(9Fl_hoSi{0#_ikIb=c2!JK7Z~l{yAdQmjkUdh zgsk3AvWhmBRlRuUa%pu;`ilFkyZc+v8A`j`b?jJ!7xvO*gFQHjy(BK9@>j~No5>v} z%N;6b5XkOa``X?TdVsp?(bl-ZUrBATo%8AW>FBxJYU}nyCxt)aR&n1OtHL+`cvoT3 z_Djuck9rqvnI607Kz(@C-j-JWc$u!FEd&L-eLEQ zKKSp3lQSMz@&3{L%LPe^d-7=WOi69)qs^o3|AlbU`~CA;OX}7XxYrv{6PrzPlh=}(p=g^RMnU09!6=+p_$Y0lAhL1R*C zGZX#P6|OPakq%*Fm`jF|awEw2Yp3|?y5Pamu-srtK5aG!9iAJ;p=zn?WsE7#@3b+ zn zH^vExQ_0g)D6!+IdbHPSoe>`%e{pSDR&P5`A!Q+J_ZxnyL!YT72kKD=Gne`ZPUL$`C zVH3AASK@RG9)9hU?!IXC2fO>H{zde6`1DgWcUbb~e)9D{KOR?4{T+^@xkLNe8JP8g zp*PUm0j>kjN201FDhll#A|lWrhZF~VyK(S@Hc;<OPEPCgO{*jIg@60gVhvMcV`$qdQ+G}?316&Lw>4BK*C$L1l5e1)w`(Z9f zxB5-;_lF*}8TRy}>4{l?OJs`rc2vwL2>C%vJoJ(Gr zW5)Y=*g+i|I$b1K-8l5Zrt-FT;D9UI2k=N0e&)rgU<~cghW@=}EVJv(BK{=F)CKvt zzZJpqExjTsJD4xGy|||=Y~JA(ORDr{?UJaJre5jSBr!6&+!>UkSR3^Fod2~*gf+nK=R3R{ymHq&3O;rOSIYFCQRojL3*?h8=M($R0zuum{7&JDzO 
zjpNOU@HVl!3~$G=D3%C+Q+7r?bD`DWycMdOy)2*{<$nX!XLw6Mxd2z|Xj}pMi02Kj zu~Zt-7r0tRL76N%RI}lG;2Lv9kpqBi4S~fC(|^AXcaBxEY!2rIxPn{Y4q4%(EH;}> zqE{&u*$17Jt`0r>G>44?Pu8e8%CF$j2f<4so~&gb4E=+A@F_+-*l4BjQLKHwIU7*8 z6-%5m+`!28&nsg`&h}O)yk#Juh`%~dZiVVI91>8t%EnfY9??c0u^F7Bxf+!~fc0AE zI`lE<*~}?J5{oDsy1=n96F97o`3zLv^bt_qh6DknjDQ;Y3{V9lp!$X~1r*BnbjpT4 z1k`*pQ6@%g9%|>5Nz4miI%NjT(`aj^wa3l@W#_>&T6;FmG-B;nT^f4Ap1&TtGzuYR+g}f%=H&4S!~7G-3*%W{-k04e3zL z%on)ET#=`hZZ;sGJW$H~gHU}~bK0NG z3%K;P+^VqpLq-Yrg$*%!WU+8lw^KD*<+nz_>mTwAbly=kO}2J%`H&T<0xt^;S433(DlL&{Do2u*aFUvwJcN92eu>h0Yz56Rh`Os+-jasPG?#0@v_} z2UaNxFgn*bCL?h~jMr>HfCn>`SQfx^jtqec)@W1#C*p~@yH&zPx}eX>-6Pn)i6daT zeK-th0xQhWyMPJTVa$fUp}-*yMVjDI4b#v7pmMqHq|wZHZKM`ZL=|a*%R2w@SNZpR z&0*Ff2y}iSU-M7^J!Yknig}KQUIgNp@1Q-3!0j#4;GiNEP&};H|&Hoe8i;zgkMy&>Si+plprSYzNbyzzbnJv>V=J8Q{F^j8(i~rHt=LWbTDF zF&WyI+tBKeE_vSy!!em1bxdar-*PP{)A?6zg%X&4sbMl3&QK3f!j%bJ8#xqw1V6Q$ zSO1Bsr2s=%raL+Z${v0u10F{}wIWJWjS?AXFsLYbOMV;=jKfwVuT3vwV)XVWufWxe zEOLvb*VOT^OJSSXeX5_lNs9OwFHxK+4lAD;5au7}iFsw={!?6S);aktoR`qN2)5pGwOsxT(Ged*3T~-}WqOWJNpZ?4cIpg{vF}Ws zHq8s|?Tt=mqLtz|AXYqWNGUFE!|L4);PSyBM5FtlAiNjD6FuBKOhEZFrFDY1!*<6R z$@Z~LCHc!z`=@oBUbO1+5?9yjj?NRMYZV)6!XxW*Yqyrk73TCMduOdUKR0*tr8X9R z+jRS-h|KwM(d7{-o%M;8XV$7V3G2e0C$>n&tzJ`0rpDI96fTXK9okf%HsiiIGq=x< zv^;t{JIM0Fj{lW;CRSKjUft|Xyix&eHN}~+@!veMNtE7cKO z_MJK}6z)aV%uCDfRsg9i#$qh$+nuy`KPz}9R7u+GD zJ1SRAI)FD=zg?M?cUx}(na^iwgVXy2Wmd=;Nk6S9 zq<$~Yz?NvyzhW)sJh3rxDa3QQ#X0@GP`USNtJsV4-cA7MulCbQuzi{bJr z7Xp{AC)QcMRX*Z&6vxGW&VH>PHaK2l@kYk34jtpxDL5)-c)t}J+7pK%7K}M&0NXJu z_1H;98}7x0Yux&p^IdiVPgh%^xJ|}2N~s1lo@D`-gjcFrkOnpMJS@C*X5v=_hdMu$ z%JONyjRlldgAGwZ*?i+?fD(4#`JsuBZ4K%XE3Tgcs+H>3Y99(f)E2nNhW#HauFnCL%{7m}b!MoU&ym>z%Bp#W-Un2$@k1TgnV||{#S+v3 zDA76R3Al3_%GIWA8fT!<7@amM!@gO**E<6@sRy(=6GO@ z6zjx#SUk~(c0sJ!$`8~XNGV)msak;S_1K|b5|le!LjBZwRGnco^0A``3UYQx{TJj+ z_C)E-)uBII#<8PB8+jZZR;Wi+lj6&1;m9>#!4IT{X+Q>(dJ?X71mL~E$K;qVshF5HG`j+Y>bS*SfKaPu)Kyfqi>(J(!zhAY`H8nA0i)#fESUEZuWQK$J@beSZKw%m%Jj*W%7!^ 
zJyr+oppar?x^)h|7;QOUI4)55o|vyI*9|&f_Npdy3@8U*I5xn=3jPJ&#uv~=Q2{~Q zY33$qbG99pC)&sG*hniAHfZJboU%!vO!xk-ar%uK45Vpjp3#Hop9rWF>nwTX#`Gzm zV)-oF4W9`;L-CzMRw&_Vk+lKihpdez?l+L{r&}8$2IQ<`I>S6fbcdXttkIZAcM~SE zHVz7|#sHVDy#=l$9oGfcqD6Nopp-|*c!WXFMSKw0E*Sm{Y~&r0!^t-mha(Ajo3D5R zAGMop;aru*kBkC=tEjG{eFe{t@0TlrHqTpDTzQ+XILC#RTr5^Hc-$xnuU5^{F}~s) z8~FbZ^-%hzY!~rmu5a! zR`OU~qBhIDd<9qvd@?;&2P;&5fzJ|oN-oz`e3mQ$J!*7M{#IOlhNA)(`HFh1ZZs}f z>~o1KmJCd~#a@Lx?Mkkj1yyU=Y3xBjrQkGmwaUeO#C2ZaaUDU`T2?ROItd{o;yP89 znZ;FT{rqd0Bw zPR=rlG)j~)SZ0N?Mb_hU(>iDg~x9w27>s_ToF^kEcKX@h?G*yLSSk){D{Xa*f?O)V-^Av=^lZpeID066ubGv zO1q$mVip3^8N-)CYx-ah=`jm|iF6NPBHgn~aLXUK4vxk}Sqy>e94mpHKv6S)AREqN zaBL=uZVGJYpnF7gGZ^T05IU}V1Z}jx1-=U`2ec7B58#`r!vUXS&!tS@yI?@>fa@{} zK=Gg#Tx$t+I_s3j;5|`>7*+7&1DU4`3tCSIkF2nzC3yz2LkFI?Oyjv{~d%E7Pq z9{Otc%QZ(E8jsf8bDNi8e0nhWpl{(@I9 z6+}{E(5u5`a+p&mb1`-L`uQo!I27Sg1Vw~{iB@UAl3O%C(PQpOb>KF# zp^wr~eDDQ7AyT<)^s(2u1SwvqD`~aL1~isYoDfiSkf1&`nM)9d(&K-KS&xeB_ZjZz z`;AdMQ+|L#Ij;E;mE};AIQ1M#w`e#n)Atxt;5y5e3tXNy!#)Mcy!;%Gun1hZ zY9nwF)MYEb1YIQi0@rE0otlVQ%5H$t&o+UJPI1Bo{hCFb;4r#1^EC{+S=?*dB50T1 z=GME3LfQKIR1E#q)MH}6v_pehhbk4|nh$-x*Gkpup>sw*^JYNdjs@q#XKXV;#d(a1 zF0WPM_X8y;(>IN(wrt!c6?*bI%8h(yS15iYnNnK)Yh3aMv0*dtb4?sb#R&Y$Nx5LE<4??zuk;sA)tKAB{3NWqb`JI6#c)RPkHgvrSk}_Akz> z^M*sHQvqF+%e7(?J)roQ|Kw1)hA;S-JeNDyx;uEMs1Z=zXe~zcOJsWgAXJ~>9sxzx z{w+{Mml*N9VJoLgA}Bni6vickkH}{!pqdR=_$*~ym0+yP%6sve6Q>N?fbWub`Iy{Z zuyXb`sBSi2Kv6FD2cRGk%lI1k+y=wx65}-^$3Ze7u?YYh=Ds6#uP>+T3r-iszzLh| z2%k+Kk?#}ZJK8>h57in(FqEI+rWHjQ1kB&{y>tc{EslwHZ$0N1&Q&+WPlxY<2T(sO zkyWvgdz>8cf`@qTjn|}!t|E7nOS!{6aqruX)!VL)tGn)Wn)3Vp#1Y!lT*EetHK5#q zE*+zxum)Xx4I*xXG5%i1be2^LOcW7pw@Q$}Dl7OLA5-?|43D4-Lx_}|Nh4wnj#&e3eI@v5% zlKS_SkLnB)h}tseaTBcFVQ|08h!=~st4};HcGofR(T!sym)n+dxb{-(C3EwM+m=%2 z2V zpw2SVc+g~VBiFmZ@-KRQ$O;8-3s+#szl-Bhm*ZAng;snvr%UBy_8}8X*kr1Lvg*a5 z`=MoLa_vbv9hERc#7`gkfVg-MMqfm-uAMDszvnY2RWBq4FnBpiapW@^6f*+7 z9Ex(r))_&rBznIJm~PUU3jVmM58>OecjVpt+c)rSlcC#&zRTl*x}MstDpb{r?-C4s z*Gav=Jz1P3SBDOd;4R?x3phT;M)^UEL6Wb3pGV!AEb-#^+FFP+s+;W>lvC7g#OPQt 
zK02T{))w`w{%!1C?SwmRxG28ujBo35C{7YuBXN?n8@BT`;wPML;R+~{L;-ci@C?Vr zPbFOv#W_eL3aAU<7ioW4ipHrlY}detRDw89zvC;?&4L9sG^ZS%*Dh62g7~}u#pg9f zy?}X%Z__CBZRn(Yz9ND6u0AXNY9JrsTWy4NF?WjV=-PBN6trm}mx~O*=zB0%Eu;4b zF&Z#&TMKbBP$9lcRL}^ZqMm=3q9FQrLo7qXmMfPFSM|(rgyVo!F1UqD`hqN6!d1y< zyRc`t{T)iL{G?ag{Kd&;T0x0H$EeQ?SkPiToNSa4LBY!j>EP$ zbmK`)d(TQ98lMuCQcA!*IKZ>>XIj@G!Vv0H9&32sv)~ z4}3gigSZV~wTj@A>ieBTah)fiC{xcz^aPZy^#m@GDT0DbRdX0O!05Zgk5|B?!>gkT z7?a_mj1I+@WJhEQt`4PZ(mAvTyY(?s2BKUc4Mf#N_2<|Lj+Yc?Gr?t8;Z6Wst0@a` z;RwAF96H3nuX_-2%9sKEi*0Tq?Uim$T8ql!ia%^$}W<3Ji$)RthCrn-7!K<9- zPYs2jlFk{O2PEVo89Pe|(H zFW`+f{Bt<}dm&4I{yqg)c%gV3I2}gBtG+qKcd7Y%0r01 zE7$qov9<6SP&^)Lq8KJV^B4>Ndm8_og1`Taf1XC4@45<>j$HO|?K z@KV30oso_6=ftq}^11xEi84zsYfxcB{{obh88!mypt?~0n&Y~zLFIE?5=HYbT5-LK zwJbGJoR!uR@*Mgnl8doe#^Y9)1GsDaPrlyoaG0kiSywv=s(W~~Pl7v{{9ZGBQ*e*x zO%QE2QQicfPpS*#FZtggkNE75y?c#+p2I(Dcdc+04d)fO`zO%0f@kl9oHY+^f}D}& z4A!NL@NoJ#oS?6Hjev^NsUs*(4d(}_p*_iR1V1Q=qw`kGmH0up2r6KRb7TUL=^R;y z2#~jFBA|XIpro3iKkyZpFe0`?(w@(2q#@nF^#j%0)P+Tf=253-I9xY7Aaceg*qI`7 zfcO&6BA_!-GR0%XtG>3Kc-I&0NknrS#AH#=_J4d2QhsJa**!~IFK)=r?|r`gp-2! 
zrlR!b+{lv51h0S@E%(&UKG2ky)P8<>NlU(e(o%5#mR)<-y9QZr`6qn0MRNH7pQz3c z@{!L!gTF?jIR>kM-4FhDvkj=Mz;2;Pm2R<=VNboos|0?>q0Sion{!v9c7N6?f&Zwf z_!@Cqutt~@Ut9heYm3iBn}Lt=TQLfY7HbHx86E}Sx=lZg)Ni-yC&J43D9`Joj?`DY zwv6kuVE*^1kR-|vS@qW~`pUrTDfy1fM#-74HdW$e8h?THc!1M-dZcY<3w2iOt#F1qwZtZq*IdAoz^d0P2lzOv)e_qnUcCkDl74SQ z76}}#R4bGg?CBuGZwRl{!cL;54)$6$bdnx7oCR<7Nu>->UujS|LstkYBg9~6q`?W- z8sHjnQw_L2;kZ~BBv8jyuU3q_zXGV~8kFq_D85rT->{#7J4&68>LKb&^CJIz%ejqA ztHGSW&$wT6{+wa|;wx+PgNLM_q~Nwm;b+K~8q|EgW*kaCLvF?Ghcz7k|HXX(%!A{) zi<^YvcENwVVKfBIYo5jVB-WGX|C9>wX$R+%*cbobR6x%|U%auVUqc3Gaqcx65LM$g zZ#I+wFL2^=uo$+{XG$e#IK^q;pN(%qdhi*t+l=~Z#=n<2%2pO@NcZ-{8a6W(YdGeL z$ryn}?A2vltRhv;iB)W7jaWr1oUr-1Y@?o_#SEWW$Tr5kf(lsLGd0lO!+!cK(EFtE zb-X0l%U53|*+thrW zgiYOUc#^M^qYu33EO)->rU9rh6{#=ps~582iy@I?kFVfE$+enLF1aO zhA#>>%rJFJVI#iCJ<#v@Oq&~NugFpaj^tq(f=rrXIJQqTY#(UYrlKBW7+W87;U+#SYOahLci{vAFWvMr z>-5b;wSrlvkB4#uhe^t9Jal}`z?ZM%G*g6X~c?Am7rKLezS4p zicQp4Ouy?mV#Rp;PX zyve$w>md`kX#_Z7bw7zb0%-_rMJ^3iIAPI{whhsCem|RzJ*`kia|5xMcD}ITY3B=U zk1M@==d%bTEcBf(a7F64&Kvgg+at<&;L@Z0;sy}yLBfTc%xtRUWGTSJ)iI4nm_9MT zYsCZ|vKKeM$OCi(DlYm~x|FB54&k_@s+puiBo+;1x(*qFT?20hSv)aEX}aM)R>-lD zMp|N(qoJ$NA>VQxBCt)L`~TE-^}$h9SNy&E?%NGX2;oDL7*hxV5)2q1fP}9^Tq1!Y zNElE+B1)(=QV~V`Xc48Uv*v%2KML)Pmey)3BTfqxt=fts)oEu$ zG5fZ^bIyBr-v(6v>0~A|Z};r)o_pW9=iYPg`Jma1tSUfDOF8j&cl4A&YeYOkyU> zK3^Q=>k#CcU|m2`Qt*sRC9S$rcR#yWo9q+*8zy-PtTfUgD@2Eo)D%3UCX3WOyEq-E zF2AGQ{ijiqpD`joPBY7o?Lwg%`S}^XI~KVYRKNp2ac2&yJ>snsl{umA@Z$E}EZc&W zaBf7t9>{Ezx!xGa?DtT)F&&))P3K(Bb3pZ)0~@ePX0tU0D~;zRc22;9GgZbMSnjp! 
zi9A>2ycm_tAcI7Qi`lN!mY`jUB6tD45WKkC-)^AwZw%;6G#T#;d6g8jIA$KNl4H=S znJa`foyU!_AmQ)XS~m zP$cg;p!+`b*D;)E1m5!ry-?;lxz>f+74Lc4=?Y$4pWvM?jIYp6SA6kl)UM)IV`#){dq z1Z&urVa2RNf>m)Q7U^W+EADF6$*L4sMkh00k()pze8u%cI2VIlmji28AlJedt?7cb z*ZLc;>FOdF{bo&vk|A>$#0LetOJm9s#=Rp}HNJi=uY1uNCj7_08>E`PP z4PObpMI>kxS`58OWR%y>vt&%?aZTvwaSj5>$Ysg6Ex;QL|Dz{ZGOiXnw5lyd>xocRqHjnFVYHce_gPg2<~6d;*@O53VcFR$GpXQ6|L{^{o_6=3;y(=s)23_ zs3Zmw8a!eF3%(ifBw<=d3b#-Qi+_Lzp0_&&IUD- zGS6kUjGFdA%hE7XQ~rM%R*1F1?KFB!b*~54A@61shi__~HYMOFNo1*?a8Qp;&fIUm zO3Ci_P~EoV55c@jCFu?gr|v!m$_fD^f;`zl8M!cqGuzAUwvSU1ySAPtJ;j>_49o%8 zY2(3%R4(gpAR7j(^A4+`dXX+dv1-2+7pKU6*(@VJT%%X3}#X*w3 zo>1X8{VI*WP94I(QMmRA+81`{N3acHDZHP`}s@Ayu1H2{w<_- z6tdp12MyjPs*~0s5r=E*Y$q?T56-F-jT~j*D5x1gy=Vx^wf9jpUL)-l_KLT?3HF=s zcx_(WJNBF21m_;^(Cf&vT9qtSHOZINarKvEE033d;~Q=(Tl;NEg2i-Omkb<;G(csJ zQzngNpZxf*-X43+;nSzzL=ZZXOp`o%x7j~PevmwB|HQjN4OfFvr zQ|bfDKMTV;`=Lr8-f0z=bhMJUc}Eu{M?4;aZ`ANZ)4iNhJB(@R^U*sn^)!VJ+Zof{dL< zdA~h{&imXGpu-6Y`0vyMv3ntlhYlkjqz*7$6UMPVS_wK>#7sAt&ktQb;NYW8rW-}J zKjSs{bc*p5A8{DG6m*mQ0pp2{+Ns`UJjJH0!Sm>j;WZaGk-o*-sSd&ii7a+9c^@_W ziFHIfg=)Xu?TGf^6J=0TfI(y)?in{(g*U>2=#%^byo7bSRi!%xsHGSV{Ag;v2WE# z@HHic29MtbA<1RNx^pj%_}>5Gu^rkL+{h zegZUj!7$a2Ao0+_V=+;U-A!YREE1rSd7sc-!doe4kuZJO>?J6R1n6YmC!D!hHDL`= z#ow-!vBIvvnOHc>LP8&eWWc(bUP*N3OsvV<5zdSN8Mu$~ce4#7C(1ol&(M$hCKUgoR1^QjsxTIexM4tzbWf`lVPDaUo z#eM{%x7T`F?AJo9O~!sL#oF|o`wvhp1r;o@lG9`#5KVez!U!jMgas*~-<1*W%bLfG za4BXi`ML@gX|j1|rJD}vJjr-dnX`oZ5!2}+xT=I^MJ1&35aTWL=^D>Uw*>r=M|BVz z9cWO0)*Ak<0)J049`+zC#n&-jFFFq%hwcLRFnOHUgU-I$%Yp>RnWAM4Fj71SBYd-4 zjL0-tSGafiRqBX$6e|5;j43KKYDBC}TdYUgo_wBin0`OF*-G?w z(*Hb*>5n5?qM7MW!Kfqra;8VRFMTC{e$vO^&7Ysb=Ud$K(Js>YGkvXh1e`OUh?t2G zpLo6>{Gom?o}pR&=g0XOacOvlz3seB&lDpd-*Y5U#D?vQXKZrqEBJfZJpT6-BNd?- zDS38p=jYFYXaDz~@-xQ+A$BH0@j(9XQJf9WpYSvGD;(1jp|}@(pND>3z~7(b=W;xc z=V#n+U|gTViheG!y^Qk((H4As!1IPi=W(J^7!AybaP)E}RV&40DA?|-nW>tPm&HavO!rNxOAWIO&h`9@7e_zCa>G zqRu7}F>a+#(5eng2uG>XxT?AQeQ$}$kPz5=v*9cDhTaX%W>_;(OLpK8Td5f{&a0g; 
zp>FJjIqUl6oVO9dp|4cF@+ZWGk`*Y9=2Uiq>xggxvGdCO@(^_BQ6AKXu-)N-8i#sR`bklN4wSNTPx>Js$Nhve9n})8y1ZjG4GB>B<>$QtE%x^wM#ZNRt%kY z@5-4AhL3Knsh&Ho(7TLl0cu1kw&~At+aekJ`@wSVOL-qrKcZhA%7{?lYyPWx672z#7RY~)LN8S9bl{!Eq}0S<*mE(`XhG3>2F(T3PtS9p8S zEA%lV5-n!8^X$`w*h`!9z1~#oF*c+L^x!k7SUY~ioZMiyfQoLOhk%TyGV4dyqO5zM zjj_ta;8*lD@Zx7E#wslKYlr=9?yr*vUx1pRCw^{VP>J>0;rqm@so>I#%46P@8F&31 z$R$Pf0(HI$)eF2DG2h{v{tjqs8etoTMH Date: Wed, 27 Aug 2025 14:03:05 -0400 Subject: [PATCH 13/27] Replace centos:8 with almalinux:8 since centos docker images are deprecated (#19154) * Replace centos:8 with almalinux:8 since centos docker images are deprecated Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Update Dockerfile Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../java/org/opensearch/gradle/DockerBase.java | 2 +- distribution/docker/build.gradle | 12 ++++++------ .../docker/docker-build-context/build.gradle | 2 +- distribution/docker/src/docker/Dockerfile | 16 ++++++---------- 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 900771e5bbd99..89db0857f2c63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add CompletionStage variants to methods in the Client Interface and default to ActionListener impl ([#18998](https://github.com/opensearch-project/OpenSearch/pull/18998)) - IllegalArgumentException when scroll ID references a node not found in Cluster ([#19031](https://github.com/opensearch-project/OpenSearch/pull/19031)) - Adding ScriptedAvg class to painless spi to allowlist usage from plugins ([#19006](https://github.com/opensearch-project/OpenSearch/pull/19006)) +- Replace centos:8 with almalinux:8 since centos docker images are deprecated ([#19154](https://github.com/opensearch-project/OpenSearch/pull/19154)) ### Fixed - Fix unnecessary refreshes on update preparation failures 
([#15261](https://github.com/opensearch-project/OpenSearch/issues/15261)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DockerBase.java b/buildSrc/src/main/java/org/opensearch/gradle/DockerBase.java index 5fd155400cec7..cde18b1138947 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/DockerBase.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/DockerBase.java @@ -36,7 +36,7 @@ * This class models the different Docker base images that are used to build Docker distributions of OpenSearch. */ public enum DockerBase { - CENTOS("centos:8"); + ALMALINUX("almalinux:8"); private final String image; diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index cc371a3275570..1cebe24de6049 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -132,7 +132,7 @@ project.ext { } void addCopyDockerContextTask(Architecture architecture, DockerBase base) { - if (base != DockerBase.CENTOS) { + if (base != DockerBase.ALMALINUX) { throw new GradleException("The only allowed docker base image for builds is CENTOS") } @@ -181,8 +181,8 @@ opensearch_distributions { tasks.named("preProcessFixture").configure { dependsOn opensearch_distributions.docker // always run the task, otherwise the folders won't be created - outputs.upToDateWhen { - false + outputs.upToDateWhen { + false } doLast { // tests expect to have an empty repo @@ -208,7 +208,7 @@ tasks.named("check").configure { } void addBuildDockerImage(Architecture architecture, DockerBase base) { - if (base != DockerBase.CENTOS) { + if (base != DockerBase.ALMALINUX) { throw new GradleException("The only allowed docker base image for builds is CENTOS") } @@ -234,7 +234,7 @@ void addBuildDockerImage(Architecture architecture, DockerBase base) { for (final Architecture architecture : Architecture.values()) { // We only create Docker images for the distribution on CentOS. 
for (final DockerBase base : DockerBase.values()) { - if (base == DockerBase.CENTOS) { + if (base == DockerBase.ALMALINUX) { addCopyDockerContextTask(architecture, base) addBuildDockerImage(architecture, base) } @@ -257,7 +257,7 @@ subprojects { Project subProject -> apply plugin: 'distribution' final Architecture architecture = subProject.name.contains('arm64-') ? Architecture.ARM64 : Architecture.X64 - final DockerBase base = DockerBase.CENTOS + final DockerBase base = DockerBase.ALMALINUX final String arch = architecture == Architecture.ARM64 ? '-arm64' : '' final String extension = 'docker.tar' diff --git a/distribution/docker/docker-build-context/build.gradle b/distribution/docker/docker-build-context/build.gradle index a5bea2935b3ea..3426df47780dc 100644 --- a/distribution/docker/docker-build-context/build.gradle +++ b/distribution/docker/docker-build-context/build.gradle @@ -19,7 +19,7 @@ tasks.register("buildDockerBuildContext", Tar) { archiveClassifier = "docker-build-context" archiveBaseName = "opensearch" // Non-local builds don't need to specify an architecture. 
- with dockerBuildContext(null, DockerBase.CENTOS, false) + with dockerBuildContext(null, DockerBase.ALMALINUX, false) } tasks.named("assemble").configure { dependsOn "buildDockerBuildContext" } diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index c980217b0b8dc..fc2b66aaf7d53 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -63,16 +63,12 @@ FROM ${base_image} ENV OPENSEARCH_CONTAINER true -RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* && \\ - sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.epel.cloud|g' /etc/yum.repos.d/CentOS-Linux-* && \\ - for iter in {1..10}; do \\ - ${package_manager} update --setopt=tsflags=nodocs -y && \\ - ${package_manager} install --setopt=tsflags=nodocs -y \\ - nc shadow-utils zip unzip && \\ - ${package_manager} clean all && exit_code=0 && break || exit_code=\$? && echo "${package_manager} error: retry \$iter in 10s" && \\ - sleep 10; \\ - done; \\ - (exit \$exit_code) +RUN set -e \\ + && dnf -y update \\ + && dnf -y install --setopt=tsflags=nodocs \\ + nmap-ncat shadow-utils zip unzip \\ + && dnf clean all \\ + && rm -rf /var/cache/dnf RUN groupadd -g 1000 opensearch && \\ adduser -u 1000 -g 1000 -G 0 -d /usr/share/opensearch opensearch && \\ From 132031642de1a6675537b97497814035cae70177 Mon Sep 17 00:00:00 2001 From: Varun Jain Date: Wed, 27 Aug 2025 12:35:06 -0700 Subject: [PATCH 14/27] Add query argument in QueryCollectorContextSpecFactory (#19153) * Add query in QueryCollectorContextSpecFactory Signed-off-by: vibrantvarun * Add javadoc Signed-off-by: vibrantvarun --------- Signed-off-by: vibrantvarun --- .../search/query/QueryCollectorContextSpecFactory.java | 3 +++ .../search/query/QueryCollectorContextSpecRegistry.java | 9 ++++++++- .../java/org/opensearch/search/query/QueryPhase.java | 6 ++++-- .../query/QueryCollectorContextSpecRegistryTests.java | 6 +++++- 4 files changed, 20 
insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java index b08bebb840343..6bcad535e5274 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java @@ -8,6 +8,7 @@ package org.opensearch.search.query; +import org.apache.lucene.search.Query; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.search.internal.SearchContext; @@ -21,12 +22,14 @@ public interface QueryCollectorContextSpecFactory { /** * @param searchContext context needed to create collector context spec + * @param query required to create collector context spec * @param queryCollectorArguments arguments to create collector context spec * @return QueryCollectorContextSpec * @throws IOException */ Optional createQueryCollectorContextSpec( SearchContext searchContext, + Query query, QueryCollectorArguments queryCollectorArguments ) throws IOException; } diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java index 413cd63b97856..384f8f031373b 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java @@ -8,6 +8,7 @@ package org.opensearch.search.query; +import org.apache.lucene.search.Query; import org.opensearch.search.internal.SearchContext; import java.io.IOException; @@ -43,18 +44,24 @@ public static void registerFactory(QueryCollectorContextSpecFactory factory) { /** * Get collector context spec * @param searchContext search context + * @param query required to create collectorContext spec * @param 
queryCollectorArguments query collector arguments * @return collector context spec * @throws IOException */ public static Optional getQueryCollectorContextSpec( final SearchContext searchContext, + final Query query, final QueryCollectorArguments queryCollectorArguments ) throws IOException { Iterator iterator = registry.iterator(); while (iterator.hasNext()) { QueryCollectorContextSpecFactory factory = iterator.next(); - Optional spec = factory.createQueryCollectorContextSpec(searchContext, queryCollectorArguments); + Optional spec = factory.createQueryCollectorContextSpec( + searchContext, + query, + queryCollectorArguments + ); if (spec.isEmpty() == false) { return spec; } diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java index ebf8ed0ce3362..f8427440a6c13 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java @@ -446,14 +446,16 @@ protected boolean searchWithCollector( boolean hasFilterCollector, boolean hasTimeout ) throws IOException { - QueryCollectorContext queryCollectorContext = getQueryCollectorContext(searchContext, hasFilterCollector); + QueryCollectorContext queryCollectorContext = getQueryCollectorContext(searchContext, query, hasFilterCollector); return searchWithCollector(searchContext, searcher, query, collectors, queryCollectorContext, hasFilterCollector, hasTimeout); } - private QueryCollectorContext getQueryCollectorContext(SearchContext searchContext, boolean hasFilterCollector) throws IOException { + private QueryCollectorContext getQueryCollectorContext(SearchContext searchContext, Query query, boolean hasFilterCollector) + throws IOException { // create the top docs collector last when the other collectors are known final Optional queryCollectorContextOpt = QueryCollectorContextSpecRegistry.getQueryCollectorContextSpec( searchContext, + query, new 
QueryCollectorArguments.Builder().hasFilterCollector(hasFilterCollector).build() ).map(queryCollectorContextSpec -> new QueryCollectorContext(queryCollectorContextSpec.getContextName()) { @Override diff --git a/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java b/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java index 03fedc3534e82..e6bf421c5b1c9 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java @@ -49,11 +49,14 @@ public void testGetQueryCollectorContextSpec_WithValidSpec() throws IOException QueryCollectorArguments mockArguments = new QueryCollectorArguments.Builder().build(); // Given QueryCollectorContextSpecRegistry.registerFactory(mockFactory1); - when(mockFactory1.createQueryCollectorContextSpec(mockSearchContext, mockArguments)).thenReturn(Optional.of(mockSpec)); + when(mockFactory1.createQueryCollectorContextSpec(mockSearchContext, mockSearchContext.query(), mockArguments)).thenReturn( + Optional.of(mockSpec) + ); // When Optional result = QueryCollectorContextSpecRegistry.getQueryCollectorContextSpec( mockSearchContext, + mockSearchContext.query(), mockArguments ); @@ -69,6 +72,7 @@ public void testGetQueryCollectorContextSpec_NoFactories() throws IOException { // When Optional result = QueryCollectorContextSpecRegistry.getQueryCollectorContextSpec( mockSearchContext, + mockSearchContext.query(), mockArguments ); From d2a3c27b3f13adc91585aaae1d97ddce0cc24fe5 Mon Sep 17 00:00:00 2001 From: Simon Marty Date: Wed, 27 Aug 2025 13:01:03 -0700 Subject: [PATCH 15/27] Update comments and errors messages for: Replace centos:8 with almalinux:8 (#19159) Signed-off-by: Simon Marty --- distribution/docker/build.gradle | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/distribution/docker/build.gradle 
b/distribution/docker/build.gradle index 1cebe24de6049..ecc2d2c5c5766 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -133,7 +133,7 @@ project.ext { void addCopyDockerContextTask(Architecture architecture, DockerBase base) { if (base != DockerBase.ALMALINUX) { - throw new GradleException("The only allowed docker base image for builds is CENTOS") + throw new GradleException("The only allowed docker base image for builds is ALMALINUX") } tasks.register(taskName("copy", architecture, base, "DockerContext"), Sync) { @@ -209,7 +209,7 @@ tasks.named("check").configure { void addBuildDockerImage(Architecture architecture, DockerBase base) { if (base != DockerBase.ALMALINUX) { - throw new GradleException("The only allowed docker base image for builds is CENTOS") + throw new GradleException("The only allowed docker base image for builds is ALMALINUX") } final TaskProvider buildDockerImageTask = @@ -232,7 +232,7 @@ void addBuildDockerImage(Architecture architecture, DockerBase base) { } for (final Architecture architecture : Architecture.values()) { - // We only create Docker images for the distribution on CentOS. + // We only create Docker images for the distribution on AlmaLinux. for (final DockerBase base : DockerBase.values()) { if (base == DockerBase.ALMALINUX) { addCopyDockerContextTask(architecture, base) From 9a9ec8683c1092f69377e6079cbc4a4e503e88fc Mon Sep 17 00:00:00 2001 From: Rajat Gupta <72070007+RajatGupta02@users.noreply.github.com> Date: Thu, 28 Aug 2025 01:37:44 +0530 Subject: [PATCH 16/27] Add Channel Factory parameter to Translog (#18918) * Add overload for channelFactory Signed-off-by: Rajat Gupta * Fix tests Signed-off-by: Rajat Gupta * Add Changelog entry Signed-off-by: Rajat Gupta * Fix conflicts Signed-off-by: Rajat Gupta * When update operations fail during preparation (e.g., version conflicts), (#18917) TransportShardBulkAction still triggers refresh even though no actual writes occurred. 
This fix checks if locationToSync is null (indicating no writes) and prevents refresh in such cases. Fixes #15261 Signed-off-by: Atri Sharma * Remove all entries from changelog to be released in 3.2 (#18989) Signed-off-by: Andrew Ross * Add temporal routing processors for time-based document routing (#18966) Implements TemporalRoutingProcessor for ingest pipelines and TemporalRoutingSearchProcessor for search pipelines based on RFC #18920. Features: - Route documents to shards based on timestamp fields - Support hour, day, week, and month granularities - Optional hash bucketing for better distribution - Automatic search routing to relevant time ranges - ISO week format support The processors enable efficient time-based data organization for log and metrics workloads by co-locating documents from the same time period on the same shards. --------- Signed-off-by: Atri Sharma * Add CompletionStage variants to methods in the Client Interface and default to ActionListener impl (#18998) * Add CompletableFuture variables to methods in the Client Interface and default to ActionListener impl Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Fix typo in CHANGELOG Signed-off-by: Craig Perkins * Switch to CompletionStage Signed-off-by: Craig Perkins * Update CHANGELOG entry Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins * Expand fetch phase profiling to support inner hits and top hits aggregation phases (#18936) --------- Signed-off-by: Andre van de Ven Signed-off-by: Andre van de Ven <113951599+andrevandeven@users.noreply.github.com> Signed-off-by: Andre van de Ven Co-authored-by: Andre van de Ven * IllegalArgumentException when scroll ID has a node no longer part of the Cluster (#19031) --------- Signed-off-by: Anurag Rai Signed-off-by: Anurag Rai <91844619+anuragrai16@users.noreply.github.com> * Add Changelog entry Signed-off-by: Rajat Gupta * Add secondary constructor Signed-off-by: Rajat Gupta * Modify changelog 
Signed-off-by: Rajat Gupta * Update changelog Signed-off-by: Rajat Gupta * Add another constructor to fix breaking change check Signed-off-by: Rajat Gupta --------- Signed-off-by: Rajat Gupta Signed-off-by: Atri Sharma Signed-off-by: Andrew Ross Signed-off-by: Craig Perkins Signed-off-by: Andre van de Ven Signed-off-by: Andre van de Ven <113951599+andrevandeven@users.noreply.github.com> Signed-off-by: Andre van de Ven Signed-off-by: Anurag Rai Signed-off-by: Anurag Rai <91844619+anuragrai16@users.noreply.github.com> Co-authored-by: Rajat Gupta Co-authored-by: Atri Sharma Co-authored-by: Andrew Ross Co-authored-by: Craig Perkins Co-authored-by: Andre van de Ven <113951599+andrevandeven@users.noreply.github.com> Co-authored-by: Andre van de Ven Co-authored-by: Anurag Rai <91844619+anuragrai16@users.noreply.github.com> --- CHANGELOG.md | 1 + .../translog/InternalTranslogFactory.java | 6 +- .../index/translog/LocalTranslog.java | 30 ++++- ...emoteBlobStoreInternalTranslogFactory.java | 3 +- .../RemoteFsTimestampAwareTranslog.java | 3 +- .../index/translog/RemoteFsTranslog.java | 6 +- .../opensearch/index/translog/Translog.java | 36 ++++- .../translog/TruncateTranslogAction.java | 3 +- .../index/engine/InternalEngineTests.java | 3 +- .../index/translog/LocalTranslogTests.java | 124 +++++++++--------- .../RemoteFsTimestampAwareTranslogTests.java | 10 +- .../index/translog/RemoteFsTranslogTests.java | 33 ++--- .../translog/TranslogManagerTestCase.java | 3 +- .../index/engine/EngineTestCase.java | 3 +- 14 files changed, 160 insertions(+), 104 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89db0857f2c63..9565af89cd65c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add StoreFactory plugin interface for custom Store implementations([#19091](https://github.com/opensearch-project/OpenSearch/pull/19091)) - Use S3CrtClient for higher throughput while uploading files to 
S3 ([#18800](https://github.com/opensearch-project/OpenSearch/pull/18800)) - Add a dynamic setting to change skip_cache_factor and min_frequency for querycache ([#18351](https://github.com/opensearch-project/OpenSearch/issues/18351)) +- Add overload constructor for Translog to accept Channel Factory as a parameter ([#18918](https://github.com/opensearch-project/OpenSearch/pull/18918)) ### Changed - Add CompletionStage variants to methods in the Client Interface and default to ActionListener impl ([#18998](https://github.com/opensearch-project/OpenSearch/pull/18998)) diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java index 6e2a7db0cfeb0..b17f217aa3bef 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java @@ -41,7 +41,8 @@ public Translog newTranslog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -64,7 +65,8 @@ public Translog newTranslog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - translogOperationHelper + translogOperationHelper, + null ); } } diff --git a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java index 6b45ccb867520..1c4b89be40fa1 100644 --- a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java @@ -50,7 +50,8 @@ public LocalTranslog( final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier, final LongConsumer persistedSequenceNumberConsumer, - final TranslogOperationHelper translogOperationHelper + final TranslogOperationHelper 
translogOperationHelper, + final ChannelFactory channelFactory ) throws IOException { super( config, @@ -59,7 +60,8 @@ public LocalTranslog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - translogOperationHelper + translogOperationHelper, + channelFactory ); try { final Checkpoint checkpoint = readCheckpoint(location); @@ -113,6 +115,30 @@ public LocalTranslog( } } + /** + * Secondary constructor that does not accept ChannelFactory parameter. + */ + public LocalTranslog( + final TranslogConfig config, + final String translogUUID, + TranslogDeletionPolicy deletionPolicy, + final LongSupplier globalCheckpointSupplier, + final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer, + final TranslogOperationHelper translogOperationHelper + ) throws IOException { + this( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + translogOperationHelper, + null + ); + } + /** * Ensures that the given location has be synced / written to the underlying storage. 
* diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index 63433a489cbab..1f2b2c48b471a 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -122,7 +122,8 @@ public Translog newTranslog( startedPrimarySupplier, remoteTranslogTransferTracker, remoteStoreSettings, - translogOperationHelper + translogOperationHelper, + null ); } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 920d26356bbb3..7fd915ba2c297 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -90,7 +90,8 @@ public RemoteFsTimestampAwareTranslog( startedPrimarySupplier, remoteTranslogTransferTracker, remoteStoreSettings, - translogOperationHelper + translogOperationHelper, + null ); logger = Loggers.getLogger(getClass(), shardId); this.metadataFilePinnedTimestampMap = new HashMap<>(); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index cda5085d750d0..bbe8b739e2da4 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -108,7 +108,8 @@ public RemoteFsTranslog( BooleanSupplier startedPrimarySupplier, RemoteTranslogTransferTracker remoteTranslogTransferTracker, RemoteStoreSettings remoteStoreSettings, - TranslogOperationHelper translogOperationHelper + TranslogOperationHelper 
translogOperationHelper, + ChannelFactory channelFactory ) throws IOException { super( config, @@ -117,7 +118,8 @@ public RemoteFsTranslog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - translogOperationHelper + translogOperationHelper, + channelFactory ); logger = Loggers.getLogger(getClass(), shardId); this.startedPrimarySupplier = startedPrimarySupplier; diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 1bd0120586ed3..7f949f85a64ab 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -154,6 +154,7 @@ public abstract class Translog extends AbstractIndexShardComponent implements In protected final TranslogDeletionPolicy deletionPolicy; protected final LongConsumer persistedSequenceNumberConsumer; protected final TranslogOperationHelper translogOperationHelper; + protected final ChannelFactory channelFactory; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is @@ -182,7 +183,8 @@ public Translog( final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier, final LongConsumer persistedSequenceNumberConsumer, - final TranslogOperationHelper translogOperationHelper + final TranslogOperationHelper translogOperationHelper, + final ChannelFactory channelFactory ) throws IOException { super(config.getShardId(), config.getIndexSettings()); this.config = config; @@ -198,6 +200,31 @@ public Translog( this.location = config.getTranslogPath(); Files.createDirectories(this.location); this.translogOperationHelper = translogOperationHelper; + this.channelFactory = channelFactory != null ? 
channelFactory : FileChannel::open; + } + + /** + * Constructor that does not accept channelFactory parameter but accepts translogOperationHelper + */ + public Translog( + final TranslogConfig config, + final String translogUUID, + TranslogDeletionPolicy deletionPolicy, + final LongSupplier globalCheckpointSupplier, + final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer, + final TranslogOperationHelper translogOperationHelper + ) throws IOException { + this( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + translogOperationHelper, + null + ); } /** @@ -218,7 +245,8 @@ public Translog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + FileChannel::open ); assert config.getIndexSettings().isDerivedSourceEnabled() == false; // For derived source supported index, it is incorrect to use // this constructor @@ -324,7 +352,7 @@ protected void copyCheckpointTo(Path targetPath) throws IOException { } TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException { - FileChannel channel = FileChannel.open(path, StandardOpenOption.READ); + FileChannel channel = getChannelFactory().open(path, StandardOpenOption.READ); try { assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) @@ -1931,7 +1959,7 @@ protected void ensureOpen() { } ChannelFactory getChannelFactory() { - return FileChannel::open; + return this.channelFactory; } /** diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 2e515cb72fd9f..eb592822c17a4 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ 
b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -222,7 +222,8 @@ public long minTranslogGenRequired(List readers, TranslogWriter () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); Translog.Snapshot snapshot = translog.newSnapshot(0, Long.MAX_VALUE) ) { diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index d004d5aa90eac..bb5a1eb568108 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -4182,7 +4182,8 @@ public void testRecoverFromForeignTranslog() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); translog.add(new Translog.Index("SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index 4c9bc2ae622ba..d61b63e6ff53b 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -223,7 +223,8 @@ protected Translog createTranslog(TranslogConfig config) throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, getPersistedSeqNoConsumer(), - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -235,7 +236,8 @@ protected Translog openTranslog(TranslogConfig config, String translogUUID) thro () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, 
getPersistedSeqNoConsumer(), - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -272,7 +274,8 @@ private Translog create(Path path) throws IOException { () -> globalCheckpoint.get(), primaryTerm::get, getPersistedSeqNoConsumer(), - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -1508,13 +1511,9 @@ public int write(ByteBuffer src) throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, persistedSeqNos::add, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + channelFactory + ) ) { TranslogWriter writer = translog.getCurrent(); int initialWriteCalls = writeCalls.get(); @@ -1614,13 +1613,9 @@ public void force(boolean metaData) throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, persistedSeqNos::add, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + channelFactory + ) ) { TranslogWriter writer = translog.getCurrent(); byte[] bytes = new byte[256]; @@ -1712,13 +1707,9 @@ public void force(boolean metaData) throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, persistedSeqNos::add, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + channelFactory + ) ) { TranslogWriter writer = translog.getCurrent(); @@ -1819,7 +1810,8 @@ public void testBasicRecovery() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); assertEquals( "lastCommitted must be 1 less than current", @@ -1879,7 +1871,8 @@ public void testRecoveryUncommitted() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, 
primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertNotNull(translogGeneration); @@ -1907,7 +1900,8 @@ public void testRecoveryUncommitted() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertNotNull(translogGeneration); @@ -1970,7 +1964,8 @@ public void testRecoveryUncommittedFileExists() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertNotNull(translogGeneration); @@ -1999,7 +1994,8 @@ public void testRecoveryUncommittedFileExists() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertNotNull(translogGeneration); @@ -2064,7 +2060,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ); assertThat( @@ -2091,7 +2088,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertNotNull(translogGeneration); @@ -2390,7 +2388,8 @@ public void testOpenForeignTranslog() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { @@ -2403,7 +2402,8 @@ public void testOpenForeignTranslog() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - 
TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); try (Translog.Snapshot snapshot = this.translog.newSnapshot(randomLongBetween(0, firstUncommitted), Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { @@ -2634,7 +2634,8 @@ public void testFailFlush() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertEquals( @@ -2792,7 +2793,8 @@ protected void afterAdd() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); Translog.Snapshot snapshot = tlog.newSnapshot() ) { @@ -2856,7 +2858,8 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered @@ -2926,7 +2929,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { // we don't know when things broke exactly @@ -3003,13 +3007,9 @@ private Translog getFailableTranslog( () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + channelFactory ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - @Override void deleteReaderFiles(TranslogReader reader) { if (fail.fail()) { @@ -3151,7 +3151,8 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + 
TranslogOperationHelper.DEFAULT, + null ) { @Override protected TranslogWriter createWriter( @@ -3220,7 +3221,8 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ); assertEquals(ex.getMessage(), "failed to create new translog file"); @@ -3248,7 +3250,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { assertFalse(tlog.syncNeeded()); @@ -3271,7 +3274,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ); assertEquals(ex.getMessage(), "failed to create new translog file"); @@ -3402,7 +3406,8 @@ public void testWithRandomException() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); Translog.Snapshot snapshot = translog.newSnapshot(localCheckpointOfSafeCommit + 1, Long.MAX_VALUE) ) { @@ -3498,7 +3503,8 @@ public void testPendingDelete() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); translog.add(new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 2 })); translog.rollGeneration(); @@ -3513,7 +3519,8 @@ public void testPendingDelete() throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -3881,7 +3888,8 @@ class MisbehavingTranslog extends LocalTranslog { 
globalCheckpointSupplier, primaryTermSupplier, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -3989,7 +3997,8 @@ public void copy(Path source, Path target, CopyOption... options) throws IOExcep () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { recoveredTranslog.rollGeneration(); @@ -4024,7 +4033,8 @@ public void testSyncConcurrently() throws Exception { globalCheckpointSupplier, primaryTerm::get, persistedSeqNos::add, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ) ) { Thread[] threads = new Thread[between(2, 8)]; @@ -4106,13 +4116,9 @@ public void force(boolean metaData) throws IOException { () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + channelFactory ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - @Override void syncBeforeRollGeneration() { // make it a noop like the old versions diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java index 9682c0ba45a06..6c89cf2adf988 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java @@ -621,13 +621,9 @@ public void testExtraGenToKeep() throws Exception { () -> Boolean.TRUE, new RemoteTranslogTransferTracker(shardId, 10), DefaultRemoteStoreSettings.INSTANCE, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + channelFactory + ) ) { addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), 
new byte[] { 1 })); addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 })); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 7da3bba9448a4..edcdca3f7b3de 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -201,7 +201,8 @@ protected RemoteFsTranslog createTranslogInstance( primaryMode::get, new RemoteTranslogTransferTracker(shardId, 10), DefaultRemoteStoreSettings.INSTANCE, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } @@ -475,13 +476,9 @@ public void testExtraGenToKeep() throws Exception { () -> Boolean.TRUE, new RemoteTranslogTransferTracker(shardId, 10), DefaultRemoteStoreSettings.INSTANCE, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + null + ) ) { addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); @@ -1525,13 +1522,9 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { () -> Boolean.TRUE, new RemoteTranslogTransferTracker(shardId, 10), DefaultRemoteStoreSettings.INSTANCE, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + channelFactory + ) ) { TranslogWriter writer = translog.getCurrent(); int initialWriteCalls = writeCalls.get(); @@ -1636,13 +1629,9 @@ public void force(boolean metaData) throws IOException { () -> Boolean.TRUE, new RemoteTranslogTransferTracker(shardId, 10), DefaultRemoteStoreSettings.INSTANCE, - TranslogOperationHelper.DEFAULT - ) { - @Override - ChannelFactory getChannelFactory() { - return 
channelFactory; - } - } + TranslogOperationHelper.DEFAULT, + channelFactory + ) ) { TranslogWriter writer = translog.getCurrent(); byte[] bytes = new byte[256]; diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java index 22aa2e88e665f..10529bb155845 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java @@ -95,7 +95,8 @@ protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSup () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier, seqNo -> {}, - TranslogOperationHelper.DEFAULT + TranslogOperationHelper.DEFAULT, + null ); } diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 9e8c3239f5197..fe6e38e1b3e48 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -553,7 +553,8 @@ protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSup () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier, seqNo -> {}, - TranslogOperationHelper.create(engine.config()) + TranslogOperationHelper.create(engine.config()), + null ); } From bb2d6c2c1e51072cab7bef2196fa7d980be8f84c Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 28 Aug 2025 00:40:24 -0700 Subject: [PATCH 17/27] Replace usage of deprecated createIndex() method in tests (#18389) The deprecated versions of this method take a type parameter, support for which was removed back in 2.0. The parameter is not used. I have kept the deprecated methods so as to not break downstream components that may be using them but changed all the code in server to stop passing in a type parameter. 
Signed-off-by: Andrew Ross --- .../common/DisableGraphQueryTests.java | 3 +- .../opensearch/painless/NeedsScoreTests.java | 2 +- .../action/PainlessExecuteApiTests.java | 4 +-- .../PercolatorQuerySearchTests.java | 5 ++-- .../index/mapper/size/SizeMappingTests.java | 8 ++--- .../opensearch/index/shard/IndexShardIT.java | 5 ++-- .../aggregations/FiltersAggsRewriteIT.java | 2 +- .../termvectors/GetTermVectorsTests.java | 2 +- .../index/analysis/PreBuiltAnalyzerTests.java | 2 +- .../ConstantKeywordFieldMapperTests.java | 2 +- .../index/mapper/MapperServiceTests.java | 4 +-- .../index/mapper/NestedObjectMapperTests.java | 3 -- .../index/mapper/UpdateMappingTests.java | 8 ++--- .../index/search/NestedHelperTests.java | 2 +- .../search/nested/NestedSortingTests.java | 2 +- .../index/similarity/SimilarityTests.java | 16 +++++----- .../termvectors/TermVectorsServiceTests.java | 6 ++-- .../aggregations/AggregatorBaseTests.java | 2 +- .../support/ValuesSourceConfigTests.java | 30 +++++++++++-------- .../fetch/subphase/FieldFetcherTests.java | 12 ++++---- .../DerivedFieldFetchAndHighlightTests.java | 4 +-- .../search/geo/GeoShapeQueryTests.java | 6 ++-- .../CategoryContextMappingTests.java | 2 +- .../completion/GeoContextMappingTests.java | 18 ++++------- .../test/OpenSearchSingleNodeTestCase.java | 27 +++++++++-------- 25 files changed, 86 insertions(+), 91 deletions(-) diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java index 738c81c13cb6c..261c1bfafd2ba 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java @@ -93,10 +93,9 @@ public void setup() { .put("index.analysis.analyzer.text_shingle_unigram.tokenizer", "whitespace") 
.put("index.analysis.analyzer.text_shingle_unigram.filter", "lowercase, shingle_unigram") .build(); - indexService = createIndex( + indexService = createIndexWithSimpleMappings( "test", settings, - "t", "text_shingle", "type=text,analyzer=text_shingle", "text_shingle_unigram", diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/NeedsScoreTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/NeedsScoreTests.java index 9f87fbedb2a8f..f036968d96658 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/NeedsScoreTests.java @@ -52,7 +52,7 @@ public class NeedsScoreTests extends OpenSearchSingleNodeTestCase { public void testNeedsScores() { - IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); + IndexService index = createIndexWithSimpleMappings("test", Settings.EMPTY, "d", "type=double"); Map, List> contexts = new HashMap<>(); contexts.put(NumberSortScript.CONTEXT, Allowlist.BASE_ALLOWLISTS); diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java index d1ab998c314b0..ccc7fa1c99332 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java @@ -89,7 +89,7 @@ public void testDefaults() throws IOException { public void testFilterExecutionContext() throws IOException { ScriptService scriptService = getInstanceFromNode(ScriptService.class); - IndexService indexService = createIndex("index", Settings.EMPTY, "doc", "field", "type=long"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "field", "type=long"); Request.ContextSetup contextSetup = new 
Request.ContextSetup("index", new BytesArray("{\"field\": 3}"), null); contextSetup.setXContentType(MediaTypeRegistry.JSON); @@ -120,7 +120,7 @@ public void testFilterExecutionContext() throws IOException { public void testScoreExecutionContext() throws IOException { ScriptService scriptService = getInstanceFromNode(ScriptService.class); - IndexService indexService = createIndex("index", Settings.EMPTY, "doc", "rank", "type=long", "text", "type=text"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "rank", "type=long", "text", "type=text"); Request.ContextSetup contextSetup = new Request.ContextSetup( "index", diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java index 97e80c66e3f4e..5f4925a4ae577 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java @@ -281,7 +281,7 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() public void testMapUnmappedFieldAsText() throws IOException { Settings.Builder settings = Settings.builder().put("index.percolator.map_unmapped_fields_as_text", true); - createIndex("test", settings.build(), "query", "query", "type=percolator"); + createIndexWithSimpleMappings("test", settings.build(), "query", "type=percolator"); client().prepareIndex("test") .setId("1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()) @@ -302,10 +302,9 @@ public void testMapUnmappedFieldAsText() throws IOException { } public void testRangeQueriesWithNow() throws Exception { - IndexService indexService = createIndex( + IndexService indexService = createIndexWithSimpleMappings( "test", Settings.builder().put("index.number_of_shards", 1).build(), - "_doc", "field1", 
"type=keyword", "field2", diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java index e7e8d92cee65a..49aab68be416b 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java @@ -60,7 +60,7 @@ protected Collection> getPlugins() { } public void testSizeEnabled() throws Exception { - IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true"); + IndexService service = createIndexWithSimpleMappings("test", Settings.EMPTY, "_size", "enabled=true"); DocumentMapper docMapper = service.mapperService().documentMapper(); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); @@ -77,7 +77,7 @@ public void testSizeEnabled() throws Exception { } public void testSizeDisabled() throws Exception { - IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=false"); + IndexService service = createIndexWithSimpleMappings("test", Settings.EMPTY, "_size", "enabled=false"); DocumentMapper docMapper = service.mapperService().documentMapper(); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); @@ -87,7 +87,7 @@ public void testSizeDisabled() throws Exception { } public void testSizeNotSet() throws Exception { - IndexService service = createIndex("test", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME); + IndexService service = createIndexWithSimpleMappings("test", Settings.EMPTY); DocumentMapper docMapper = service.mapperService().documentMapper(); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 
"value").endObject()); @@ -97,7 +97,7 @@ public void testSizeNotSet() throws Exception { } public void testThatDisablingWorksWhenMerging() throws Exception { - IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true"); + IndexService service = createIndexWithSimpleMappings("test", Settings.EMPTY, "_size", "enabled=true"); DocumentMapper docMapper = service.mapperService().documentMapper(); assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(true)); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 2ce50c8b5a768..8cd6fb7ed5aa6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -77,7 +77,6 @@ import org.opensearch.index.engine.MergedSegmentWarmerFactory; import org.opensearch.index.engine.NoOpEngine; import org.opensearch.index.flush.FlushStats; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.seqno.SequenceNumbers; @@ -477,7 +476,7 @@ public void testMaybeRollTranslogGeneration() throws Exception { .put("index.number_of_shards", 1) .put("index.translog.generation_threshold_size", generationThreshold + "b") .build(); - createIndex("test", settings, MapperService.SINGLE_MAPPING_NAME); + createIndexWithSimpleMappings("test", settings); ensureGreen("test"); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final IndexService test = indicesService.indexService(resolveIndex("test")); @@ -813,7 +812,7 @@ public void testShardChangesWithDefaultDocType() throws Exception { .put("index.translog.flush_threshold_size", "512mb") // do not flush .put("index.soft_deletes.enabled", true) .build(); - 
IndexService indexService = createIndex("index", settings, "user_doc", "title", "type=keyword"); + IndexService indexService = createIndexWithSimpleMappings("index", settings, "title", "type=keyword"); int numOps = between(1, 10); for (int i = 0; i < numOps; i++) { if (randomBoolean()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java index b8d1d3cad77b4..c6ca4d36a86d7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java @@ -51,7 +51,7 @@ public class FiltersAggsRewriteIT extends OpenSearchSingleNodeTestCase { public void testWrapperQueryIsRewritten() throws IOException { - createIndex("test", Settings.EMPTY, "test", "title", "type=text"); + createIndexWithSimpleMappings("test", Settings.EMPTY, "title", "type=text"); client().prepareIndex("test").setId("1").setSource("title", "foo bar baz").get(); client().prepareIndex("test").setId("2").setSource("title", "foo foo foo").get(); client().prepareIndex("test").setId("3").setSource("title", "bar baz bax").get(); diff --git a/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java b/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java index 7dd73966bb079..88ecd0f94e1a2 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java @@ -184,7 +184,7 @@ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOExceptio .put("index.analysis.filter.my_delimited_payload.encoding", encodingString) .put("index.analysis.filter.my_delimited_payload.type", "mock_payload_filter") .build(); - createIndex("test", setting, "type1", mapping); 
+ createIndex("test", setting, mapping); client().prepareIndex("test") .setId(Integer.toString(1)) diff --git a/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java index 6f9a662caff46..d8fb8603484b6 100644 --- a/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java @@ -127,7 +127,7 @@ public void testThatAnalyzersAreUsedInMapping() throws IOException { .endObject() .endObject() .endObject(); - MapperService mapperService = createIndex("test", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("test", indexSettings, mapping).mapperService(); MappedFieldType fieldType = mapperService.fieldType("field"); assertThat(fieldType.getTextSearchInfo().getSearchAnalyzer(), instanceOf(NamedAnalyzer.class)); diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java index ec670ec969bad..9fcadcfb36b69 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java @@ -156,7 +156,7 @@ public void testDerivedValueFetching() throws IOException { } private ConstantKeywordFieldMapper getMapper(FieldMapper.CopyTo copyTo) { - indexService = createIndex("test-index", Settings.EMPTY, "constant_keyword", "field", "type=constant_keyword,value=default_value"); + indexService = createIndexWithSimpleMappings("test-index", Settings.EMPTY, "field", "type=constant_keyword,value=default_value"); ConstantKeywordFieldMapper mapper = (ConstantKeywordFieldMapper) indexService.mapperService() .documentMapper() .mappers() diff --git 
a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index bc0fa4e96d011..24f9f7c78372c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -235,10 +235,10 @@ public void testIndexSortWithNestedFieldsWithOlderVersion() throws IOException { Settings settings = settings(Version.V_3_0_0).put("index.sort.field", "foo").build(); IllegalArgumentException invalidNestedException = expectThrows( IllegalArgumentException.class, - () -> createIndex("test", settings, "t", "nested_field", "type=nested", "foo", "type=keyword") + () -> createIndexWithSimpleMappings("test", settings, "nested_field", "type=nested", "foo", "type=keyword") ); assertThat(invalidNestedException.getMessage(), containsString("cannot have nested fields when index sort is activated")); - IndexService indexService = createIndex("test", settings, "t", "foo", "type=keyword"); + IndexService indexService = createIndexWithSimpleMappings("test", settings, "foo", "type=keyword"); CompressedXContent nestedFieldMapping = new CompressedXContent( BytesReference.bytes( XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index 9a0d34c916f5c..cf668178b3df0 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -881,7 +881,6 @@ public void testParentObjectMapperAreNested() throws Exception { MapperService mapperService = createIndex( "index1", Settings.EMPTY, - "_doc", jsonBuilder().startObject() .startObject("properties") .startObject("comments") @@ -901,7 +900,6 @@ public void testParentObjectMapperAreNested() throws Exception { mapperService 
= createIndex( "index2", Settings.EMPTY, - "_doc", jsonBuilder().startObject() .startObject("properties") .startObject("comments") @@ -1107,7 +1105,6 @@ public void testMergeNestedMappings() throws IOException { MapperService mapperService = createIndex( "index1", Settings.EMPTY, - MapperService.SINGLE_MAPPING_NAME, jsonBuilder().startObject() .startObject("properties") .startObject("nested1") diff --git a/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java index 7e40354eb7f29..b5d6922a12fcc 100644 --- a/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java @@ -83,7 +83,7 @@ public void testConflictFieldsMapping(String fieldName) throws Exception { } protected void testConflictWhileMergingAndMappingUnchanged(XContentBuilder mapping, XContentBuilder mappingUpdate) throws IOException { - IndexService indexService = createIndex("test", Settings.builder().build(), MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("test", Settings.builder().build(), mapping); CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper().mappingSource(); // simulate like in MetadataMappingService#putMapping try { @@ -111,8 +111,7 @@ public void testConflictSameType() throws Exception { .endObject() .endObject() .endObject(); - MapperService mapperService = createIndex("test", Settings.builder().build(), MapperService.SINGLE_MAPPING_NAME, mapping) - .mapperService(); + MapperService mapperService = createIndex("test", Settings.builder().build(), mapping).mapperService(); XContentBuilder update = XContentFactory.jsonBuilder() .startObject() @@ -158,8 +157,7 @@ public void testConflictNewType() throws Exception { .endObject() .endObject() .endObject(); - MapperService mapperService = createIndex("test", Settings.builder().build(), 
MapperService.SINGLE_MAPPING_NAME, mapping) - .mapperService(); + MapperService mapperService = createIndex("test", Settings.builder().build(), mapping).mapperService(); XContentBuilder update = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java index cc77a19755f5d..5d8469241831b 100644 --- a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java +++ b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java @@ -117,7 +117,7 @@ public void setUp() throws Exception { .endObject() .endObject() .endObject(); - indexService = createIndex("index", Settings.EMPTY, "type", mapping); + indexService = createIndex("index", Settings.EMPTY, mapping); mapperService = indexService.mapperService(); } diff --git a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java index f50dcfde112f2..b66fcf55b5e4d 100644 --- a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java @@ -466,7 +466,7 @@ public void testMultiLevelNestedSorting() throws IOException { mapping.endObject(); } mapping.endObject(); - IndexService indexService = createIndex("nested_sorting", Settings.EMPTY, "_doc", mapping); + IndexService indexService = createIndex("nested_sorting", Settings.EMPTY, mapping); List> books = new ArrayList<>(); { diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java index 1fafa4739b8b4..ccaeeef190684 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java @@ -102,7 +102,7 @@ 
public void testResolveLegacySimilarity() throws IOException { .endObject() .endObject(); - MapperService mapperService = createIndex("foo", settings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", settings, mapping).mapperService(); assertThat(mapperService.fieldType("dummy").getTextSearchInfo().getSimilarity().get(), instanceOf(LegacyBM25Similarity.class)); } @@ -136,7 +136,7 @@ public void testResolveSimilaritiesFromMapping_bm25() throws IOException { .put("index.similarity.my_similarity.b", 0.5f) .put("index.similarity.my_similarity.discount_overlaps", false) .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", indexSettings, mapping).mapperService(); assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(BM25Similarity.class)); BM25Similarity similarity = (BM25Similarity) mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(); @@ -156,7 +156,7 @@ public void testResolveSimilaritiesFromMapping_boolean() throws IOException { .endObject() .endObject(); - MapperService mapperService = createIndex("foo", Settings.EMPTY, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", Settings.EMPTY, mapping).mapperService(); assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(BooleanSimilarity.class)); } @@ -178,7 +178,7 @@ public void testResolveSimilaritiesFromMapping_DFR() throws IOException { .put("index.similarity.my_similarity.normalization", "h2") .put("index.similarity.my_similarity.normalization.h2.c", 3f) .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", indexSettings, mapping).mapperService(); 
assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(DFRSimilarity.class)); DFRSimilarity similarity = (DFRSimilarity) mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(); @@ -206,7 +206,7 @@ public void testResolveSimilaritiesFromMapping_IB() throws IOException { .put("index.similarity.my_similarity.normalization", "h2") .put("index.similarity.my_similarity.normalization.h2.c", 3f) .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", indexSettings, mapping).mapperService(); assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(IBSimilarity.class)); IBSimilarity similarity = (IBSimilarity) mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(); @@ -231,7 +231,7 @@ public void testResolveSimilaritiesFromMapping_DFI() throws IOException { .put("index.similarity.my_similarity.type", "DFI") .put("index.similarity.my_similarity.independence_measure", "chisquared") .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", indexSettings, mapping).mapperService(); MappedFieldType fieldType = mapperService.fieldType("field1"); assertThat(fieldType.getTextSearchInfo().getSimilarity().get(), instanceOf(DFISimilarity.class)); @@ -255,7 +255,7 @@ public void testResolveSimilaritiesFromMapping_LMDirichlet() throws IOException .put("index.similarity.my_similarity.mu", 3000f) .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", indexSettings, mapping).mapperService(); assertThat(mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(LMDirichletSimilarity.class)); LMDirichletSimilarity similarity = 
(LMDirichletSimilarity) mapperService.fieldType("field1") @@ -280,7 +280,7 @@ public void testResolveSimilaritiesFromMapping_LMJelinekMercer() throws IOExcept .put("index.similarity.my_similarity.type", "LMJelinekMercer") .put("index.similarity.my_similarity.lambda", 0.7f) .build(); - MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); + MapperService mapperService = createIndex("foo", indexSettings, mapping).mapperService(); assertThat( mapperService.fieldType("field1").getTextSearchInfo().getSimilarity().get(), instanceOf(LMJelinekMercerSimilarity.class) diff --git a/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java b/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java index 37b672dc064c4..f6f50d6cdf2a2 100644 --- a/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java +++ b/server/src/test/java/org/opensearch/index/termvectors/TermVectorsServiceTests.java @@ -67,7 +67,7 @@ public void testTook() throws Exception { .endObject() .endObject() .endObject(); - createIndex("test", Settings.EMPTY, "type1", mapping); + createIndex("test", Settings.EMPTY, mapping); ensureGreen(); client().prepareIndex("test").setId("0").setSource("field", "foo bar").setRefreshPolicy(IMMEDIATE).get(); @@ -96,7 +96,7 @@ public void testDocFreqs() throws IOException { .endObject() .endObject(); Settings settings = Settings.builder().put("number_of_shards", 1).build(); - createIndex("test", settings, "_doc", mapping); + createIndex("test", settings, mapping); ensureGreen(); int max = between(3, 10); @@ -135,7 +135,7 @@ public void testWithIndexedPhrases() throws IOException { .endObject() .endObject(); Settings settings = Settings.builder().put("number_of_shards", 1).build(); - createIndex("test", settings, "_doc", mapping); + createIndex("test", settings, mapping); ensureGreen(); int max = between(3, 10); diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java index ce96623ea06df..34c1f5ab7f218 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java @@ -144,7 +144,7 @@ private ValuesSourceConfig getVSConfig( } public void testShortcutIsApplicable() throws IOException { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "bytes", "type=keyword"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "bytes", "type=keyword"); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java index 568c3c950f588..bcd35d6418652 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -42,7 +42,6 @@ import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.fielddata.SortedBinaryDocValues; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -50,7 +49,7 @@ public class ValuesSourceConfigTests extends OpenSearchSingleNodeTestCase { public void testKeyword() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "bytes", "type=keyword"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "bytes", 
"type=keyword"); client().prepareIndex("index").setId("1").setSource("bytes", "abc").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -76,7 +75,7 @@ public void testKeyword() throws Exception { } public void testEmptyKeyword() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "bytes", "type=keyword"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "bytes", "type=keyword"); client().prepareIndex("index").setId("1").setSource().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -107,7 +106,7 @@ public void testEmptyKeyword() throws Exception { } public void testUnmappedKeyword() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type"); + IndexService indexService = createIndex("index", Settings.EMPTY); client().prepareIndex("index").setId("1").setSource().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -137,7 +136,7 @@ public void testUnmappedKeyword() throws Exception { } public void testLong() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "long", "type=long"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "long", "type=long"); client().prepareIndex("index").setId("1").setSource("long", 42).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -163,7 +162,7 @@ public void testLong() throws Exception { } public void testEmptyLong() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "long", "type=long"); + IndexService indexService = 
createIndexWithSimpleMappings("index", Settings.EMPTY, "long", "type=long"); client().prepareIndex("index").setId("1").setSource().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -194,7 +193,7 @@ public void testEmptyLong() throws Exception { } public void testUnmappedLong() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type"); + IndexService indexService = createIndex("index", Settings.EMPTY); client().prepareIndex("index").setId("1").setSource().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -225,7 +224,7 @@ public void testUnmappedLong() throws Exception { } public void testBoolean() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "bool", "type=boolean"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "bool", "type=boolean"); client().prepareIndex("index").setId("1").setSource("bool", true).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -251,7 +250,7 @@ public void testBoolean() throws Exception { } public void testEmptyBoolean() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "bool", "type=boolean"); + IndexService indexService = createIndexWithSimpleMappings("index", Settings.EMPTY, "bool", "type=boolean"); client().prepareIndex("index").setId("1").setSource().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -282,7 +281,7 @@ public void testEmptyBoolean() throws Exception { } public void testUnmappedBoolean() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, 
"type"); + IndexService indexService = createIndex("index", Settings.EMPTY); client().prepareIndex("index").setId("1").setSource().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -313,7 +312,14 @@ public void testUnmappedBoolean() throws Exception { } public void testFieldAlias() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY, "type", "field", "type=keyword", "alias", "type=alias,path=field"); + IndexService indexService = createIndexWithSimpleMappings( + "index", + Settings.EMPTY, + "field", + "type=keyword", + "alias", + "type=alias,path=field" + ); client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -354,7 +360,7 @@ public void testDerivedField() throws Exception { .endObject() .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java index 1c8a93f6483ae..a738b386f277e 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java @@ -277,7 +277,7 @@ public void testIgnoreAbove() throws IOException { .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, 
MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); XContentBuilder source = XContentFactory.jsonBuilder() @@ -307,7 +307,7 @@ public void testFieldAliases() throws IOException { .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("field", "value").endObject(); @@ -341,7 +341,7 @@ public void testMultiFields() throws IOException { .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("field", 42).endObject(); @@ -374,7 +374,7 @@ public void testCopyTo() throws IOException { .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); XContentBuilder source = XContentFactory.jsonBuilder() @@ -420,7 +420,7 @@ public void testTextSubFields() throws IOException { .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); XContentBuilder source = XContentFactory.jsonBuilder().startObject().array("field", "some text").endObject(); @@ 
-484,7 +484,7 @@ public MapperService createMapperService() throws IOException { .endObject() .endObject(); - IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("index", Settings.EMPTY, mapping); return indexService.mapperService(); } diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java index f106aaa13dc48..409f123d6e108 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java @@ -124,7 +124,7 @@ public void testDerivedFieldFromIndexMapping() throws IOException { .endObject(); int docId = 0; - IndexService indexService = createIndex("test_index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("test_index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); try ( @@ -260,7 +260,7 @@ public void testDerivedFieldFromSearchMapping() throws IOException { // Create index and mapper service // We are not defining derived fields in index mapping here XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().endObject(); - IndexService indexService = createIndex("test_index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + IndexService indexService = createIndex("test_index", Settings.EMPTY, mapping); MapperService mapperService = indexService.mapperService(); try ( diff --git a/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java index 4f78d9166b414..3fb6fef08452f 100644 --- 
a/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java @@ -365,7 +365,7 @@ public void testEnvelopeSpanningDateline() throws Exception { public void testGeometryCollectionRelations() throws Exception { XContentBuilder mapping = createDefaultMapping(); - createIndex("test", Settings.builder().put("index.number_of_shards", 1).build(), "doc", mapping); + createIndex("test", Settings.builder().put("index.number_of_shards", 1).build(), mapping); EnvelopeBuilder envelopeBuilder = new EnvelopeBuilder(new Coordinate(-10, 10), new Coordinate(10, -10)); @@ -490,7 +490,7 @@ public void testEdgeCases() throws Exception { public void testIndexedShapeReferenceSourceDisabled() throws Exception { XContentBuilder mapping = createDefaultMapping(); client().admin().indices().prepareCreate("test").setMapping(mapping).get(); - createIndex("shapes", Settings.EMPTY, "shape_type", "_source", "enabled=false"); + createIndexWithSimpleMappings("shapes", Settings.EMPTY, "_source", "enabled=false"); ensureGreen(); EnvelopeBuilder shape = new EnvelopeBuilder(new Coordinate(-45, 45), new Coordinate(45, -45)); @@ -749,7 +749,7 @@ public void testFieldAlias() throws IOException { .endObject() .endObject(); - createIndex("test", Settings.EMPTY, "type", mapping); + createIndex("test", Settings.EMPTY, mapping); ShapeBuilder shape = RandomShapeGenerator.createShape(random(), RandomShapeGenerator.ShapeType.MULTIPOINT); client().prepareIndex("test") diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java index 09a66a2cfcd91..57be372b81359 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java @@ -787,7 +787,7 @@ 
public void testUnknownQueryContextParsing() throws Exception { .endObject() .endObject(); - MapperService mapperService = createIndex("test", Settings.EMPTY, "type1", mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.EMPTY, mapping).mapperService(); CompletionFieldType completionFieldType = (CompletionFieldType) mapperService.fieldType("completion"); Exception e = expectThrows(IllegalArgumentException.class, () -> completionFieldType.getContextMappings().get("brand")); diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java index 07f3526dd2bb0..88898fb8cf963 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java @@ -77,7 +77,7 @@ public void testIndexingWithNoContexts() throws Exception { .endObject() .endObject(); - MapperService mapperService = createIndex("test", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.EMPTY, mapping).mapperService(); MappedFieldType completionFieldType = mapperService.fieldType("completion"); ParsedDocument parsedDocument = mapperService.documentMapper() .parse( @@ -124,7 +124,7 @@ public void testIndexingWithSimpleContexts() throws Exception { .endObject() .endObject(); - MapperService mapperService = createIndex("test", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.EMPTY, mapping).mapperService(); MappedFieldType completionFieldType = mapperService.fieldType("completion"); ParsedDocument parsedDocument = mapperService.documentMapper() .parse( @@ -169,7 +169,7 @@ public void testIndexingWithContextList() throws Exception { .endObject() 
.endObject(); - MapperService mapperService = createIndex("test", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.EMPTY, mapping).mapperService(); MappedFieldType completionFieldType = mapperService.fieldType("completion"); ParsedDocument parsedDocument = mapperService.documentMapper() .parse( @@ -222,7 +222,7 @@ public void testIndexingWithMultipleContexts() throws Exception { .endObject() .endObject(); - MapperService mapperService = createIndex("test", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.EMPTY, mapping).mapperService(); MappedFieldType completionFieldType = mapperService.fieldType("completion"); XContentBuilder builder = jsonBuilder().startObject() .startArray("completion") @@ -268,10 +268,7 @@ public void testMalformedGeoField() throws Exception { mapping.endObject(); mapping.endObject(); - OpenSearchParseException ex = expectThrows( - OpenSearchParseException.class, - () -> createIndex("test", Settings.EMPTY, "type1", mapping) - ); + OpenSearchParseException ex = expectThrows(OpenSearchParseException.class, () -> createIndex("test", Settings.EMPTY, mapping)); assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] must be mapped to geo_point, found [" + type + "]")); } @@ -298,10 +295,7 @@ public void testMissingGeoField() throws Exception { mapping.endObject(); mapping.endObject(); - OpenSearchParseException ex = expectThrows( - OpenSearchParseException.class, - () -> createIndex("test", Settings.EMPTY, "type1", mapping) - ); + OpenSearchParseException ex = expectThrows(OpenSearchParseException.class, () -> createIndex("test", Settings.EMPTY, mapping)); assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] is not defined in the mapping")); } diff --git 
a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index 1d84eeca9c6c2..48a5184349658 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -339,17 +339,15 @@ protected IndexService createIndex(String index) { * Create a new index on the singleton node with the provided index settings. */ protected IndexService createIndex(String index, Settings settings) { - return createIndex(index, settings, null, (XContentBuilder) null); + return createIndex(index, settings, null); } /** - * Create a new index on the singleton node with the provided index settings. - * @deprecated types are being removed + * Create a new index on the singleton node with the provided index settings and mappings. */ - @Deprecated - protected IndexService createIndex(String index, Settings settings, String type, XContentBuilder mappings) { + protected IndexService createIndex(String index, Settings settings, XContentBuilder mappings) { CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings); - if (type != null && mappings != null) { + if (mappings != null) { createIndexRequestBuilder.setMapping(mappings); } return createIndex(index, createIndexRequestBuilder); @@ -357,15 +355,20 @@ protected IndexService createIndex(String index, Settings settings, String type, /** * Create a new index on the singleton node with the provided index settings. 
- * @deprecated types are being removed + * @deprecated types have been removed + */ + @Deprecated + protected IndexService createIndex(String index, Settings settings, String type, XContentBuilder mappings) { + return createIndex(index, settings, mappings); + } + + /** + * Create a new index on the singleton node with the provided index settings. + * @deprecated types have been removed */ @Deprecated protected IndexService createIndex(String index, Settings settings, String type, String... mappings) { - CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings); - if (mappings != null) { - createIndexRequestBuilder.setMapping(mappings); - } - return createIndex(index, createIndexRequestBuilder); + return createIndexWithSimpleMappings(index, settings, mappings); } /** From bc44848054ae06a4f367979be39489a4d0e4d788 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 28 Aug 2025 06:29:30 -0400 Subject: [PATCH 18/27] [AUTO] [main] Add bwc version 2.19.4. 
(#18837) * Add bwc version 2.19.4 Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> * Update libs/core/src/main/java/org/opensearch/Version.java Signed-off-by: Craig Perkins --------- Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Craig Perkins Co-authored-by: opensearch-ci-bot <83309141+opensearch-ci-bot@users.noreply.github.com> Co-authored-by: Craig Perkins --- .ci/bwcVersions | 1 + libs/core/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 05121e16e3896..3833f497536c3 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -46,6 +46,7 @@ BWC_VERSION: - "2.19.1" - "2.19.2" - "2.19.3" + - "2.19.4" - "3.0.0" - "3.1.0" - "3.2.0" diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index a5b682d653295..5bb93e53ec1ee 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -117,6 +117,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_19_1 = new Version(2190199, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_2 = new Version(2190299, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_3 = new Version(2190399, org.apache.lucene.util.Version.LUCENE_9_12_2); + public static final Version V_2_19_4 = new Version(2190499, org.apache.lucene.util.Version.LUCENE_9_12_2); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_10_1_0); public static final Version V_3_1_0 = new Version(3010099, org.apache.lucene.util.Version.LUCENE_10_2_1); public static final Version V_3_2_0 = new Version(3020099, org.apache.lucene.util.Version.LUCENE_10_2_2); From 201339f322267c4d13cb2b5700af21c8e92157d1 Mon Sep 17 00:00:00 
2001 From: Craig Perkins Date: Thu, 28 Aug 2025 12:03:47 -0400 Subject: [PATCH 19/27] Add CompletionStage variants to IndicesAdminClient as an alternative to ActionListener (#19161) * Add CompletionStage variants to IndicesAdminClient as an alternative to ActionListener Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../transport/client/IndicesAdminClient.java | 305 ++++++++++++++++++ .../snapshots/RestoreServiceIntegTests.java | 18 +- 3 files changed, 313 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9565af89cd65c..bf78b0bd54508 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - IllegalArgumentException when scroll ID references a node not found in Cluster ([#19031](https://github.com/opensearch-project/OpenSearch/pull/19031)) - Adding ScriptedAvg class to painless spi to allowlist usage from plugins ([#19006](https://github.com/opensearch-project/OpenSearch/pull/19006)) - Replace centos:8 with almalinux:8 since centos docker images are deprecated ([#19154](https://github.com/opensearch-project/OpenSearch/pull/19154)) +- Add CompletionStage variants to IndicesAdminClient as an alternative to ActionListener ([#19161](https://github.com/opensearch-project/OpenSearch/pull/19161)) ### Fixed - Fix unnecessary refreshes on update preparation failures ([#15261](https://github.com/opensearch-project/OpenSearch/issues/15261)) diff --git a/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java index 6b8d168ecbbda..46b270e7eec7c 100644 --- a/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/transport/client/IndicesAdminClient.java @@ -142,6 +142,9 @@ import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionListener; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; + /** * Administrative actions/operations against indices. * @@ -899,4 +902,306 @@ public interface IndicesAdminClient extends OpenSearchClient { * @return The request builder configured with the specified scaling direction */ ScaleIndexRequestBuilder prepareScaleSearchOnly(String index, boolean searchOnly); + + /** Indices Exists - CompletionStage version */ + default CompletionStage existsAsync(IndicesExistsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + exists(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Indices stats - CompletionStage version */ + default CompletionStage statsAsync(IndicesStatsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + stats(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Recoveries - CompletionStage version */ + default CompletionStage recoveriesAsync(RecoveryRequest request) { + CompletableFuture future = new CompletableFuture<>(); + recoveries(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Segment replication stats - CompletionStage version */ + default CompletionStage segmentReplicationStatsAsync(SegmentReplicationStatsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + segmentReplicationStats(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Segments - CompletionStage version */ + default CompletionStage segmentsAsync(IndicesSegmentsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + segments(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** 
Shard stores - CompletionStage version */ + default CompletionStage shardStoresAsync(IndicesShardStoresRequest request) { + CompletableFuture future = new CompletableFuture<>(); + shardStores(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Create index - CompletionStage version */ + default CompletionStage createAsync(CreateIndexRequest request) { + CompletableFuture future = new CompletableFuture<>(); + create(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Delete index - CompletionStage version */ + default CompletionStage deleteAsync(DeleteIndexRequest request) { + CompletableFuture future = new CompletableFuture<>(); + delete(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Close index - CompletionStage version */ + default CompletionStage closeAsync(CloseIndexRequest request) { + CompletableFuture future = new CompletableFuture<>(); + close(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Open index - CompletionStage version */ + default CompletionStage openAsync(OpenIndexRequest request) { + CompletableFuture future = new CompletableFuture<>(); + open(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Add index block - CompletionStage version */ + default CompletionStage addBlockAsync(AddIndexBlockRequest request) { + CompletableFuture future = new CompletableFuture<>(); + addBlock(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Refresh - CompletionStage version */ + default CompletionStage refreshAsync(RefreshRequest request) { + CompletableFuture future = new CompletableFuture<>(); + refresh(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** 
Flush - CompletionStage version */ + default CompletionStage flushAsync(FlushRequest request) { + CompletableFuture future = new CompletableFuture<>(); + flush(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Force-merge - CompletionStage version */ + default CompletionStage forceMergeAsync(ForceMergeRequest request) { + CompletableFuture future = new CompletableFuture<>(); + forceMerge(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Upgrade - CompletionStage version */ + default CompletionStage upgradeAsync(UpgradeRequest request) { + CompletableFuture future = new CompletableFuture<>(); + upgrade(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Upgrade status - CompletionStage version */ + default CompletionStage upgradeStatusAsync(UpgradeStatusRequest request) { + CompletableFuture future = new CompletableFuture<>(); + upgradeStatus(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get mappings - CompletionStage version */ + default CompletionStage getMappingsAsync(GetMappingsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getMappings(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get field mappings - CompletionStage version */ + default CompletionStage getFieldMappingsAsync(GetFieldMappingsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getFieldMappings(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Put mapping - CompletionStage version */ + default CompletionStage putMappingAsync(PutMappingRequest request) { + CompletableFuture future = new CompletableFuture<>(); + putMapping(request, ActionListener.wrap(future::complete, 
future::completeExceptionally)); + return future; + } + + /** Aliases - CompletionStage version */ + default CompletionStage aliasesAsync(IndicesAliasesRequest request) { + CompletableFuture future = new CompletableFuture<>(); + aliases(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get aliases - CompletionStage version */ + default CompletionStage getAliasesAsync(GetAliasesRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getAliases(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get index - CompletionStage version */ + default CompletionStage getIndexAsync(GetIndexRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getIndex(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Clear cache - CompletionStage version */ + default CompletionStage clearCacheAsync(ClearIndicesCacheRequest request) { + CompletableFuture future = new CompletableFuture<>(); + clearCache(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Update settings - CompletionStage version */ + default CompletionStage updateSettingsAsync(UpdateSettingsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + updateSettings(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get settings - CompletionStage version */ + default CompletionStage getSettingsAsync(GetSettingsRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getSettings(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Analyze - CompletionStage version */ + default CompletionStage analyzeAsync(AnalyzeAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + analyze(request, 
ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Put template - CompletionStage version */ + default CompletionStage putTemplateAsync(PutIndexTemplateRequest request) { + CompletableFuture future = new CompletableFuture<>(); + putTemplate(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Delete template - CompletionStage version */ + default CompletionStage deleteTemplateAsync(DeleteIndexTemplateRequest request) { + CompletableFuture future = new CompletableFuture<>(); + deleteTemplate(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get templates - CompletionStage version */ + default CompletionStage getTemplatesAsync(GetIndexTemplatesRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getTemplates(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Validate query - CompletionStage version */ + default CompletionStage validateQueryAsync(ValidateQueryRequest request) { + CompletableFuture future = new CompletableFuture<>(); + validateQuery(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Resize index - CompletionStage version */ + default CompletionStage resizeIndexAsync(ResizeRequest request) { + CompletableFuture future = new CompletableFuture<>(); + resizeIndex(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Rollover index - CompletionStage version */ + default CompletionStage rolloverIndexAsync(RolloverRequest request) { + CompletableFuture future = new CompletableFuture<>(); + rolloverIndex(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Create data stream - CompletionStage version */ + default CompletionStage 
createDataStreamAsync(CreateDataStreamAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + createDataStream(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Delete data stream - CompletionStage version */ + default CompletionStage deleteDataStreamAsync(DeleteDataStreamAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + deleteDataStream(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get data streams - CompletionStage version */ + default CompletionStage getDataStreamsAsync(GetDataStreamAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + getDataStreams(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Resolve index - CompletionStage version */ + default CompletionStage resolveIndexAsync(ResolveIndexAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + resolveIndex(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Create view - CompletionStage version */ + default CompletionStage createViewAsync(CreateViewAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + createView(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get view - CompletionStage version */ + default CompletionStage getViewAsync(GetViewAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + getView(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Delete view - CompletionStage version */ + default CompletionStage deleteViewAsync(DeleteViewAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + deleteView(request, 
ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Update view - CompletionStage version */ + default CompletionStage updateViewAsync(CreateViewAction.Request request) { + CompletableFuture future = new CompletableFuture<>(); + updateView(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Pause ingestion - CompletionStage version */ + default CompletionStage pauseIngestionAsync(PauseIngestionRequest request) { + CompletableFuture future = new CompletableFuture<>(); + pauseIngestion(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Resume ingestion - CompletionStage version */ + default CompletionStage resumeIngestionAsync(ResumeIngestionRequest request) { + CompletableFuture future = new CompletableFuture<>(); + resumeIngestion(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + + /** Get ingestion state - CompletionStage version */ + default CompletionStage getIngestionStateAsync(GetIngestionStateRequest request) { + CompletableFuture future = new CompletableFuture<>(); + getIngestionState(request, ActionListener.wrap(future::complete, future::completeExceptionally)); + return future; + } + } diff --git a/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java b/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java index f733154c643da..f0090c05634d7 100644 --- a/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java +++ b/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java @@ -22,7 +22,6 @@ import org.opensearch.action.admin.indices.close.CloseIndexResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; -import 
org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.opensearch.action.admin.indices.open.OpenIndexRequest; import org.opensearch.action.admin.indices.open.OpenIndexResponse; import org.opensearch.action.bulk.BulkRequest; @@ -118,13 +117,12 @@ public static Collection parameters() { public void cleanup() throws InterruptedException { final CountDownLatch allDeleted = new CountDownLatch(3); for (String indexName : new String[] { indexName, renamedIndexName }) { - final StepListener existsIndexResponseStepListener = new StepListener<>(); - client().admin().indices().exists(new IndicesExistsRequest(indexName), existsIndexResponseStepListener); - continueOrDie(existsIndexResponseStepListener, resp -> { + client().admin().indices().existsAsync(new IndicesExistsRequest(indexName)).thenAccept(resp -> { if (resp.isExists()) { - final StepListener deleteIndexResponseStepListener = new StepListener<>(); - client().admin().indices().delete(new DeleteIndexRequest(indexName), deleteIndexResponseStepListener); - continueOrDie(deleteIndexResponseStepListener, ignored -> allDeleted.countDown()); + client().admin() + .indices() + .deleteAsync(new DeleteIndexRequest(indexName)) + .thenAccept(ignored -> { allDeleted.countDown(); }); } else { allDeleted.countDown(); } @@ -218,11 +216,9 @@ public void testRestoreWithRename() throws Exception { final CountDownLatch isRestorable = new CountDownLatch(1); if (!this.exists && !this.renameIndexes) { - final StepListener deleteIndexResponseStepListener = new StepListener<>(); continueOrDie(createSnapshotResponseStepListener, ignored -> { - client().admin().indices().delete(new DeleteIndexRequest(indexName), deleteIndexResponseStepListener); + client().admin().indices().deleteAsync(new DeleteIndexRequest(indexName)).thenAccept(r -> isRestorable.countDown()); }); - continueOrDie(deleteIndexResponseStepListener, ignored -> isRestorable.countDown()); } else { continueOrDie(createSnapshotResponseStepListener, 
ignored -> isRestorable.countDown()); } @@ -243,7 +239,7 @@ public void testRestoreWithRename() throws Exception { restoreSnapshotResponseStepListener.whenComplete(ignored -> { isRestored.countDown(); - assertTrue("unexpected sucesssful restore", expectSuccess); + assertTrue("unexpected successful restore", expectSuccess); }, e -> { isRestored.countDown(); if (expectSuccess) { From 28a9e18fec905642bc82a9049f3e855d67003ca8 Mon Sep 17 00:00:00 2001 From: Riley Jerger <214163063+RileyJergerAmazon@users.noreply.github.com> Date: Thu, 28 Aug 2025 09:20:57 -0700 Subject: [PATCH 20/27] Update delete_backport_branch workflow to include release-chores branches (#18981) Signed-off-by: Riley Jerger --- .github/workflows/delete_backport_branch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/delete_backport_branch.yml b/.github/workflows/delete_backport_branch.yml index 22ce83c69a5d8..7923bac599888 100644 --- a/.github/workflows/delete_backport_branch.yml +++ b/.github/workflows/delete_backport_branch.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest permissions: contents: write - if: github.repository == 'opensearch-project/OpenSearch' && startsWith(github.event.pull_request.head.ref,'backport/') + if: github.repository == 'opensearch-project/OpenSearch' && (startsWith(github.event.pull_request.head.ref,'backport/') || startsWith(github.event.pull_request.head.ref,'release-chores/')) steps: - name: Delete merged branch uses: actions/github-script@v7 From 9d54c41aa717d26409dba387cfa6aa1eb7fa4bfd Mon Sep 17 00:00:00 2001 From: Atri Sharma Date: Thu, 28 Aug 2025 23:10:05 +0530 Subject: [PATCH 21/27] Fix deadlock in SearchPhaseControllerTests cancellation tests (#19171) The cancellation tests could deadlock when threads are delayed by OS scheduling. If cancellation triggers before all threads start, late threads may hit a code path where batchReduceSize causes the latch callback to be deferred to a MergeTask. 
Under certain timing conditions, these callbacks never execute, causing latch.await() to hang indefinitely. Ensure latch.countDown() is always called by wrapping consumeResult in try-catch. This guarantees test completion regardless of cancellation timing or exceptions. Signed-off-by: Atri Sharma --- .../action/search/SearchPhaseControllerTests.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java index 9115ccadf2998..a4eb8f7548be5 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java @@ -1826,7 +1826,13 @@ private static void consumeShardLevelQueryPhaseResultsAsync(int expectedNumResul result.setShardIndex(index); result.size(1); - consumer.consumeResult(result, latch::countDown); + try { + consumer.consumeResult(result, latch::countDown); + } catch (Exception e) { + // Ensure latch counts down even on cancellation + latch.countDown(); + // Don't rethrow - let the thread complete normally + } }); threads[index].start(); } From 9cf043b1540584d748cf266f8b204a81fe4ea374 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 28 Aug 2025 16:19:11 -0700 Subject: [PATCH 22/27] Remove cap on Java version used by forbidden APIs (#19163) There was a check in the forbidden APIs plugin to cap the Java version at 14, presumably from ages ago when the plugin did not support Java 15 or newer. That means we have not been enforcing any Java deprecations in the past 5 years. This removes that check, updates the plugin to 3.9, and fixes all the resulting deprecation failures. 
Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + buildSrc/build.gradle | 2 +- .../ForbiddenApisPrecommitPlugin.java | 18 ++++++++---- .../cli/plugin/InstallPluginCommand.java | 6 ++-- .../cli/plugin/InstallPluginCommandTests.java | 6 ++-- .../core/util/FileSystemUtilsTests.java | 7 +++-- .../ssl/DefaultJdkTrustConfigTests.java | 4 +-- .../common/ssl/PemKeyConfigTests.java | 4 +-- .../common/ssl/PemTrustConfigTests.java | 2 +- .../common/ssl/StoreKeyConfigTests.java | 4 +-- .../common/ssl/StoreTrustConfigTests.java | 2 +- .../ingest/common/DateProcessorTests.java | 4 ++- .../org/opensearch/painless/Compiler.java | 4 +-- .../lookup/PainlessLookupBuilder.java | 4 +-- .../blobstore/url/URLBlobContainer.java | 5 +++- .../common/blobstore/url/URLBlobStore.java | 15 +++++----- .../repositories/url/URLRepository.java | 7 +++-- .../RepositoryURLClientYamlTestSuiteIT.java | 3 +- .../example/resthandler/ExampleFixtureIT.java | 3 +- .../bootstrap/SystemCallFilterTests.java | 2 +- .../action/admin/ReloadSecureSettingsIT.java | 14 +-------- .../opensearch/common/util/LocaleUtils.java | 29 ++++++++----------- .../profile/fetch/FlatFetchProfileTree.java | 8 ++--- .../AbstractTermVectorsTestCase.java | 3 +- .../opensearch/bootstrap/SecurityTests.java | 13 +++++---- .../opensearch/common/cache/CacheTests.java | 2 +- .../org/opensearch/env/EnvironmentTests.java | 27 +++++++++-------- .../test/OpenSearchIntegTestCase.java | 8 ++--- 28 files changed, 104 insertions(+), 103 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf78b0bd54508..321d693579826 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Adding ScriptedAvg class to painless spi to allowlist usage from plugins ([#19006](https://github.com/opensearch-project/OpenSearch/pull/19006)) - Replace centos:8 with almalinux:8 since centos docker images are deprecated 
([#19154](https://github.com/opensearch-project/OpenSearch/pull/19154)) - Add CompletionStage variants to IndicesAdminClient as an alternative to ActionListener ([#19161](https://github.com/opensearch-project/OpenSearch/pull/19161)) +- Remove cap on Java version used by forbidden APIs ([#19163](https://github.com/opensearch-project/OpenSearch/pull/19163)) ### Fixed - Fix unnecessary refreshes on update preparation failures ([#15261](https://github.com/opensearch-project/OpenSearch/issues/15261)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 1e3adf762fa48..8f97f90cf21bb 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -114,7 +114,7 @@ dependencies { api 'com.gradleup.shadow:shadow-gradle-plugin:8.3.5' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.8' + api 'de.thetaphi:forbiddenapis:3.9' api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java index 6b89aa8b60197..c42b7ea975de5 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java @@ -40,7 +40,6 @@ import org.opensearch.gradle.ExportOpenSearchBuildResourcesTask; import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.util.GradleUtils; -import org.gradle.api.JavaVersion; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.plugins.ExtraPropertiesExtension; @@ -53,6 +52,7 @@ import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.Set; public class ForbiddenApisPrecommitPlugin 
extends PrecommitPlugin { @Override @@ -89,10 +89,6 @@ public TaskProvider createTask(Project project) { t.setClasspath(project.files(sourceSet.getRuntimeClasspath()).plus(sourceSet.getCompileClasspath())); t.setTargetCompatibility(BuildParams.getRuntimeJavaVersion().getMajorVersion()); - if (BuildParams.getRuntimeJavaVersion().compareTo(JavaVersion.VERSION_14) > 0) { - // TODO: forbidden apis does not yet support java 15, rethink using runtime version - t.setTargetCompatibility(JavaVersion.VERSION_14.getMajorVersion()); - } t.setBundledSignatures(new HashSet<>(Arrays.asList("jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out"))); t.setSignaturesFiles( project.files( @@ -140,6 +136,18 @@ public Void call(Object... names) { return null; } }); + // Use of the deprecated security manager APIs is pervasive so set them to warn + // globally for all projects. Replacements for (most of) these APIs are available + // so usages can move to the non-deprecated variants to avoid the warnings.
+ t.setSignaturesWithSeverityWarn( + Set.of( + "java.security.AccessController", + "java.security.AccessControlContext", + "java.lang.System#getSecurityManager()", + "java.lang.SecurityManager", + "java.security.Policy" + ) + ); }); TaskProvider forbiddenApis = project.getTasks().named("forbiddenApis"); forbiddenApis.configure(t -> t.setGroup("")); diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java index ea76e051d253e..c71728056b4c4 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java @@ -399,7 +399,7 @@ private String getMavenUrl(Terminal terminal, String[] coordinates, String platf @SuppressForbidden(reason = "Make HEAD request using URLConnection.connect()") boolean urlExists(Terminal terminal, String urlString) throws IOException { terminal.println(VERBOSE, "Checking if url exists: " + urlString); - URL url = new URL(urlString); + URL url = URI.create(urlString).toURL(); assert "https".equals(url.getProtocol()) : "Use of https protocol is required"; HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection(); urlConnection.addRequestProperty("User-Agent", "opensearch-plugin-installer"); @@ -427,7 +427,7 @@ private List checkMisspelledPlugin(String pluginId) { @SuppressForbidden(reason = "We use getInputStream to download plugins") Path downloadZip(Terminal terminal, String urlString, Path tmpDir, boolean isBatch) throws IOException { terminal.println(VERBOSE, "Retrieving zip from " + urlString); - URL url = new URL(urlString); + URL url = URI.create(urlString).toURL(); Path zip = Files.createTempFile(tmpDir, null, ".zip"); URLConnection urlConnection = url.openConnection(); 
urlConnection.addRequestProperty("User-Agent", "opensearch-plugin-installer"); @@ -684,7 +684,7 @@ InputStream getPublicKey() { */ // pkg private for tests URL openUrl(String urlString) throws IOException { - URL checksumUrl = new URL(urlString); + URL checksumUrl = URI.create(urlString).toURL(); HttpURLConnection connection = (HttpURLConnection) checksumUrl.openConnection(); if (connection.getResponseCode() == 404) { return null; diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java index 70cccc94a26f9..57cf65a4a2c51 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java @@ -526,7 +526,7 @@ public void testSpaceInUrl() throws Exception { Path pluginDir = createPluginDir(temp); String pluginZip = createPluginUrl("fake", pluginDir); Path pluginZipWithSpaces = createTempFile("foo bar", ".zip"); - try (InputStream in = FileSystemUtils.openFileURLStream(new URL(pluginZip))) { + try (InputStream in = FileSystemUtils.openFileURLStream(URI.create(pluginZip).toURL())) { Files.copy(in, pluginZipWithSpaces, StandardCopyOption.REPLACE_EXISTING); } installPlugin(pluginZipWithSpaces.toUri().toURL().toString(), env.v1()); @@ -536,8 +536,8 @@ public void testSpaceInUrl() throws Exception { public void testMalformedUrlNotMaven() throws Exception { Tuple env = createEnv(fs, temp); // has two colons, so it appears similar to maven coordinates - MalformedURLException e = expectThrows(MalformedURLException.class, () -> installPlugin("://host:1234", env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("no protocol")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> installPlugin("://host:1234", 
env.v1())); + assertThat(e.getMessage(), startsWith("Expected scheme name")); } public void testFileNotMaven() throws Exception { diff --git a/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java index 8b29378dfde12..08f5f120f879d 100644 --- a/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.io.InputStream; +import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.nio.ByteBuffer; @@ -132,21 +133,21 @@ public void testIsHidden() { } public void testOpenFileURLStream() throws IOException { - URL urlWithWrongProtocol = new URL("http://www.google.com"); + URL urlWithWrongProtocol = URI.create("http://www.google.com").toURL(); try (InputStream is = FileSystemUtils.openFileURLStream(urlWithWrongProtocol)) { fail("Should throw IllegalArgumentException due to invalid protocol"); } catch (IllegalArgumentException e) { assertEquals("Invalid protocol [http], must be [file] or [jar]", e.getMessage()); } - URL urlWithHost = new URL("file", "localhost", txtFile.toString()); + URL urlWithHost = URI.create("file://localhost/" + txtFile.toString()).toURL(); try (InputStream is = FileSystemUtils.openFileURLStream(urlWithHost)) { fail("Should throw IllegalArgumentException due to host"); } catch (IllegalArgumentException e) { assertEquals("URL cannot have host. 
Found: [localhost]", e.getMessage()); } - URL urlWithPort = new URL("file", "", 80, txtFile.toString()); + URL urlWithPort = URI.create("file://:80/" + txtFile.toString()).toURL(); try (InputStream is = FileSystemUtils.openFileURLStream(urlWithPort)) { fail("Should throw IllegalArgumentException due to port"); } catch (IllegalArgumentException e) { diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java index 82f4e94e31ae6..9a723fe491394 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java @@ -77,12 +77,12 @@ private void assertStandardIssuers(X509ExtendedTrustManager trustManager) { private void assertHasTrustedIssuer(X509ExtendedTrustManager trustManager, String name) { final String lowerName = name.toLowerCase(Locale.ROOT); final Optional ca = Stream.of(trustManager.getAcceptedIssuers()) - .filter(cert -> cert.getSubjectDN().getName().toLowerCase(Locale.ROOT).contains(lowerName)) + .filter(cert -> cert.getSubjectX500Principal().getName().toLowerCase(Locale.ROOT).contains(lowerName)) .findAny(); if (ca.isPresent() == false) { logger.info("Failed to find issuer [{}] in trust manager, but did find ...", lowerName); for (X509Certificate cert : trustManager.getAcceptedIssuers()) { - logger.info(" - {}", cert.getSubjectDN().getName().replaceFirst("^\\w+=([^,]+),.*", "$1")); + logger.info(" - {}", cert.getSubjectX500Principal().getName().replaceFirst("^\\w+=([^,]+),.*", "$1")); } Assert.fail("Cannot find trusted issuer with name [" + name + "]."); } diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java index 70cb76ceaec51..51e69a758ad44 100644 --- 
a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java @@ -154,8 +154,8 @@ private void assertCertificateAndKey(PemKeyConfig keyConfig, String expectedDN) assertThat(chain, notNullValue()); assertThat(chain, arrayWithSize(1)); final X509Certificate certificate = chain[0]; - assertThat(certificate.getIssuerDN().getName(), is("CN=Test CA 1")); - assertThat(certificate.getSubjectDN().getName(), is(expectedDN)); + assertThat(certificate.getIssuerX500Principal().getName(), is("CN=Test CA 1")); + assertThat(certificate.getSubjectX500Principal().getName(), is(expectedDN)); assertThat(certificate.getSubjectAlternativeNames(), iterableWithSize(2)); assertThat( certificate.getSubjectAlternativeNames(), diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java index 773b4071313d9..05bf4dd194b00 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java @@ -146,7 +146,7 @@ private void assertCertificateChain(PemTrustConfig trustConfig, String... 
caName final X509ExtendedTrustManager trustManager = trustConfig.createTrustManager(); final X509Certificate[] issuers = trustManager.getAcceptedIssuers(); final Set issuerNames = Stream.of(issuers) - .map(X509Certificate::getSubjectDN) + .map(X509Certificate::getSubjectX500Principal) .map(Principal::getName) .collect(Collectors.toSet()); diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java index 1745c547d04ee..fdf98dc38bca5 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java @@ -183,8 +183,8 @@ private void assertKeysLoaded(StoreKeyConfig keyConfig, String... names) throws assertThat(chain, notNullValue()); assertThat(chain, arrayWithSize(1)); final X509Certificate certificate = chain[0]; - assertThat(certificate.getIssuerDN().getName(), is("CN=Test CA 1")); - assertThat(certificate.getSubjectDN().getName(), is("CN=" + name)); + assertThat(certificate.getIssuerX500Principal().getName(), is("CN=Test CA 1")); + assertThat(certificate.getSubjectX500Principal().getName(), is("CN=" + name)); assertThat(certificate.getSubjectAlternativeNames(), iterableWithSize(2)); assertThat( certificate.getSubjectAlternativeNames(), diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java index 8058ffe95dc93..656d8c468be60 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java @@ -140,7 +140,7 @@ private void assertCertificateChain(StoreTrustConfig trustConfig, String... 
caNa final X509ExtendedTrustManager trustManager = trustConfig.createTrustManager(); final X509Certificate[] issuers = trustManager.getAcceptedIssuers(); final Set issuerNames = Stream.of(issuers) - .map(X509Certificate::getSubjectDN) + .map(X509Certificate::getSubjectX500Principal) .map(Principal::getName) .collect(Collectors.toSet()); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateProcessorTests.java index 8a4f3b4a898b4..02ac2b866ce71 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateProcessorTests.java @@ -46,12 +46,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.IllformedLocaleException; import java.util.List; import java.util.Locale; import java.util.Map; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; public class DateProcessorTests extends OpenSearchTestCase { @@ -315,7 +317,7 @@ public void testInvalidLocale() { () -> processor.execute(RandomDocumentPicks.randomIngestDocument(random(), document)) ); assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); - assertThat(e.getCause().getMessage(), equalTo("Unknown language: invalid")); + assertThat(e.getCause(), instanceOf(IllformedLocaleException.class)); } public void testOutputFormat() { diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index c19d4f361b2b6..c55cb4707d464 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -50,7 +50,7 @@ import 
java.lang.reflect.Method; import java.net.MalformedURLException; -import java.net.URL; +import java.net.URI; import java.security.CodeSource; import java.security.SecureClassLoader; import java.security.cert.Certificate; @@ -77,7 +77,7 @@ final class Compiler { static { try { // Setup the code privileges. - CODESOURCE = new CodeSource(new URL("file:" + BootstrapInfo.UNTRUSTED_CODEBASE), (Certificate[]) null); + CODESOURCE = new CodeSource(URI.create("file:" + BootstrapInfo.UNTRUSTED_CODEBASE).toURL(), (Certificate[]) null); } catch (MalformedURLException impossible) { throw new RuntimeException(impossible); } diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java index e155a890c03d1..e2291754a26e4 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java @@ -57,7 +57,7 @@ import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.net.MalformedURLException; -import java.net.URL; +import java.net.URI; import java.security.AccessController; import java.security.CodeSource; import java.security.PrivilegedAction; @@ -120,7 +120,7 @@ Class defineBridge(String name, byte[] bytes) { static { try { - CODESOURCE = new CodeSource(new URL("file:" + BootstrapInfo.UNTRUSTED_CODEBASE), (Certificate[]) null); + CODESOURCE = new CodeSource(URI.create("file:" + BootstrapInfo.UNTRUSTED_CODEBASE).toURL(), (Certificate[]) null); } catch (MalformedURLException mue) { throw new RuntimeException(mue); } diff --git a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java index 02e858cb8d1f2..395f741c67133 100644 --- 
a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java @@ -43,6 +43,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.net.URISyntaxException; import java.net.URL; import java.nio.file.NoSuchFileException; import java.security.AccessController; @@ -136,9 +137,11 @@ public DeleteResult delete() { @Override public InputStream readBlob(String name) throws IOException { try { - return new BufferedInputStream(getInputStream(new URL(path, name)), blobStore.bufferSizeInBytes()); + return new BufferedInputStream(getInputStream(this.path.toURI().resolve(name).toURL()), blobStore.bufferSizeInBytes()); } catch (FileNotFoundException fnfe) { throw new NoSuchFileException("[" + name + "] blob not found"); + } catch (URISyntaxException e) { + throw new IOException(e); } } diff --git a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java index 0fad0cbe21033..dda206ae540f5 100644 --- a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java @@ -41,6 +41,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; /** @@ -97,7 +98,7 @@ public int bufferSizeInBytes() { public BlobContainer blobContainer(BlobPath path) { try { return new URLBlobContainer(this, path, buildPath(path)); - } catch (MalformedURLException ex) { + } catch (MalformedURLException | URISyntaxException ex) { throw new BlobStoreException("malformed URL " + path, ex); } } @@ -113,17 +114,15 @@ public void close() { * @param path relative path * @return Base URL 
+ path */ - private URL buildPath(BlobPath path) throws MalformedURLException { + private URL buildPath(BlobPath path) throws MalformedURLException, URISyntaxException { String[] paths = path.toArray(); if (paths.length == 0) { return path(); } - URL blobPath = new URL(this.path, paths[0] + "/"); - if (paths.length > 1) { - for (int i = 1; i < paths.length; i++) { - blobPath = new URL(blobPath, paths[i] + "/"); - } + var uri = this.path.toURI(); + for (String pathElement : paths) { + uri = uri.resolve(pathElement + "/"); } - return blobPath; + return uri.toURL(); } } diff --git a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java index 4c8d8aab4532b..0780002f175ab 100644 --- a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java @@ -50,6 +50,7 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.net.MalformedURLException; +import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.util.Arrays; @@ -85,10 +86,10 @@ public class URLRepository extends BlobStoreRepository { Property.NodeScope ); - public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, Property.NodeScope); + public static final Setting URL_SETTING = new Setting<>("url", "http://?", URLRepository::parseURL, Property.NodeScope); public static final Setting REPOSITORIES_URL_SETTING = new Setting<>( "repositories.url.url", - (s) -> s.get("repositories.uri.url", "http:"), + (s) -> s.get("repositories.uri.url", "http://?"), URLRepository::parseURL, Property.NodeScope ); @@ -194,7 +195,7 @@ public boolean isReadOnly() { private static URL parseURL(String s) { try { - return new URL(s); + return URI.create(s).toURL(); } catch (MalformedURLException e) { throw 
new IllegalArgumentException("Unable to parse URL repository setting", e); } diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 27cef3f7d7251..c18e84f46e471 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -55,7 +55,6 @@ import java.io.IOException; import java.net.InetAddress; import java.net.URI; -import java.net.URL; import java.util.List; import java.util.Map; @@ -120,7 +119,7 @@ public void registerRepositories() throws IOException { List allowedUrls = (List) XContentMapValues.extractValue("defaults.repositories.url.allowed_urls", clusterSettings); for (String allowedUrl : allowedUrls) { try { - InetAddress inetAddress = InetAddress.getByName(new URL(allowedUrl).getHost()); + InetAddress inetAddress = InetAddress.getByName(URI.create(allowedUrl).getHost()); if (inetAddress.isAnyLocalAddress() || inetAddress.isLoopbackAddress()) { Request createUrlRepositoryRequest = new Request("PUT", "/_snapshot/repository-url"); createUrlRepositoryRequest.setEntity(buildRepositorySettings("url", Settings.builder().put("url", allowedUrl).build())); diff --git a/plugins/examples/rest-handler/src/javaRestTest/java/org/opensearch/example/resthandler/ExampleFixtureIT.java b/plugins/examples/rest-handler/src/javaRestTest/java/org/opensearch/example/resthandler/ExampleFixtureIT.java index 0d50f9efbecd4..0ff9f78e34bef 100644 --- a/plugins/examples/rest-handler/src/javaRestTest/java/org/opensearch/example/resthandler/ExampleFixtureIT.java +++ b/plugins/examples/rest-handler/src/javaRestTest/java/org/opensearch/example/resthandler/ExampleFixtureIT.java @@ -40,6 +40,7 @@ import 
java.io.OutputStreamWriter; import java.net.InetAddress; import java.net.Socket; +import java.net.URI; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -53,7 +54,7 @@ public void testExample() throws Exception { final String externalAddress = System.getProperty("external.address"); assertNotNull("External address must not be null", externalAddress); - final URL url = new URL("http://" + externalAddress); + final URL url = URI.create("http://" + externalAddress).toURL(); final InetAddress address = InetAddress.getByName(url.getHost()); try ( Socket socket = new Socket(address, url.getPort()); diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java index 99c9ee7e96d01..8e77cbc979dfb 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java @@ -39,7 +39,7 @@ public class SystemCallFilterTests extends OpenSearchTestCase { /** command to try to run in tests */ - static final String EXECUTABLE = Constants.WINDOWS ? "calc" : "ls"; + static final String[] EXECUTABLE = new String[] { Constants.WINDOWS ? 
"calc" : "ls" }; @SuppressWarnings("removal") @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java index c81d491719e4b..e3ba967a28154 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java @@ -50,7 +50,6 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.StandardCopyOption; -import java.security.AccessControlException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -449,20 +448,9 @@ public void onFailure(Exception e) { } } - @SuppressWarnings("removal") private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); - try { - keyStoreWrapper.save(environment.configDir(), password); - } catch (final AccessControlException e) { - if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) { - // this is expected: the save method is extra diligent and wants to make sure - // the keystore is readable, not relying on umask and whatnot. It's ok, we don't - // care about this in tests. 
- } else { - throw e; - } - } + keyStoreWrapper.save(environment.configDir(), password); return keyStoreWrapper; } diff --git a/server/src/main/java/org/opensearch/common/util/LocaleUtils.java b/server/src/main/java/org/opensearch/common/util/LocaleUtils.java index c684b1b2d781f..05c5cd89705b1 100644 --- a/server/src/main/java/org/opensearch/common/util/LocaleUtils.java +++ b/server/src/main/java/org/opensearch/common/util/LocaleUtils.java @@ -37,7 +37,7 @@ import java.util.MissingResourceException; /** - * Utilities for for dealing with {@link Locale} objects + * Utilities for dealing with {@link Locale} objects * * @opensearch.internal */ @@ -90,23 +90,18 @@ public static Locale parse(String localeStr) { } private static Locale parseParts(String[] parts) { - switch (parts.length) { - case 3: - // lang, country, variant - return new Locale(parts[0], parts[1], parts[2]); - case 2: - // lang, country - return new Locale(parts[0], parts[1]); - case 1: + return switch (parts.length) { + case 3 -> new Locale.Builder().setLanguage(parts[0]).setRegion(parts[1]).setVariant(parts[2]).build(); + case 2 -> new Locale.Builder().setLanguage(parts[0]).setRegion(parts[1]).build(); + case 1 -> { if ("ROOT".equalsIgnoreCase(parts[0])) { - return Locale.ROOT; + yield Locale.ROOT; } - // lang - return new Locale(parts[0]); - default: - throw new IllegalArgumentException( - "Locales can have at most 3 parts but got " + parts.length + ": " + Arrays.asList(parts) - ); - } + yield new Locale.Builder().setLanguage(parts[0]).build(); + } + default -> throw new IllegalArgumentException( + "Locales can have at most 3 parts but got " + parts.length + ": " + Arrays.asList(parts) + ); + }; } } diff --git a/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java b/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java index 9c9bef2a23e53..0b850c163379c 100644 --- 
a/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java @@ -65,7 +65,7 @@ private static class Node { /** Start profiling a new fetch phase and return its breakdown. */ FetchProfileBreakdown startFetchPhase(String element) { // Make phase name unique for concurrent slices by including thread info - String uniqueElement = element + "_" + Thread.currentThread().getId(); + String uniqueElement = element + "_" + Thread.currentThread().threadId(); Node node = rootsMap.get(uniqueElement); if (node == null) { @@ -81,8 +81,8 @@ FetchProfileBreakdown startFetchPhase(String element) { /** Start profiling a fetch sub-phase under the specified parent phase. */ FetchProfileBreakdown startSubPhase(String element, String parentElement) { // Make phase names unique for concurrent slices - String uniqueParentElement = parentElement + "_" + Thread.currentThread().getId(); - String uniqueElement = element + "_" + Thread.currentThread().getId(); + String uniqueParentElement = parentElement + "_" + Thread.currentThread().threadId(); + String uniqueElement = element + "_" + Thread.currentThread().threadId(); Node parent = phaseMap.get(uniqueParentElement); if (parent == null) { @@ -107,7 +107,7 @@ FetchProfileBreakdown startSubPhase(String element, String parentElement) { */ void endFetchPhase(String element) { // Make phase name unique for concurrent slices - String uniqueElement = element + "_" + Thread.currentThread().getId(); + String uniqueElement = element + "_" + Thread.currentThread().threadId(); Node node = phaseMap.get(uniqueElement); if (node == null) { diff --git a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java index 92f8e132b691b..4ad7cc324c618 100644 --- 
a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -223,9 +223,8 @@ public String toString() { if (requestPayloads) { requested += "payload,"; } - Locale aLocale = new Locale("en", "US"); return String.format( - aLocale, + Locale.US, "(doc: %s\n requested: %s, fields: %s)", doc, requested, diff --git a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java index 9bd5e46fe50a5..738fc80c34ce6 100644 --- a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java @@ -35,6 +35,7 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.net.URI; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; @@ -80,7 +81,7 @@ public void testEnsureRegularFile() throws IOException { public void testProcessExecution() throws Exception { assumeTrue("test requires security manager", System.getSecurityManager() != null); try { - Runtime.getRuntime().exec("ls"); + Runtime.getRuntime().exec(new String[] { "ls" }); fail("didn't get expected exception"); } catch (SecurityException expected) {} } @@ -89,15 +90,15 @@ public void testProcessExecution() throws Exception { public void testReadPolicyWithCodebases() throws IOException { final Map codebases = Map.of( "test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar", - new URL("file://test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar"), + URI.create("file://test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar").toURL(), "test-kafka-server-common-3.6.1.jar", - new URL("file://test-kafka-server-common-3.6.1.jar"), + URI.create("file://test-kafka-server-common-3.6.1.jar").toURL(), "test-kafka-server-common-3.6.1-test.jar", - new 
URL("file://test-kafka-server-common-3.6.1-test.jar"), + URI.create("file://test-kafka-server-common-3.6.1-test.jar").toURL(), "test-lucene-core-9.11.0-snapshot-8a555eb.jar", - new URL("file://test-lucene-core-9.11.0-snapshot-8a555eb.jar"), + URI.create("file://test-lucene-core-9.11.0-snapshot-8a555eb.jar").toURL(), "test-zstd-jni-1.5.6-1.jar", - new URL("file://test-zstd-jni-1.5.6-1.jar") + URI.create("file://test-zstd-jni-1.5.6-1.jar").toURL() ); AccessController.doPrivileged( diff --git a/server/src/test/java/org/opensearch/common/cache/CacheTests.java b/server/src/test/java/org/opensearch/common/cache/CacheTests.java index f6277a7139c7e..65aa5931f144c 100644 --- a/server/src/test/java/org/opensearch/common/cache/CacheTests.java +++ b/server/src/test/java/org/opensearch/common/cache/CacheTests.java @@ -774,7 +774,7 @@ public int hashCode() { // start a watchdog service ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1); scheduler.scheduleAtFixedRate(() -> { - Set ids = threads.stream().map(t -> t.getId()).collect(Collectors.toSet()); + Set ids = threads.stream().map(Thread::threadId).collect(Collectors.toSet()); ThreadMXBean mxBean = ManagementFactory.getThreadMXBean(); long[] deadlockedThreads = mxBean.findDeadlockedThreads(); if (!deadlock.get() && deadlockedThreads != null) { diff --git a/server/src/test/java/org/opensearch/env/EnvironmentTests.java b/server/src/test/java/org/opensearch/env/EnvironmentTests.java index 0e343a6e43ba7..ebbd17fc636a2 100644 --- a/server/src/test/java/org/opensearch/env/EnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/EnvironmentTests.java @@ -38,7 +38,7 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.net.URL; +import java.net.URI; import java.nio.file.Path; import java.util.List; @@ -86,17 +86,20 @@ public void testRepositoryResolution() throws IOException { assertThat(environment.resolveRepoFile("/somethingeles/repos/repo1"), nullValue()); 
assertThat(environment.resolveRepoFile("/test/other/repo"), notNullValue()); - assertThat(environment.resolveRepoURL(new URL("file:///test/repos/repo1")), notNullValue()); - assertThat(environment.resolveRepoURL(new URL("file:/test/repos/repo1")), notNullValue()); - assertThat(environment.resolveRepoURL(new URL("file://test/repos/repo1")), nullValue()); - assertThat(environment.resolveRepoURL(new URL("file:///test/repos/../repo1")), nullValue()); - assertThat(environment.resolveRepoURL(new URL("http://localhost/test/")), nullValue()); - - assertThat(environment.resolveRepoURL(new URL("jar:file:///test/repos/repo1!/repo/")), notNullValue()); - assertThat(environment.resolveRepoURL(new URL("jar:file:/test/repos/repo1!/repo/")), notNullValue()); - assertThat(environment.resolveRepoURL(new URL("jar:file:///test/repos/repo1!/repo/")).toString(), endsWith("repo1!/repo/")); - assertThat(environment.resolveRepoURL(new URL("jar:file:///test/repos/../repo1!/repo/")), nullValue()); - assertThat(environment.resolveRepoURL(new URL("jar:http://localhost/test/../repo1?blah!/repo/")), nullValue()); + assertThat(environment.resolveRepoURL(URI.create("file:///test/repos/repo1").toURL()), notNullValue()); + assertThat(environment.resolveRepoURL(URI.create("file:/test/repos/repo1").toURL()), notNullValue()); + assertThat(environment.resolveRepoURL(URI.create("file://test/repos/repo1").toURL()), nullValue()); + assertThat(environment.resolveRepoURL(URI.create("file:///test/repos/../repo1").toURL()), nullValue()); + assertThat(environment.resolveRepoURL(URI.create("http://localhost/test/").toURL()), nullValue()); + + assertThat(environment.resolveRepoURL(URI.create("jar:file:///test/repos/repo1!/repo/").toURL()), notNullValue()); + assertThat(environment.resolveRepoURL(URI.create("jar:file:/test/repos/repo1!/repo/").toURL()), notNullValue()); + assertThat( + environment.resolveRepoURL(URI.create("jar:file:///test/repos/repo1!/repo/").toURL()).toString(), + endsWith("repo1!/repo/") + ); 
+ assertThat(environment.resolveRepoURL(URI.create("jar:file:///test/repos/../repo1!/repo/").toURL()), nullValue()); + assertThat(environment.resolveRepoURL(URI.create("jar:http://localhost/test/../repo1?blah!/repo/").toURL()), nullValue()); } public void testPathDataWhenNotSet() { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 812109c5df6b8..6d537c95725b3 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -193,7 +193,7 @@ import java.lang.annotation.Target; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.URL; +import java.net.URI; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -2017,9 +2017,9 @@ private ExternalTestCluster buildExternalCluster(String clusterAddresses, String TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { - URL url = new URL("http://" + stringAddress); - InetAddress inetAddress = InetAddress.getByName(url.getHost()); - transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort())); + URI uri = URI.create("http://" + stringAddress); + InetAddress inetAddress = InetAddress.getByName(uri.getHost()); + transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, uri.getPort())); } return new ExternalTestCluster( createTempDir(), From 99053972a4a47457d0a481f4d87808fe01878af9 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Fri, 29 Aug 2025 10:16:26 +0800 Subject: [PATCH 23/27] Remove the setShardIndex parameter in CollapseTopFieldDocs.merge() (#19053) * Do not set shardIndex of top docs in CollapsingTopDocsCollectorContext Signed-off-by: Binlong Gao * Modify change log 
Signed-off-by: Binlong Gao * Format code Signed-off-by: Binlong Gao * Remove setShardIndex parameter in CollapseTopFieldDocs.merge() Signed-off-by: Binlong Gao * Modify change log Signed-off-by: Binlong Gao * tiny change Signed-off-by: Binlong Gao --------- Signed-off-by: Binlong Gao --- CHANGELOG.md | 1 + .../test/search/110_field_collapsing.yml | 101 ++++++++++++++++++ .../search/grouping/CollapseTopFieldDocs.java | 66 ++++++------ .../action/search/SearchPhaseController.java | 2 +- .../search/query/TopDocsCollectorContext.java | 3 +- .../CollapsingTopDocsCollectorTests.java | 41 ++++++- .../search/query/QueryPhaseTests.java | 9 ++ 7 files changed, 185 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 321d693579826..8cc2e517d844c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix flaky tests in CloseIndexIT by addressing cluster state synchronization issues ([#18878](https://github.com/opensearch-project/OpenSearch/issues/18878)) - [Tiered Caching] Handle query execution exception ([#19000](https://github.com/opensearch-project/OpenSearch/issues/19000)) - Grant access to testclusters dir for tests ([#19085](https://github.com/opensearch-project/OpenSearch/issues/19085)) +- Fix assertion error when collapsing search results with concurrent segment search enabled ([#19053](https://github.com/opensearch-project/OpenSearch/pull/19053)) - Fix skip_unavailable setting changing to default during node drop issue ([#18766](https://github.com/opensearch-project/OpenSearch/pull/18766)) ### Dependencies diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 455b348e7433b..f49927cbae12d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -509,3 +509,104 @@ setup: - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._id: "4" } - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._seq_no: 0 } - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._primary_term: 1 } + +--- +"Test field collapsing with sort": + - skip: + version: " - 3.2.99" + reason: Fixed in 3.3.0 + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + sort_field: { type: integer } + collapse_field: { type: integer } + marker: {type: keyword} + + - do: + index: + index: test_1 + refresh: true + id: 1 + body: { sort_field: 1, collapse_field: 1, marker: "doc1" } + - do: + index: + index: test_1 + refresh: true + id: 2 + body: { sort_field: 1, collapse_field: 2, marker: "doc2" } + - do: + index: + index: test_1 + refresh: true + id: 3 + body: { sort_field: 1, collapse_field: 2, marker: "doc3" } + + - do: + search: + index: test_1 + size: 2 + body: + collapse: { field: collapse_field } + sort: [{ sort_field: desc }] + - match: { hits.total.value: 3 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: '1' } + - match: { hits.hits.0._source.marker: 'doc1' } + - match: { hits.hits.1._id: '2' } + - match: { hits.hits.1._source.marker: 'doc2' } + +--- +"Test field collapsing with sort when concurrent segment search enabled": + - skip: + version: " - 3.2.99" + reason: Fixed in 3.3.0 + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + sort_field: { type: integer } + collapse_field: { type: integer } + marker: {type: keyword} + + - do: + index: + index: test_1 + refresh: true + id: 1 + body: { sort_field: 1, collapse_field: 1, marker: "doc1" } + - do: + index: + index: test_1 + refresh: true + id: 2 + body: { sort_field: 1, collapse_field: 2, marker: "doc2" } + - do: + index: + index: test_1 + refresh: true + id: 3 + body: { sort_field: 1, collapse_field: 2, marker: "doc3" } + - do: + 
indices.put_settings: + index: test_1 + body: + index.search.concurrent_segment_search.mode: 'all' + + - do: + search: + index: test_1 + size: 2 + body: + collapse: { field: collapse_field } + sort: [{ sort_field: desc }] + - match: { hits.total.value: 3 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: '1' } + - match: { hits.hits.0._source.marker: 'doc1' } + - match: { hits.hits.1._id: '2' } + - match: { hits.hits.1._source.marker: 'doc2' } diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java index 4ab1eee4e089f..e453d8690d9c6 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -43,6 +43,7 @@ import org.opensearch.core.common.util.CollectionUtils; import java.util.ArrayList; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -55,6 +56,14 @@ public final class CollapseTopFieldDocs extends TopFieldDocs { public final String field; /** The collapse value for each top doc */ public final Object[] collapseValues; + /** Internal comparator with shardIndex */ + private static final Comparator SHARD_INDEX_TIE_BREAKER = Comparator.comparingInt(d -> d.shardIndex); + + /** Internal comparator with docID */ + private static final Comparator DOC_ID_TIE_BREAKER = Comparator.comparingInt(d -> d.doc); + + /** Default comparator */ + private static final Comparator DEFAULT_TIE_BREAKER = SHARD_INDEX_TIE_BREAKER.thenComparing(DOC_ID_TIE_BREAKER); public CollapseTopFieldDocs(String field, TotalHits totalHits, ScoreDoc[] scoreDocs, SortField[] sortFields, Object[] values) { super(totalHits, scoreDocs, sortFields); @@ -67,55 +76,35 @@ private static final class ShardRef { // Which shard (index into shardHits[]): final int shardIndex; - // True if we should use the 
incoming ScoreDoc.shardIndex for sort order - final boolean useScoreDocIndex; - // Which hit within the shard: int hitIndex; - ShardRef(int shardIndex, boolean useScoreDocIndex) { + ShardRef(int shardIndex) { this.shardIndex = shardIndex; - this.useScoreDocIndex = useScoreDocIndex; } @Override public String toString() { return "ShardRef(shardIndex=" + shardIndex + " hitIndex=" + hitIndex + ")"; } - - int getShardIndex(ScoreDoc scoreDoc) { - if (useScoreDocIndex) { - if (scoreDoc.shardIndex == -1) { - throw new IllegalArgumentException( - "setShardIndex is false but TopDocs[" + shardIndex + "].scoreDocs[" + hitIndex + "] is not set" - ); - } - return scoreDoc.shardIndex; - } else { - // NOTE: we don't assert that shardIndex is -1 here, because caller could in fact have set it but asked us to ignore it now - return shardIndex; - } - } } /** - * if we need to tie-break since score / sort value are the same we first compare shard index (lower shard wins) - * and then iff shard index is the same we use the hit index. + * Use the default tie breaker. 
If tie breaker returns 0 signifying equal values, we use hit + * indices to tie break intra shard ties */ static boolean tieBreakLessThan(ShardRef first, ScoreDoc firstDoc, ShardRef second, ScoreDoc secondDoc) { - final int firstShardIndex = first.getShardIndex(firstDoc); - final int secondShardIndex = second.getShardIndex(secondDoc); - // Tie break: earlier shard wins - if (firstShardIndex < secondShardIndex) { - return true; - } else if (firstShardIndex > secondShardIndex) { - return false; - } else { + int value = DEFAULT_TIE_BREAKER.compare(firstDoc, secondDoc); + + if (value == 0) { + // Equal Values // Tie break in same shard: resolve however the // shard had resolved it: assert first.hitIndex != second.hitIndex; return first.hitIndex < second.hitIndex; } + + return value < 0; } private static class MergeSortQueue extends PriorityQueue { @@ -173,8 +162,10 @@ public boolean lessThan(ShardRef first, ShardRef second) { /** * Returns a new CollapseTopDocs, containing topN collapsed results across * the provided CollapseTopDocs, sorting by score. Each {@link CollapseTopFieldDocs} instance must be sorted. + * docIDs are expected to be in consistent pattern i.e. 
either all ScoreDocs have their shardIndex set, + * or all have them as -1 (signifying that all hits belong to same shard) **/ - public static CollapseTopFieldDocs merge(Sort sort, int start, int size, CollapseTopFieldDocs[] shardHits, boolean setShardIndex) { + public static CollapseTopFieldDocs merge(Sort sort, int start, int size, CollapseTopFieldDocs[] shardHits) { String collapseField = shardHits[0].field; for (int i = 1; i < shardHits.length; i++) { if (collapseField.equals(shardHits[i].field) == false) { @@ -200,12 +191,13 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, Collaps } if (CollectionUtils.isEmpty(shard.scoreDocs) == false) { availHitCount += shard.scoreDocs.length; - queue.add(new ShardRef(shardIDX, setShardIndex == false)); + queue.add(new ShardRef(shardIDX)); } } final ScoreDoc[] hits; final Object[] values; + boolean unsetShardIndex = false; if (availHitCount <= start) { hits = new ScoreDoc[0]; values = new Object[0]; @@ -223,6 +215,15 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, Collaps ShardRef ref = queue.top(); final ScoreDoc hit = shardHits[ref.shardIndex].scoreDocs[ref.hitIndex]; final Object collapseValue = shardHits[ref.shardIndex].collapseValues[ref.hitIndex++]; + // Irrespective of whether we use shard indices for tie breaking or not, we check for + // consistent order in shard indices to defend against potential bugs + if (hitUpto > 0) { + if (unsetShardIndex != (hit.shardIndex == -1)) { + throw new IllegalArgumentException("Inconsistent order of shard indices"); + } + } + unsetShardIndex |= hit.shardIndex == -1; + if (seen.contains(collapseValue)) { if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) { queue.updateTop(); @@ -232,9 +233,6 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, Collaps continue; } seen.add(collapseValue); - if (setShardIndex) { - hit.shardIndex = ref.shardIndex; - } if (hitUpto >= start) { hitList.add(hit); 
collapseList.add(collapseValue); diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index 503252a814401..40a2805563369 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -233,7 +233,7 @@ static TopDocs mergeTopDocs(Collection results, int topN, int from) { } else if (topDocs instanceof CollapseTopFieldDocs) { final CollapseTopFieldDocs[] shardTopDocs = results.toArray(new CollapseTopFieldDocs[numShards]); final Sort sort = createSort(shardTopDocs); - mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs, false); + mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs); } else if (topDocs instanceof TopFieldDocs) { final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]); final Sort sort = createSort(shardTopDocs); diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java index 82a8d3507cb10..5b82b0df68ca6 100644 --- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java @@ -331,8 +331,7 @@ protected ReduceableSearchResult reduceWith(final Collection> void assertSearchCollapse( subSearcher.search(weight, c); shardHits[shardIDX] = c.getTopDocs(); } - CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits, true); + CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits); assertTopDocsEquals(query, mergedFieldDocs, collapseTopFieldDocs); w.close(); reader.close(); @@ -455,4 +456,42 @@ public void testEmptySortedSegment() throws Exception { 
reader.close(); dir.close(); } + + public void testInconsistentShardIndicesException() { + Sort sort = Sort.RELEVANCE; + + // Create TopDocs with mixed shardIndex values - some set, some -1 + ScoreDoc[] shard1Docs = { + new FieldDoc(1, 9.0f, new Object[] { 9.0f }, 0), // shardIndex = 0 + new FieldDoc(2, 8.0f, new Object[] { 8.0f }, 0) // shardIndex = 0 + }; + + ScoreDoc[] shard2Docs = { + new FieldDoc(3, 7.0f, new Object[] { 7.0f }, -1), // shardIndex = -1 (inconsistent!) + new FieldDoc(4, 6.0f, new Object[] { 6.0f }, -1) // shardIndex = -1 + }; + + CollapseTopFieldDocs[] shardHits = { + new CollapseTopFieldDocs( + "field", + new TotalHits(2, TotalHits.Relation.EQUAL_TO), + shard1Docs, + sort.getSort(), + new Object[] { "val1", "val2" } + ), + new CollapseTopFieldDocs( + "field", + new TotalHits(2, TotalHits.Relation.EQUAL_TO), + shard2Docs, + sort.getSort(), + new Object[] { "val3", "val4" } + ) }; + + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + CollapseTopFieldDocs.merge(sort, 0, 10, shardHits); + }); + + assertEquals("Inconsistent order of shard indices", exception.getMessage()); + } + } diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index ae32ebd0a6f7a..f00111b6160a2 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -1123,6 +1123,9 @@ public void testCollapseQuerySearchResults() throws Exception { assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); assertThat(context.queryResult().topDocs().topDocs.totalHits.value(), equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + for (ScoreDoc scoreDoc : context.queryResult().topDocs().topDocs.scoreDocs) { + assertEquals(-1, scoreDoc.shardIndex); + } 
CollapseTopFieldDocs topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; assertThat(topDocs.collapseValues.length, equalTo(2)); @@ -1135,6 +1138,9 @@ public void testCollapseQuerySearchResults() throws Exception { assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); assertThat(context.queryResult().topDocs().topDocs.totalHits.value(), equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + for (ScoreDoc scoreDoc : context.queryResult().topDocs().topDocs.scoreDocs) { + assertEquals(-1, scoreDoc.shardIndex); + } topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; assertThat(topDocs.collapseValues.length, equalTo(2)); @@ -1147,6 +1153,9 @@ public void testCollapseQuerySearchResults() throws Exception { assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); assertThat(context.queryResult().topDocs().topDocs.totalHits.value(), equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + for (ScoreDoc scoreDoc : context.queryResult().topDocs().topDocs.scoreDocs) { + assertEquals(-1, scoreDoc.shardIndex); + } topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; assertThat(topDocs.collapseValues.length, equalTo(2)); From d994a3266093817a1ef2fd6ee077efbffa1913d9 Mon Sep 17 00:00:00 2001 From: Atri Sharma Date: Fri, 29 Aug 2025 21:23:39 +0530 Subject: [PATCH 24/27] Fix LocalTranslogTests handling of corrupted translog recovery (#19177) Partial writes can corrupt translog files during simulated failures. When recovering these files, TranslogCorruptedException is thrown but wasn't caught since it extends OpenSearchException, not TranslogException. Add TranslogCorruptedException to catch blocks alongside other expected exceptions during translog recovery attempts. 
Signed-off-by: Atri Sharma --- .../org/opensearch/index/translog/LocalTranslogTests.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index d61b63e6ff53b..ec015a3049d38 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -3368,8 +3368,8 @@ public void testWithRandomException() throws IOException { localCheckpointOfSafeCommit = failableTLog.getDeletionPolicy().getLocalCheckpointOfSafeCommit(); IOUtils.closeWhileHandlingException(failableTLog); } - } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { - // failed - that's ok, we didn't even create it + } catch (TranslogException | MockDirectoryWrapper.FakeIOException | TranslogCorruptedException ex) { + // failed - that's ok, we didn't even create it or it was corrupted from partial writes } catch (IOException ex) { assertEquals(ex.getMessage(), "__FAKE__ no space left on device"); } @@ -3379,8 +3379,8 @@ public void testWithRandomException() throws IOException { TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(); deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit); IOUtils.close(getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, deletionPolicy)); - } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { - // failed - that's ok, we didn't even create it + } catch (TranslogException | MockDirectoryWrapper.FakeIOException | TranslogCorruptedException ex) { + // failed - that's ok, we didn't even create it or it was corrupted from partial writes } catch (IOException ex) { assertEquals(ex.getMessage(), "__FAKE__ no space left on device"); } From d579a48dfcf7dd401e6b43a0e34ef8d17ec8a768 Mon Sep 17 00:00:00 2001 From: Ankit Jain 
Date: Fri, 29 Aug 2025 11:38:23 -0700 Subject: [PATCH 25/27] Handling concurrent segment search as part of fetch profiling (#19164) Signed-off-by: Ankit Jain --- .../search/profile/fetch/FlatFetchProfileTree.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java b/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java index 0b850c163379c..e9adfcf9c0760 100644 --- a/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/fetch/FlatFetchProfileTree.java @@ -12,11 +12,12 @@ import org.opensearch.search.profile.Timer; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; /** * Simplified profiling tree for fetch phase operations. Each fetch phase is @@ -59,8 +60,8 @@ private static class Node { } private final List roots = new ArrayList<>(); - private final Map rootsMap = new HashMap<>(); - private final Map phaseMap = new HashMap<>(); + private final ConcurrentMap rootsMap = new ConcurrentHashMap<>(); + private final ConcurrentMap phaseMap = new ConcurrentHashMap<>(); /** Start profiling a new fetch phase and return its breakdown. 
*/ FetchProfileBreakdown startFetchPhase(String element) { From 47a28a0905ab6c1821436ee8400b4a5c86119f37 Mon Sep 17 00:00:00 2001 From: Pranit Kumar Date: Tue, 2 Sep 2025 13:53:18 +0530 Subject: [PATCH 26/27] Add index metadata for SSE Signed-off-by: Pranit Kumar --- .../metadata/MetadataCreateIndexService.java | 29 +++++++--- .../CompositeRemoteRepository.java | 4 ++ .../remotestore/RemoteStoreNodeAttribute.java | 54 ++++--------------- .../opensearch/snapshots/RestoreService.java | 6 +-- 4 files changed, 39 insertions(+), 54 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 42c501b1c0d7f..effd0ba5643b0 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1149,6 +1149,23 @@ public static void updateReplicationStrategy( settingsBuilder.put(SETTING_REPLICATION_TYPE, indexReplicationType); } + public static void updateRemoteStoreSettings( + Settings.Builder settingsBuilder, + ClusterState clusterState, + ClusterSettings clusterSettings, + Settings nodeSettings, + String indexName, + IndexMetadata indexMetadata + ) { + System.out.println("settingsBuilder = " + settingsBuilder.toString()); + boolean sseEnabledIndex = IndexMetadata.INDEX_REMOTE_STORE_SSE_ENABLED_SETTING.get(indexMetadata.getSettings()); + System.out.println("[RESTORING FROM SNAPSHOT] sseEnabledIndex = " + sseEnabledIndex); + if (sseEnabledIndex) { + settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); + } + updateRemoteStoreSettings(settingsBuilder, clusterState, clusterSettings, nodeSettings, indexName, true); + } + /** * Updates index settings to enable remote store by default based on node attributes * @param settingsBuilder index settings builder to be updated with relevant settings @@ 
-1169,12 +1186,6 @@ public static void updateRemoteStoreSettings( && clusterSettings.get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(RemoteStoreNodeService.CompatibilityMode.STRICT)) || isMigratingToRemoteStore(clusterSettings)) { - if (!isRestoreFromSnapshot) { - if (indexName.startsWith("sse-rp")) { - settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); - } - } - String segmentRepo, translogRepo; Optional remoteNode = clusterState.nodes() @@ -1184,6 +1195,11 @@ public static void updateRemoteStoreSettings( .filter(DiscoveryNode::isRemoteStoreNode) .findFirst(); + if (!isRestoreFromSnapshot && RemoteStoreNodeAttribute.isRemoteStoreServerSideEncryptionEnabled() && indexName.startsWith("sse-rp")) { + System.out.println("MetadataCreateIndexService.updateRemoteStoreSettings"); + settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); + } + if (remoteNode.isPresent()) { Map indexSettings = settingsBuilder.keys().stream() .collect(Collectors.toMap(key -> key, settingsBuilder::get)); @@ -1193,7 +1209,6 @@ public static void updateRemoteStoreSettings( translogRepo = RemoteStoreNodeAttribute.getTranslogRepoName(remoteNode.get().getAttributes(), currentIndexSettings); segmentRepo = RemoteStoreNodeAttribute.getSegmentRepoName(remoteNode.get().getAttributes(), currentIndexSettings); - System.out.println("MetadataCreateIndexService.updateRemoteStoreSettings trepo " + translogRepo + ", srepo " + segmentRepo); if (segmentRepo != null && translogRepo != null) { diff --git a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java index 467b4a5e2e97d..37f7a1b4fdf57 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java +++ b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java @@ -48,6 +48,10 @@ public String toString() { '}'; } + public boolean 
isServerSideEncryptionEnabled() { + return repositoryEncryptionTypeMap.get(RemoteStoreRepositoryType.SEGMENT).containsKey(CompositeRepositoryEncryptionType.SERVER); + } + /** * Enum for Remote store repo types */ diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 4fd9ce9bf44bb..686032718478a 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -40,7 +40,6 @@ */ public class RemoteStoreNodeAttribute { - private static final Logger logger = LogManager.getLogger(RemoteStoreNodeAttribute.class); private static final String REMOTE_STORE_TRANSLOG_REPO_PREFIX = "translog"; private static final String REMOTE_STORE_SEGMENT_REPO_PREFIX = "segment"; @@ -308,16 +307,6 @@ public static boolean isTranslogRepoConfigured(Settings settings) { return false; } -// public static boolean isServerSideEncryptionRepoConfigured(Settings settings) { -// boolean isServerSideEncryptionConfigured = false; -// for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { -// if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { -// isServerSideEncryptionConfigured = true; -// } -// } -// return isServerSideEncryptionConfigured; -// } - public static boolean isRemoteClusterStateConfigured(Settings settings) { for (String prefix : REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { @@ -337,16 +326,7 @@ public static String getRemoteStoreSegmentRepo(Settings settings) { } public static String getRemoteStoreSegmentRepo(Settings settings, boolean sseEnabled) { -// if (sseEnabled) { -// for (String prefix : REMOTE_SEGMENT_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { -// if 
(settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { -// return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); -// } -// } -// } else { - return getRemoteStoreSegmentRepo(settings); -// } -// return null; + return getRemoteStoreSegmentRepo(settings); } public static String getRemoteStoreTranslogRepo(Settings settings) { @@ -359,17 +339,11 @@ public static String getRemoteStoreTranslogRepo(Settings settings) { } public static String getRemoteStoreTranslogRepo(Settings settings, boolean sseEnabled) { -// if (sseEnabled) { -// for (String prefix : REMOTE_TRANSLOG_SSE_REPOSITORY_NAME_ATTRIBUTE_KEYS) { -// if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { -// return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); -// } -// } -// } else { - return getRemoteStoreTranslogRepo(settings); -// } -// -// return null; + return getRemoteStoreTranslogRepo(settings); + } + + public static boolean isRemoteStoreServerSideEncryptionEnabled() { + return compositeRemoteRepository.isServerSideEncryptionEnabled(); } public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { @@ -430,30 +404,22 @@ public static String getRoutingTableRepoName(Map repos) { } public static String getSegmentRepoName(Map repos, Settings indexSettings) { + CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; - CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = - CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; - - CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = - CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; if (indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)) { encryptionType = 
CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; } - //getValueFromAnyKey(repos, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS); return compositeRemoteRepository.getRepository(repositoryType, encryptionType).name(); - //return getValueFromAnyKey(repos, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS); } public static String getTranslogRepoName(Map repos, Settings indexSettings) { - CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = - CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; + CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; - CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = - CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; if (indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)) { encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; } - //getValueFromAnyKey(repos, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS); return compositeRemoteRepository.getRepository(repositoryType, encryptionType).name(); } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 47eac8816361a..e66f8fe14dd04 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -382,7 +382,7 @@ public ClusterState execute(ClusterState currentState) { IndexId snapshotIndexId = repositoryData.resolveIndexId(index); - final Settings overrideSettingsInternal = getOverrideSettingsInternal(); + final Settings overrideSettingsInternal = getOverrideSettingsInternal(metadata.index(index)); final String[] ignoreSettingsInternal = 
getIgnoreSettingsInternal(); IndexMetadata snapshotIndexMetadata = updateIndexSettings( @@ -688,7 +688,7 @@ private String[] getIgnoreSettingsInternal() { return indexSettingsToBeIgnored; } - private Settings getOverrideSettingsInternal() { + private Settings getOverrideSettingsInternal(IndexMetadata indexMetadata) { final Settings.Builder settingsBuilder = Settings.builder(); // We will use whatever replication strategy provided by user or from snapshot metadata unless @@ -713,7 +713,7 @@ private Settings getOverrideSettingsInternal() { clusterSettings, clusterService.getSettings(), String.join(",", request.indices()), - true + indexMetadata ); return settingsBuilder.build(); } From 2b09b7824c97dd2d41a072bbfdb940f4eee23541 Mon Sep 17 00:00:00 2001 From: Pranit Kumar Date: Thu, 4 Sep 2025 18:50:34 +0530 Subject: [PATCH 27/27] Finalize Remote store changes for Composite repo Signed-off-by: Pranit Kumar --- .../cluster/metadata/IndexMetadata.java | 6 +- .../metadata/MetadataCreateIndexService.java | 25 +-- .../org/opensearch/index/IndexService.java | 5 +- .../org/opensearch/index/IndexSettings.java | 18 +- .../index/remote/RemoteIndexPathUploader.java | 16 +- .../RemoteMigrationIndexMetadataUpdater.java | 14 +- .../RemoteStoreCustomMetadataResolver.java | 5 +- .../RemoteSegmentStoreDirectoryFactory.java | 45 +---- ...emoteBlobStoreInternalTranslogFactory.java | 1 - .../opensearch/indices/IndicesService.java | 2 +- .../CompositeRemoteRepository.java | 13 +- .../remotestore/RemoteStoreNodeAttribute.java | 169 +++++++++++------- 12 files changed, 153 insertions(+), 166 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index a1e7adfae648f..b0a5f4c83e2b4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -428,11 +428,9 @@ public void 
validate(final Boolean value) {} @Override public void validate(final Boolean value, final Map, Object> settings) { final Boolean isRemoteStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); - if (!isRemoteStoreEnabled) { + if (!isRemoteStoreEnabled && value) { throw new IllegalArgumentException( - "Server Side Encryption can be enabled when " - + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() - + " is enabled. " + "Server Side Encryption can be enabled when " + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + " is enabled. " ); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index effd0ba5643b0..2ec7bb98c9777 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1157,13 +1157,15 @@ public static void updateRemoteStoreSettings( String indexName, IndexMetadata indexMetadata ) { - System.out.println("settingsBuilder = " + settingsBuilder.toString()); - boolean sseEnabledIndex = IndexMetadata.INDEX_REMOTE_STORE_SSE_ENABLED_SETTING.get(indexMetadata.getSettings()); - System.out.println("[RESTORING FROM SNAPSHOT] sseEnabledIndex = " + sseEnabledIndex); - if (sseEnabledIndex) { - settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); + if ((isRemoteDataAttributePresent(nodeSettings) + && clusterSettings.get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(RemoteStoreNodeService.CompatibilityMode.STRICT)) + || isMigratingToRemoteStore(clusterSettings)) { + boolean sseEnabledIndex = IndexMetadata.INDEX_REMOTE_STORE_SSE_ENABLED_SETTING.get(indexMetadata.getSettings()); + if (sseEnabledIndex) { + settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); + } + updateRemoteStoreSettings(settingsBuilder, clusterState, clusterSettings, nodeSettings, 
indexName, true); } - updateRemoteStoreSettings(settingsBuilder, clusterState, clusterSettings, nodeSettings, indexName, true); } /** @@ -1195,21 +1197,20 @@ public static void updateRemoteStoreSettings( .filter(DiscoveryNode::isRemoteStoreNode) .findFirst(); - if (!isRestoreFromSnapshot && RemoteStoreNodeAttribute.isRemoteStoreServerSideEncryptionEnabled() && indexName.startsWith("sse-rp")) { - System.out.println("MetadataCreateIndexService.updateRemoteStoreSettings"); + if (!isRestoreFromSnapshot && RemoteStoreNodeAttribute.isRemoteStoreServerSideEncryptionEnabled()) { settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, true); } if (remoteNode.isPresent()) { - Map indexSettings = settingsBuilder.keys().stream() + Map indexSettings = settingsBuilder.keys() + .stream() .collect(Collectors.toMap(key -> key, settingsBuilder::get)); Settings.Builder currentSettingsBuilder = Settings.builder(); Settings currentIndexSettings = currentSettingsBuilder.loadFromMap(indexSettings).build(); - translogRepo = RemoteStoreNodeAttribute.getTranslogRepoName(remoteNode.get().getAttributes(), currentIndexSettings); - segmentRepo = RemoteStoreNodeAttribute.getSegmentRepoName(remoteNode.get().getAttributes(), currentIndexSettings); - System.out.println("MetadataCreateIndexService.updateRemoteStoreSettings trepo " + translogRepo + ", srepo " + segmentRepo); + translogRepo = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(currentIndexSettings); + segmentRepo = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(currentIndexSettings); if (segmentRepo != null && translogRepo != null) { settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 1d2e2219a2f02..02f4f35705ba6 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -697,12 +697,11 @@ 
public synchronized IndexShard createShard( } remoteDirectory = ((RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory).newDirectory( - RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), + RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getSettings()), this.indexSettings.getUUID(), shardId, this.indexSettings.getRemoteStorePathStrategy(), - this.indexSettings.getRemoteStoreSegmentPathPrefix(), - this.indexSettings.isRemoteStoreSSEnabled() + this.indexSettings.getRemoteStoreSegmentPathPrefix() ); } // When an instance of Store is created, a shardlock is created which is released on closing the instance of store. diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 96e192eb778d6..c68c0c73ce7ca 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1037,27 +1037,15 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.get(settings); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); - isRemoteStoreSSEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false); - System.out.println("IndexSettings.IndexSettings isRemoteStoreDirectorySSEnabled " + isRemoteStoreSSEnabled); isWarmIndex = settings.getAsBoolean(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false); - remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); - remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); - - System.out.println("IndexSettings.IndexSettings remoteStoreRepository " + remoteStoreRepository - + " remoteStoreTranslogRepository 
" + remoteStoreTranslogRepository) ; - - if (isRemoteStoreSSEnabled) { - remoteStoreRepository = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(nodeSettings, true); - remoteStoreTranslogRepository = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(this.getNodeSettings(), true); - } - System.out.println("2. IndexSettings.IndexSettings remoteStoreRepository " + remoteStoreRepository - + " remoteStoreTranslogRepository " + remoteStoreTranslogRepository) ; + remoteStoreRepository = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(indexMetadata.getSettings()); + remoteStoreTranslogRepository = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(indexMetadata.getSettings()); remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); - remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); + this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); String rawPrefix = IndexMetadata.INDEX_REMOTE_STORE_SEGMENT_PATH_PREFIX.get(settings); // Only set the prefix if it's explicitly set and not empty diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java index de84e0de2d16d..7ea9448d9fd7d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteIndexPathUploader.java @@ -250,18 +250,22 @@ public void start() { return; } - translogRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings)); - segmentRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings)); + translogRepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(false)); + segmentRepository = (BlobStoreRepository) 
validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(false)); - translogSSERepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings, true)); - segmentSSERepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings, true)); + if (RemoteStoreNodeAttribute.isRemoteStoreServerSideEncryptionEnabled()) { + translogSSERepository = (BlobStoreRepository) validateAndGetRepository( + RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(true) + ); + segmentSSERepository = (BlobStoreRepository) validateAndGetRepository(RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(true)); + } } private boolean isTranslogSegmentRepoSame() { // TODO - The current comparison checks the repository name. But it is also possible that the repository are same // by attributes, but different by name. We need to handle this. - String translogRepoName = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(settings); - String segmentRepoName = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(settings); + String translogRepoName = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(false); + String segmentRepoName = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(false); return Objects.equals(translogRepoName, segmentRepoName); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java index 83ec8fd377cb9..8440e774c085a 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java @@ -30,7 +30,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; 
import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStoreCustomMetadataDuringMigration; -import static org.opensearch.index.remote.RemoteStoreUtils.getRemoteStoreRepoName; /** * Utils for checking and mutating cluster state during remote migration @@ -72,17 +71,12 @@ public void maybeAddRemoteIndexSettings(IndexMetadata.Builder indexMetadataBuild "Index {} does not have remote store based index settings but all primary shards and STARTED replica shards have moved to remote enabled nodes. Applying remote store settings to the index", index ); - Map remoteRepoNames = getRemoteStoreRepoName(discoveryNodes); - System.out.println("RemoteMigrationIndexMetadataUpdater.maybeAddRemoteIndexSettings Remote repo Names are " + remoteRepoNames); + String segmentRepoName = RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(currentIndexSettings); + String translogRepoName = RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(currentIndexSettings); - String segmentRepoName = RemoteStoreNodeAttribute.getSegmentRepoName(remoteRepoNames, currentIndexSettings); - String tlogRepoName = RemoteStoreNodeAttribute.getTranslogRepoName(remoteRepoNames, currentIndexSettings); - - System.out.println("Index name is " + indexMetadata.getIndex().getName() + " Seg repo name " + segmentRepoName + " tlogRepoName " + tlogRepoName); - - assert Objects.nonNull(segmentRepoName) && Objects.nonNull(tlogRepoName) : "Remote repo names cannot be null"; + assert Objects.nonNull(segmentRepoName) && Objects.nonNull(translogRepoName) : "Remote repo names cannot be null"; Settings.Builder indexSettingsBuilder = Settings.builder().put(currentIndexSettings); - updateRemoteStoreSettings(indexSettingsBuilder, segmentRepoName, tlogRepoName); + updateRemoteStoreSettings(indexSettingsBuilder, segmentRepoName, translogRepoName); indexMetadataBuilder.settings(indexSettingsBuilder); indexMetadataBuilder.settingsVersion(1 + indexMetadata.getVersion()); } else { diff --git 
a/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java index e8a0dda5a699e..fe6d52115ed5b 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java @@ -14,6 +14,7 @@ import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; @@ -21,8 +22,6 @@ import java.util.function.Supplier; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; - /** * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. 
* @@ -61,7 +60,7 @@ public RemoteStorePathStrategy getPathStrategy() { public boolean isTranslogMetadataEnabled() { Repository repository; try { - repository = repositoriesServiceSupplier.get().repository(getRemoteStoreTranslogRepo(settings)); + repository = repositoriesServiceSupplier.get().repository(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(false)); } catch (RepositoryMissingException ex) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", ex); } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index f4cc8858ada59..35aba694729cb 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -11,7 +11,6 @@ import org.apache.lucene.store.Directory; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.BlobStore; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteStorePathStrategy; @@ -66,52 +65,17 @@ public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throw public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) throws IOException { - return newDirectory(repositoryName, indexUUID, shardId, pathStrategy, null, false); + return newDirectory(repositoryName, indexUUID, shardId, pathStrategy, null); } -// public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) -// throws IOException { -// return this.newDirectory(repositoryName, indexUUID, shardId, pathStrategy, false); -// } - - -// 
@Override -// public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { -// String repositoryName = indexSettings.getRemoteStoreRepository(); -// String indexUUID = indexSettings.getIndex().getUUID(); -// RemoteSegmentStoreDirectory directory = null; -// try { -// boolean serverSideEncryptionEnabled = indexSettings.isRemoteStoreSSEnabled(); -// System.out.println("[pranikum]: RemoteSegmentStoreDirectoryFactory.newDirectory Index name is " -// + indexSettings.getIndex().getName() + " SSE Value is " + serverSideEncryptionEnabled); -// -// System.out.println("repositoryName = " + repositoryName); -// -// directory = (RemoteSegmentStoreDirectory) newDirectory( -// repositoryName, -// indexUUID, -// path.getShardId(), -// indexSettings.getRemoteStorePathStrategy(), -// null, -// serverSideEncryptionEnabled -// ); -// -// } catch (IOException e) { -// e.printStackTrace(); -// } -// return directory; -// } - public Directory newDirectory( String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy, - String indexFixedPrefix, - boolean isSSEEnabled + String indexFixedPrefix ) throws IOException { assert Objects.nonNull(pathStrategy); - System.out.println("RemoteSegmentStoreDirectoryFactory.newDirectory repository Name is " + repositoryName); try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; @@ -120,7 +84,6 @@ public Directory newDirectory( String shardIdStr = String.valueOf(shardId.id()); Map pendingDownloadMergedSegments = new ConcurrentHashMap<>(); - BlobStore blobStore = blobStoreRepository.blobStore(); RemoteStorePathStrategy.ShardDataPathInput dataPathInput = RemoteStorePathStrategy.ShardDataPathInput.builder() .basePath(repositoryBasePath) .indexUUID(indexUUID) @@ -133,7 +96,7 @@ public Directory newDirectory( // Derive the path for data directory of 
SEGMENTS BlobPath dataPath = pathStrategy.generatePath(dataPathInput); RemoteDirectory dataDirectory = new RemoteDirectory( - blobStore.blobContainer(dataPath), + blobStoreRepository.blobStore().blobContainer(dataPath), blobStoreRepository::maybeRateLimitRemoteUploadTransfers, blobStoreRepository::maybeRateLimitLowPriorityRemoteUploadTransfers, blobStoreRepository::maybeRateLimitRemoteDownloadTransfers, @@ -152,7 +115,7 @@ public Directory newDirectory( .build(); // Derive the path for metadata directory of SEGMENTS BlobPath mdPath = pathStrategy.generatePath(mdPathInput); - RemoteDirectory metadataDirectory = new RemoteDirectory(blobStore.blobContainer(mdPath)); + RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath)); // The path for lock is derived within the RemoteStoreLockManagerFactory RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index 1f2b2c48b471a..d022e38d42203 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -91,7 +91,6 @@ public Translog newTranslog( BooleanSupplier startedPrimarySupplier, TranslogOperationHelper translogOperationHelper ) throws IOException { - assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 3ae849df07a13..83c1d2f35d607 100644 --- 
a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -714,7 +714,7 @@ private static BiFunction getTrans return new RemoteBlobStoreInternalTranslogFactory( repositoriesServiceSupplier, threadPool, - RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(indexSettings.getNodeSettings()), + RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(false), remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()), remoteStoreSettings ); diff --git a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java index 37f7a1b4fdf57..e1cb226402c39 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java +++ b/server/src/main/java/org/opensearch/node/remotestore/CompositeRemoteRepository.java @@ -9,7 +9,6 @@ package org.opensearch.node.remotestore; import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.util.HashMap; import java.util.Map; @@ -25,9 +24,11 @@ public CompositeRemoteRepository() { repositoryEncryptionTypeMap = new HashMap<>(); } - public void registerCompositeRepository(final RemoteStoreRepositoryType repositoryType, - final CompositeRepositoryEncryptionType type, - final RepositoryMetadata metadata) { + public void registerCompositeRepository( + final RemoteStoreRepositoryType repositoryType, + final CompositeRepositoryEncryptionType type, + final RepositoryMetadata metadata + ) { Map encryptionTypeMap = repositoryEncryptionTypeMap.get(repositoryType); if (encryptionTypeMap == null) { encryptionTypeMap = new HashMap<>(); @@ -43,9 +44,7 @@ public RepositoryMetadata getRepository(RemoteStoreRepositoryType repositoryType @Override public String toString() { - return "CompositeRemoteRepository{" + - "repositoryEncryptionTypeMap=" + 
repositoryEncryptionTypeMap + - '}'; + return "CompositeRemoteRepository{" + "repositoryEncryptionTypeMap=" + repositoryEncryptionTypeMap + '}'; } public boolean isServerSideEncryptionEnabled() { diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 686032718478a..53c8436652b8e 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -8,8 +8,6 @@ package org.opensearch.node.remotestore; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; @@ -93,6 +91,9 @@ public class RemoteStoreNodeAttribute { REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS ); + public static final String REMOTE_STORE_MODE_KEY = "remote_store.mode"; + public static final String REMOTE_STORE_SSE_REPO_SUFFIX = "-sse"; + private static CompositeRemoteRepository compositeRemoteRepository; private static Map repositoryMetadataMap; @@ -196,34 +197,42 @@ private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { repositoryMetadataMap.put(repositoryMetadata.name(), repositoryMetadata); if (isCompositeRepository(repositoryMetadata)) { - RepositoryMetadata sseRepoMetatdata = new RepositoryMetadata(repositoryMetadata.name() + "-SSE", repositoryMetadata.type(), repositoryMetadata.settings()); - repositoryMetadataMap.put(sseRepoMetatdata.name(), sseRepoMetatdata); - repositoryMetadataList.add(sseRepoMetatdata); + RepositoryMetadata sseRepoMetadata = new RepositoryMetadata( + repositoryMetadata.name() + REMOTE_STORE_SSE_REPO_SUFFIX, + repositoryMetadata.type(), + repositoryMetadata.settings() + ); + 
repositoryMetadataMap.put(sseRepoMetadata.name(), sseRepoMetadata); + repositoryMetadataList.add(sseRepoMetadata); } } // Let's Iterate over repo's and build Composite Repository structure for (Map.Entry repositoryTypeToNameEntry : remoteStoryTypeToRepoNameMap.entrySet()) { - CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; - CompositeRemoteRepository.RemoteStoreRepositoryType remoteStoreRepositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; - if (repositoryTypeToNameEntry.getKey().contains("translog")) { + CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = + CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + CompositeRemoteRepository.RemoteStoreRepositoryType remoteStoreRepositoryType = + CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; + if (repositoryTypeToNameEntry.getKey().contains(REMOTE_STORE_TRANSLOG_REPO_PREFIX)) { remoteStoreRepositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; } String repositoryName = repositoryTypeToNameEntry.getValue(); - compositeRemoteRepository.registerCompositeRepository(remoteStoreRepositoryType, + compositeRemoteRepository.registerCompositeRepository( + remoteStoreRepositoryType, encryptionType, - repositoryMetadataMap.get(repositoryName)); + repositoryMetadataMap.get(repositoryName) + ); - String sseRepositoryName = repositoryTypeToNameEntry.getValue() + "-SSE"; - System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata sseRepositoryName = " + sseRepositoryName); + String sseRepositoryName = repositoryTypeToNameEntry.getValue() + REMOTE_STORE_SSE_REPO_SUFFIX; if (repositoryMetadataMap.containsKey(sseRepositoryName)) { encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; - compositeRemoteRepository.registerCompositeRepository(remoteStoreRepositoryType, + 
compositeRemoteRepository.registerCompositeRepository( + remoteStoreRepositoryType, encryptionType, - repositoryMetadataMap.get(sseRepositoryName)); + repositoryMetadataMap.get(sseRepositoryName) + ); } - System.out.println("RemoteStoreNodeAttribute.buildRepositoriesMetadata compositeRemoteRepository is " + compositeRemoteRepository); } return new RepositoriesMetadata(repositoryMetadataList); } @@ -241,18 +250,62 @@ private static Tuple getValue(Map attributes, Li return null; } + private enum RemoteStoreMode { + SEGMENTS_ONLY, + DEFAULT + } + private Map getValidatedRepositoryNames(DiscoveryNode node, Map remoteStoryTypeToRepoNameMap) { Set> repositoryNames = new HashSet<>(); - if (containsKey(node.getAttributes(), REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) - || containsKey(node.getAttributes(), REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { - addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_SEGMENT_REPO_PREFIX); - addRepositoryNames(node, remoteStoryTypeToRepoNameMap, repositoryNames, REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS, REMOTE_STORE_TRANSLOG_REPO_PREFIX); - repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); - } else if (containsKey(node.getAttributes(), REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { - repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + RemoteStoreMode remoteStoreMode = RemoteStoreMode.DEFAULT; + if (containsKey(node.getAttributes(), List.of(REMOTE_STORE_MODE_KEY))) { + String mode = node.getAttributes().get(REMOTE_STORE_MODE_KEY); + if (mode != null && mode.equalsIgnoreCase(RemoteStoreMode.SEGMENTS_ONLY.name())) { + remoteStoreMode = RemoteStoreMode.SEGMENTS_ONLY; + } else if (mode != null && mode.equalsIgnoreCase(RemoteStoreMode.DEFAULT.name()) == false) { + throw new IllegalStateException("Unknown remote 
store mode [" + mode + "] for node [" + node + "]"); + } } + if (remoteStoreMode == RemoteStoreMode.SEGMENTS_ONLY) { + addRepositoryNames( + node, + remoteStoryTypeToRepoNameMap, + repositoryNames, + REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, + REMOTE_STORE_SEGMENT_REPO_PREFIX + ); + } else if (containsKey(node.getAttributes(), REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) + || containsKey(node.getAttributes(), REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + addRepositoryNames( + node, + remoteStoryTypeToRepoNameMap, + repositoryNames, + REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS, + REMOTE_STORE_SEGMENT_REPO_PREFIX + ); + addRepositoryNames( + node, + remoteStoryTypeToRepoNameMap, + repositoryNames, + REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS, + REMOTE_STORE_TRANSLOG_REPO_PREFIX + ); + + repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + } else if (containsKey(node.getAttributes(), REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); + } if (containsKey(node.getAttributes(), REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)) { + if (remoteStoreMode == RemoteStoreMode.SEGMENTS_ONLY) { + throw new IllegalStateException( + "Cannot set " + + REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS + + " attributes when remote store mode is set to segments only for node [" + + node + + "]" + ); + } repositoryNames.add(getAndValidateNodeAttributeEntries(node, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS)); } @@ -265,17 +318,18 @@ private Map getValidatedRepositoryNames(DiscoveryNode node, Map< return repoNamesWithPrefix; } - private void addRepositoryNames(DiscoveryNode node, - Map remoteStoryTypeToRepoNameMap, - Set> repositoryNames, - List attributeKeys, - String remoteStoreRepoPrefix) { + private void addRepositoryNames( + DiscoveryNode node, + Map 
remoteStoryTypeToRepoNameMap, + Set> repositoryNames, + List attributeKeys, + String remoteStoreRepoPrefix + ) { Tuple remoteStoreAttributeKeyMap = getAndValidateNodeAttributeEntries(node, attributeKeys); remoteStoryTypeToRepoNameMap.put(remoteStoreRepoPrefix, remoteStoreAttributeKeyMap.v1()); repositoryNames.add(remoteStoreAttributeKeyMap); } - public static boolean isRemoteStoreAttributePresent(Settings settings) { for (String prefix : REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX) { if (settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + prefix).isEmpty() == false) { @@ -316,32 +370,6 @@ public static boolean isRemoteClusterStateConfigured(Settings settings) { return false; } - public static String getRemoteStoreSegmentRepo(Settings settings) { - for (String prefix : REMOTE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEYS) { - if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { - return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); - } - } - return null; - } - - public static String getRemoteStoreSegmentRepo(Settings settings, boolean sseEnabled) { - return getRemoteStoreSegmentRepo(settings); - } - - public static String getRemoteStoreTranslogRepo(Settings settings) { - for (String prefix : REMOTE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEYS) { - if (settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix) != null) { - return settings.get(Node.NODE_ATTRIBUTES.getKey() + prefix); - } - } - return null; - } - - public static String getRemoteStoreTranslogRepo(Settings settings, boolean sseEnabled) { - return getRemoteStoreTranslogRepo(settings); - } - public static boolean isRemoteStoreServerSideEncryptionEnabled() { return compositeRemoteRepository.isServerSideEncryptionEnabled(); } @@ -403,26 +431,42 @@ public static String getRoutingTableRepoName(Map repos) { return getValueFromAnyKey(repos, REMOTE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEYS); } - public static String getSegmentRepoName(Map repos, Settings indexSettings) { - 
CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; + public static String getRemoteStoreSegmentRepo(boolean serverSideEncryptionEnabled) { + if (compositeRemoteRepository == null) { + return null; + } - CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; - if (indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)) { + CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.SEGMENT; + CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = + CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + if (serverSideEncryptionEnabled) { encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; } return compositeRemoteRepository.getRepository(repositoryType, encryptionType).name(); } - public static String getTranslogRepoName(Map repos, Settings indexSettings) { - CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; + public static String getRemoteStoreSegmentRepo(Settings indexSettings) { + return getRemoteStoreSegmentRepo(indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)); + } - CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; - if (indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)) { + public static String getRemoteStoreTranslogRepo(boolean serverSideEncryptionEnabled) { + if (compositeRemoteRepository == null) { + return null; + } + + CompositeRemoteRepository.RemoteStoreRepositoryType repositoryType = CompositeRemoteRepository.RemoteStoreRepositoryType.TRANSLOG; + 
CompositeRemoteRepository.CompositeRepositoryEncryptionType encryptionType = + CompositeRemoteRepository.CompositeRepositoryEncryptionType.CLIENT; + if (serverSideEncryptionEnabled) { encryptionType = CompositeRemoteRepository.CompositeRepositoryEncryptionType.SERVER; } return compositeRemoteRepository.getRepository(repositoryType, encryptionType).name(); } + public static String getRemoteStoreTranslogRepo(Settings indexSettings) { + return getRemoteStoreTranslogRepo(indexSettings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_SSE_ENABLED, false)); + } + private static String getValueFromAnyKey(Map repos, List keys) { for (String key : keys) { if (repos.get(key) != null) { @@ -493,7 +537,6 @@ public boolean equalsWithRepoSkip(Object o, List reposToSkip) { if (o == null || getClass() != o.getClass()) return false; RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; - System.out.println("[pranikum]: reposToSkip = " + reposToSkip); return this.getRepositoriesMetadata().equalsIgnoreGenerationsWithRepoSkip(that.getRepositoriesMetadata(), reposToSkip); }