diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 1ba3ee562317a..1dc8dc955f7c6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -49,4 +49,6 @@ BWC_VERSION: - "2.1.1" - "2.2.0" - "2.2.1" + - "2.2.2" - "2.3.0" + - "2.4.0" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9682461d9e110..07755ef69c6a3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,869 +4,1391 @@ updates: package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /benchmarks/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch-build-resources/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch.build/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/symbolic-link-preserving-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/testingConventions/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/thirdPartyAudit/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - 
directory: /buildSrc/src/testKit/thirdPartyAudit/sample_jars/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/benchmark/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/client-benchmark-noop-api-plugin/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest-high-level/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/sniffer/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/integ-test-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-arm64-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/maintenance/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - 
"dependencies" - directory: /distribution/bwc/staged/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-arm64-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-build-context/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/java-version-checker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/keystore-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/launchers/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/plugin-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/upgrade-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/missing-doclet/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + 
- "dependencies" - directory: /libs/core/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/dissect/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/grok/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/plugin-classloader/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/secure-sm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ssl-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/x-content/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/aggs-matrix-stats/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/analysis-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-geoip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-user-agent/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-expression/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-mustache/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/spi/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/mapper-extras/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/opensearch-dashboards/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - 
directory: /modules/parent-join/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/percolator/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/rank-eval/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/reindex/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/repository-url/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/systemd/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/transport-netty4/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-icu/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-kuromoji/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-nori/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-phonetic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-smartcn/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-stempel/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-ukrainian/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-azure-classic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/amazon-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/gce/ open-pull-requests-limit: 1 package-ecosystem: 
gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-settings/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-significance-heuristic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-suggester/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/painless-allowlist/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rescore/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rest-handler/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/script-expert-scoring/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ingest-attachment/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-annotated-text/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-murmur3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-size/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-azure/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-gcs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-hdfs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-s3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/store-smb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/transport-nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ccs-unavailable-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/die-with-dignity/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: 
weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/evil-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/full-cluster-restart/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/logging-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/mixed-cluster/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/multi-cluster-search/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/no-bootstrap-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-8/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-9/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-28/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-29/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/sles-12/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1604/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1804/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2012r2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2016/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/remote-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/repository-multi-version/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" 
- directory: /qa/rolling-upgrade/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-http/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-disabled/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-with-all-dependencies/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-multinode/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/translog-policy/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/unconfigured-node-name/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/verify-version-constants/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/wildfly/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /rest-api-spec/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /server/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/delayed-aggs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/azure-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/gcs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - 
"dependencies" - directory: /test/fixtures/hdfs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/krb5kdc-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/minio-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/old-elasticsearch/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/s3-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/framework/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/logger-usage/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" version: 2 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index d7981b5113972..4537cadf71074 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,15 +1,16 @@ ### Description [Describe what this change achieves] - + ### Issues Resolved [List any issues this PR will resolve] - + ### Check List - [ ] New functionality includes testing. - [ ] All tests pass - [ ] New functionality has been documented. - [ ] New functionality has javadoc added -- [ ] Commits are signed per the DCO using --signoff +- [ ] Commits are signed per the DCO using --signoff +- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml new file mode 100644 index 0000000000000..96f99f17b016e --- /dev/null +++ b/.github/workflows/changelog_verifier.yml @@ -0,0 +1,18 @@ +name: "Changelog Verifier" +on: + pull_request: + types: [opened, edited, review_requested, synchronize, reopened, ready_for_review, labeled, unlabeled] + +jobs: + # Enforces the update of a changelog file on every pull request + verify-changelog: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ github.event.pull_request.head.sha }} + + - uses: dangoslen/changelog-enforcer@v3 + with: + skipLabels: "autocut" diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 2ac904bf4ccf7..ed98bae8978ed 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -47,3 +47,17 @@ jobs: commit_user_name: dependabot[bot] commit_user_email: support@github.com commit_options: '--signoff' + + - name: Update the changelog + uses: dangoslen/dependabot-changelog-helper@v1 + with: + version: 'Unreleased' + + - name: Commit the changes + uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: "Update changelog" + branch: ${{ github.head_ref }} + commit_user_name: dependabot[bot] + commit_user_email: support@github.com + commit_options: '--signoff' diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 030689642677a..42c2d21d106ce 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -5,7 +5,7 @@ on: tags: - '*.*.*' -jobs: +jobs: build: runs-on: ubuntu-latest steps: @@ -61,6 +61,8 @@ jobs: commit-message: Incremented version to ${{ env.NEXT_VERSION }} signoff: true delete-branch: true + labels: | + autocut title: '[AUTO] Incremented version to ${{ env.NEXT_VERSION }}.' body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and incremented the version from ${{ env.CURRENT_VERSION }} to ${{ env.NEXT_VERSION }}. @@ -86,6 +88,8 @@ jobs: commit-message: Added bwc version ${{ env.NEXT_VERSION }} signoff: true delete-branch: true + labels: | + autocut title: '[AUTO] [${{ env.BASE_X }}] Added bwc version ${{ env.NEXT_VERSION }}.' body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}. @@ -111,6 +115,8 @@ jobs: commit-message: Added bwc version ${{ env.NEXT_VERSION }} signoff: true delete-branch: true + labels: | + autocut title: '[AUTO] [main] Added bwc version ${{ env.NEXT_VERSION }}.' body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}. 
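Two details above work together: auto-generated version-bump PRs receive the `autocut` label, which the changelog verifier is configured to skip (`skipLabels: "autocut"`), while dependabot PRs instead have their changelog entry written for them by the `dependabot-changelog-helper` step targeting the `Unreleased` heading. Assuming the helper appends under the `Dependencies` section used by the CHANGELOG introduced later in this patch (the section name here is an assumption based on that file, not on the action's documentation), the resulting entry would look roughly like:

```markdown
## [Unreleased]
### Dependencies
- Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1
```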
diff --git a/.linelint.yml b/.linelint.yml
index 6240c8b3d7a96..ec947019f8ab6 100644
--- a/.linelint.yml
+++ b/.linelint.yml
@@ -7,6 +7,7 @@ ignore:
   - .idea/
   - '*.sha1'
   - '*.txt'
+  - 'CHANGELOG.md'
   - '.github/CODEOWNERS'
   - 'buildSrc/src/testKit/opensearch.build/LICENSE'
   - 'buildSrc/src/testKit/opensearch.build/NOTICE'
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000000..cc33553811dc6
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,75 @@
+# CHANGELOG
+Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
+
+## [Unreleased]
+### Added
+- Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001))
+- GitHub workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+- Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064))
+- Added @dreamer-89 as an OpenSearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342))
+- Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343))
+- Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344))
+- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
+- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
+- Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261))
+- BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383))
+- Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391))
+- Update previous release bwc version to 2.4.0 ([#4455](https://github.com/opensearch-project/OpenSearch/pull/4455))
+- 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457))
+
+### Dependencies
+- Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1
+- Bumps `reactor-netty-core` from 1.0.19 to 1.0.22
+- Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
+- Bumps `xmlbeans` from 5.1.0 to 5.1.1
+- Bumps azure-core-http-netty from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160))
+- Bumps azure-core from 1.27.0 to 1.31.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160))
+- Bumps azure-storage-common from 12.16.0 to 12.18.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160))
+
+### Changed
+- Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
+- Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240))
+- Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156))
+- Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084))
+
+### Deprecated
+
+### Removed
+
+### Fixed
+- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289))
+- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+- `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305))
+- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307))
+- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133))
+- Add timeout on Mockito.verify to reduce flakiness in testReplicationOnDone test ([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
+- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))
+- Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225))
+- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
+
+### Security
+- CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
+
+## [2.x]
+### Added
+- GitHub workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
+
+### Changed
+
+### Deprecated
+
+### Removed
+
+### Fixed
+- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))
+
+### Security
+
+
+[Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 467c7716cc578..fc02d52f0bc3b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,6 +6,7 @@
 - [Documentation Changes](#documentation-changes)
 - [Contributing Code](#contributing-code)
 - [Developer Certificate of Origin](#developer-certificate-of-origin)
+- [Changelog](#changelog)
 - [Review Process](#review-process)
 
 # Contributing to OpenSearch
@@ -116,6 +117,23 @@ Signed-off-by: Jane Smith
 ```
 You may type this line on your own when writing your commit messages. However, if your user.name and user.email are set in your git configs, you can use `-s` or `--signoff` to add the `Signed-off-by` line to the end of the commit message.
 
+## Changelog
+
+OpenSearch maintains a version-specific changelog by enforcing a change to the ongoing [CHANGELOG](CHANGELOG.md) file on every pull request, adhering to the [Keep A Changelog](https://keepachangelog.com/en/1.0.0/) format.
+
+Briefly, changes are curated by version, with changes to the main branch added chronologically to the `Unreleased` version. Each version also has corresponding sections that list the category of the change: `Added`, `Changed`, `Deprecated`, `Removed`, `Fixed`, and `Security`.
+
+### How do I add my changes to the [CHANGELOG](CHANGELOG.md)?
+
+As a contributor, you must ensure that every pull request lists its changes under the corresponding version and appropriate section of the [CHANGELOG](CHANGELOG.md) file.
+
+Adding the change is a two-step process:
+1.
Add your changes to the corresponding section within the CHANGELOG file with dummy pull request information, publish the PR + +2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. + + ## Review Process We deeply appreciate everyone who takes the time to make a contribution. We will review all contributions as quickly as possible. As a reminder, [opening an issue](https://github.com/opensearch-project/OpenSearch/issues/new/choose) discussing your change before you make it is the best way to smooth the PR process. This will prevent a rejection because someone else is already working on the problem, or because the solution is incompatible with the architectural direction. diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 94e649a634c7f..2f54656b2ab59 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -23,6 +23,7 @@ | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | | Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon | +| Suraj Singh |[dreamer-89](https://github.com/dreamer-89) | Amazon | | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | diff --git a/build.gradle b/build.gradle index ce5ea6cdd7e11..bcae5bc3884a7 100644 --- a/build.gradle +++ b/build.gradle @@ -55,8 +55,8 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.9.1" apply false - id "org.gradle.test-retry" version "1.4.0" apply false + id "com.diffplug.spotless" version "6.10.0" apply false + id "org.gradle.test-retry" version "1.4.1" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } @@ -264,6 +264,12 @@ tasks.register("branchConsistency") { allprojects { // configure compiler options tasks.withType(JavaCompile).configureEach { JavaCompile compile -> + options.fork = true + + configure(options.forkOptions) { + memoryMaximumSize = project.property('options.forkOptions.memoryMaximumSize') + } + // See please https://bugs.openjdk.java.net/browse/JDK-8209058 if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_11) { compile.options.compilerArgs << '-Werror' @@ -389,6 +395,10 @@ allprojects { // the dependency is added. 
gradle.projectsEvaluated { allprojects { + project.tasks.withType(JavaForkOptions) { + maxHeapSize project.property('options.forkOptions.memoryMaximumSize') + } + if (project.path == ':test:framework') { // :test:framework:test cannot run before and after :server:test return diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java b/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java index 38d6db8c9916e..2bd87d6fa50b2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java @@ -35,7 +35,8 @@ public enum Architecture { X64, - ARM64; + ARM64, + S390X; public static Architecture current() { final String architecture = System.getProperty("os.arch", ""); @@ -45,6 +46,8 @@ public static Architecture current() { return X64; case "aarch64": return ARM64; + case "s390x": + return S390X; default: throw new IllegalArgumentException("can not determine architecture from [" + architecture + "]"); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java index fccdc49ef6fc9..ae7b0d938e8ef 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java @@ -247,6 +247,9 @@ private String dependencyNotation(OpenSearchDistribution distribution) { case X64: classifier = ":" + distribution.getPlatform() + "-x64"; break; + case S390X: + classifier = ":" + distribution.getPlatform() + "-s390x"; + break; default: throw new IllegalArgumentException("Unsupported architecture: " + distribution.getArchitecture()); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java index 53fd998bcc53f..4b289de3f0619 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java @@ -48,7 +48,7 @@ public class Jdk implements Buildable, Iterable { - private static final List ALLOWED_ARCHITECTURES = Collections.unmodifiableList(Arrays.asList("aarch64", "x64")); + private static final List ALLOWED_ARCHITECTURES = Collections.unmodifiableList(Arrays.asList("aarch64", "x64", "s390x")); private static final List ALLOWED_VENDORS = Collections.unmodifiableList(Arrays.asList("adoptium", "adoptopenjdk", "openjdk")); private static final List ALLOWED_PLATFORMS = Collections.unmodifiableList( Arrays.asList("darwin", "freebsd", "linux", "mac", "windows") diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index d83384ec7d172..70c3737ba3674 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -9,7 +9,8 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.publish.Publication; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; @@ -18,6 +19,9 @@ import org.gradle.api.Task; public class Publish implements Plugin { + + private static final Logger LOGGER = Logging.getLogger(Publish.class); + public final static String EXTENSION_NAME = "zipmavensettings"; public 
final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; @@ -37,27 +41,25 @@ public static void configMaven(Project project) { }); }); publishing.publications(publications -> { - final Publication publication = publications.findByName(PUBLICATION_NAME); - if (publication == null) { - publications.create(PUBLICATION_NAME, MavenPublication.class, mavenZip -> { - String zipGroup = "org.opensearch.plugin"; - String zipArtifact = project.getName(); - String zipVersion = getProperty("version", project); - mavenZip.artifact(project.getTasks().named("bundlePlugin")); - mavenZip.setGroupId(zipGroup); - mavenZip.setArtifactId(zipArtifact); - mavenZip.setVersion(zipVersion); - }); - } else { - final MavenPublication mavenZip = (MavenPublication) publication; - String zipGroup = "org.opensearch.plugin"; - String zipArtifact = project.getName(); - String zipVersion = getProperty("version", project); - mavenZip.artifact(project.getTasks().named("bundlePlugin")); - mavenZip.setGroupId(zipGroup); - mavenZip.setArtifactId(zipArtifact); - mavenZip.setVersion(zipVersion); + MavenPublication mavenZip = (MavenPublication) publications.findByName(PUBLICATION_NAME); + + if (mavenZip == null) { + mavenZip = publications.create(PUBLICATION_NAME, MavenPublication.class); } + + String groupId = mavenZip.getGroupId(); + if (groupId == null) { + // The groupId is not customized thus we get the value from "project.group". + // See https://docs.gradle.org/current/userguide/publishing_maven.html#sec:identity_values_in_the_generated_pom + groupId = getProperty("group", project); + } + + String artifactId = project.getName(); + String pluginVersion = getProperty("version", project); + mavenZip.artifact(project.getTasks().named("bundlePlugin")); + mavenZip.setGroupId(groupId); + mavenZip.setArtifactId(artifactId); + mavenZip.setVersion(pluginVersion); }); }); } diff --git a/buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java b/buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java new file mode 100644 index 0000000000000..05f920c6c9248 --- /dev/null +++ b/buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle; + +import org.opensearch.gradle.test.GradleUnitTestCase; + +public class ArchitectureTests extends GradleUnitTestCase { + + final String architecture = System.getProperty("os.arch", ""); + + public void testCurrentArchitecture() { + assertEquals(Architecture.X64, currentArchitecture("amd64")); + assertEquals(Architecture.X64, currentArchitecture("x86_64")); + assertEquals(Architecture.ARM64, currentArchitecture("aarch64")); + assertEquals(Architecture.S390X, currentArchitecture("s390x")); + } + + public void testInvalidCurrentArchitecture() { + assertThrows("can not determine architecture from [", IllegalArgumentException.class, () -> currentArchitecture("fooBar64")); + } + + /** + * Determines the return value of {@link Architecture#current()} based on a string representing a potential OS Architecture. + * + * @param osArchToTest An expected value of the {@code os.arch} system property on another architecture. + * @return the value of the {@link Architecture} enum which would have resulted with the given value. 
+ * @throws IllegalArgumentException if the string is not mapped to a value of the {@link Architecture} enum. + */ + private Architecture currentArchitecture(String osArchToTest) throws IllegalArgumentException { + // Test new architecture + System.setProperty("os.arch", osArchToTest); + try { + return Architecture.current(); + } finally { + // Restore actual architecture property value + System.setProperty("os.arch", this.architecture); + } + } +} diff --git a/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java index 4dcc65cca4c62..ad17032e718d2 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java @@ -108,7 +108,7 @@ public void testUnknownArchitecture() { "11.0.2+33", "linux", "unknown", - "unknown architecture [unknown] for jdk [testjdk], must be one of [aarch64, x64]" + "unknown architecture [unknown] for jdk [testjdk], must be one of [aarch64, x64, s390x]" ); } diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 8c1314c4b4394..06632e2dfa476 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -10,19 +10,21 @@ import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; -import org.gradle.testfixtures.ProjectBuilder; -import org.gradle.api.Project; +import org.gradle.testkit.runner.UnexpectedBuildFailure; import org.opensearch.gradle.test.GradleUnitTestCase; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import java.io.IOException; -import org.gradle.api.publish.maven.tasks.PublishToMavenRepository; import java.io.File; +import java.io.FileReader; import java.io.FileWriter; +import java.io.IOException; import java.io.Writer; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; @@ -30,14 +32,16 @@ import org.apache.maven.model.Model; import org.apache.maven.model.io.xpp3.MavenXpp3Reader; import org.codehaus.plexus.util.xml.pull.XmlPullParserException; -import java.io.FileReader; -import org.gradle.api.tasks.bundling.Zip; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; -import java.util.ArrayList; public class PublishTests extends GradleUnitTestCase { private TemporaryFolder projectDir; + private static final String TEMPLATE_RESOURCE_FOLDER = "pluginzip"; + private final String PROJECT_NAME = "sample-plugin"; + private final String ZIP_PUBLISH_TASK = "publishPluginZipPublicationToZipStagingRepository"; @Before public void setUp() throws IOException { @@ -51,155 +55,200 @@ public void tearDown() { } @Test - public void testZipPublish() throws IOException, XmlPullParserException { - String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository"; - prepareProjectForPublishTask(zipPublishTask); - - // Generate the build.gradle file - String buildFileContent = "apply plugin: 'maven-publish' \n" - + "apply plugin: 'java' \n" - + "publishing {\n" - + " repositories {\n" - + " maven {\n" - + " url = 'local-staging-repo/'\n" - + " name = 'zipStaging'\n" - + " }\n" - + " }\n" - + " 
publications {\n" - + " pluginZip(MavenPublication) {\n" - + " groupId = 'org.opensearch.plugin' \n" - + " artifactId = 'sample-plugin' \n" - + " version = '2.0.0.0' \n" - + " artifact('sample-plugin.zip') \n" - + " }\n" - + " }\n" - + "}"; - writeString(projectDir.newFile("build.gradle"), buildFileContent); - // Execute the task publishPluginZipPublicationToZipStagingRepository - List allArguments = new ArrayList(); - allArguments.add("build"); - allArguments.add(zipPublishTask); - GradleRunner runner = GradleRunner.create(); - runner.forwardOutput(); - runner.withPluginClasspath(); - runner.withArguments(allArguments); - runner.withProjectDir(projectDir.getRoot()); + public void missingGroupValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("missingGroupValue.gradle"); + Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build); + assertTrue(e.getMessage().contains("Invalid publication 'pluginZip': groupId cannot be empty.")); + } + + /** + * This would be the most common use case where user declares Maven publication entity with basic info + * and the resulting POM file will use groupId and version values from the Gradle project object. + */ + @Test + public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("groupAndVersionValue.gradle"); BuildResult result = runner.build(); - // Check if task publishMavenzipPublicationToZipstagingRepository has ran well - assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome()); - // check if the zip has been published to local staging repo + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // check if both the zip and pom files have been published to local staging repo assertTrue( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip") - .exists() + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ).exists() ); - assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); - // Parse the maven file and validate the groupID to org.opensearch.plugin + assertTrue( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.zip" + ) + ).exists() + ); + + // Parse the maven file and validate the groupID MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) ) ); - assertEquals(model.getGroupId(), "org.opensearch.plugin"); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } + /** + * In this case the Publication entity is completely missing but still the POM 
file is generated using the default + * values including the groupId and version values obtained from the Gradle project object. + */ @Test - public void testZipPublishWithPom() throws IOException, XmlPullParserException { - String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository"; - Project project = prepareProjectForPublishTask(zipPublishTask); - - // Generate the build.gradle file - String buildFileContent = "apply plugin: 'maven-publish' \n" - + "apply plugin: 'java' \n" - + "publishing {\n" - + " repositories {\n" - + " maven {\n" - + " url = 'local-staging-repo/'\n" - + " name = 'zipStaging'\n" - + " }\n" - + " }\n" - + " publications {\n" - + " pluginZip(MavenPublication) {\n" - + " groupId = 'org.opensearch.plugin' \n" - + " artifactId = 'sample-plugin' \n" - + " version = '2.0.0.0' \n" - + " artifact('sample-plugin.zip') \n" - + " pom {\n" - + " name = 'sample-plugin'\n" - + " description = 'sample-description'\n" - + " licenses {\n" - + " license {\n" - + " name = \"The Apache License, Version 2.0\"\n" - + " url = \"http://www.apache.org/licenses/LICENSE-2.0.txt\"\n" - + " }\n" - + " }\n" - + " developers {\n" - + " developer {\n" - + " name = 'opensearch'\n" - + " url = 'https://github.com/opensearch-project/OpenSearch'\n" - + " }\n" - + " }\n" - + " url = 'https://github.com/opensearch-project/OpenSearch'\n" - + " scm {\n" - + " url = 'https://github.com/opensearch-project/OpenSearch'\n" - + " }\n" - + " }" - + " }\n" - + " }\n" - + "}"; - writeString(projectDir.newFile("build.gradle"), buildFileContent); - // Execute the task publishPluginZipPublicationToZipStagingRepository - List allArguments = new ArrayList(); - allArguments.add("build"); - allArguments.add(zipPublishTask); - GradleRunner runner = GradleRunner.create(); - runner.forwardOutput(); - runner.withPluginClasspath(); - runner.withArguments(allArguments); - runner.withProjectDir(projectDir.getRoot()); + public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle"); BuildResult result = runner.build(); - // Check if task publishMavenzipPublicationToZipstagingRepository has ran well - assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome()); - // check if the zip has been published to local staging repo - assertTrue( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip") - .exists() + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate it + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) ); + + assertEquals(model.getArtifactId(), PROJECT_NAME); + assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getPackaging(), "zip"); + + assertNull(model.getName()); + assertNull(model.getDescription()); + + assertEquals(0, model.getDevelopers().size()); + assertEquals(0, model.getContributors().size()); + assertEquals(0, model.getLicenses().size()); + } + + /** + * In some cases we need the POM 
groupId value to be different from the Gradle "project.group" value hence we + * allow for groupId customization (it will override whatever the Gradle "project.group" value is). + */ + @Test + public void customizedGroupValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle"); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); - // Parse the maven file and validate the groupID to org.opensearch.plugin + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate the groupID MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "I", + "am", + "customized", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) ) ); - assertEquals(model.getGroupId(), "org.opensearch.plugin"); - assertEquals(model.getUrl(), "https://github.com/opensearch-project/OpenSearch"); + + assertEquals(model.getGroupId(), "I.am.customized"); } - protected Project prepareProjectForPublishTask(String zipPublishTask) throws IOException { - Project project = ProjectBuilder.builder().build(); - - // Apply the opensearch.pluginzip plugin - project.getPluginManager().apply("opensearch.pluginzip"); - // Check if the plugin has been applied to the project - assertTrue(project.getPluginManager().hasPlugin("opensearch.pluginzip")); - // Check if the project has the task from class PublishToMavenRepository after plugin apply - assertNotNull(project.getTasks().withType(PublishToMavenRepository.class)); - // Create a mock bundlePlugin task - Zip task = project.getTasks().create("bundlePlugin", Zip.class); - Publish.configMaven(project); - // Check if the main task publishPluginZipPublicationToZipStagingRepository exists after plugin apply - assertTrue(project.getTasks().getNames().contains(zipPublishTask)); - assertNotNull("Task to generate: ", project.getTasks().getByName(zipPublishTask)); - // Run Gradle functional tests, but calling a build.gradle file, that resembles the plugin publish behavior - - // Create a sample plugin zip file - File sampleZip = new File(projectDir.getRoot(), "sample-plugin.zip"); - Files.createFile(sampleZip.toPath()); - writeString(projectDir.newFile("settings.gradle"), ""); - - return project; + /** + * If the customized groupId value is invalid (from the Maven POM perspective) then we need to be sure it is + * caught and reported properly. 
+ */ + @Test + public void customizedInvalidGroupValue() throws IOException, URISyntaxException { + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedInvalidGroupValue.gradle"); + Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build); + assertTrue( + e.getMessage().contains("Invalid publication 'pluginZip': groupId ( ) is not a valid Maven identifier ([A-Za-z0-9_\\-.]+).") + ); + } + + private GradleRunner prepareGradleRunnerFromTemplate(String templateName) throws IOException, URISyntaxException { + useTemplateFile(projectDir.newFile("build.gradle"), templateName); + prepareGradleFilesAndSources(); + + GradleRunner runner = GradleRunner.create() + .forwardOutput() + .withPluginClasspath() + .withArguments("build", ZIP_PUBLISH_TASK) + .withProjectDir(projectDir.getRoot()); + + return runner; + } + + private void prepareGradleFilesAndSources() throws IOException { + // A dummy "source" file that is processed with bundlePlugin and put into a ZIP artifact file + File bundleFile = new File(projectDir.getRoot(), PROJECT_NAME + "-source.txt"); + Path zipFile = Files.createFile(bundleFile.toPath()); + // Setting a project name via settings.gradle file + writeString(projectDir.newFile("settings.gradle"), "rootProject.name = '" + PROJECT_NAME + "'"); } private void writeString(File file, String string) throws IOException { @@ -208,4 +257,24 @@ private void writeString(File file, String string) throws IOException { } } + /** + * Write the content of the "template" file into the target file. + * The template file must be located in the {@value TEMPLATE_RESOURCE_FOLDER} folder. + * @param targetFile A target file + * @param templateFile A name of the template file located under {@value TEMPLATE_RESOURCE_FOLDER} folder + */ + private void useTemplateFile(File targetFile, String templateFile) throws IOException, URISyntaxException { + + URL resource = getClass().getClassLoader().getResource(String.join(File.separator, TEMPLATE_RESOURCE_FOLDER, templateFile)); + Path resPath = Paths.get(resource.toURI()).toAbsolutePath(); + List lines = Files.readAllLines(resPath, StandardCharsets.UTF_8); + + try (Writer writer = new FileWriter(targetFile)) { + for (String line : lines) { + writer.write(line); + writer.write(System.lineSeparator()); + } + } + } + } diff --git a/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle new file mode 100644 index 0000000000000..1bde3edda2d91 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle @@ -0,0 +1,45 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + groupId = "I.am.customized" + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = 
"https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle new file mode 100644 index 0000000000000..b6deeeb12ca6a --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle @@ -0,0 +1,45 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + groupId = " " // <-- User provides invalid value + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = "https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle new file mode 100644 index 0000000000000..bdab385f6082c --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle @@ -0,0 +1,44 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = "https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle new file mode 100644 index 0000000000000..602c178ea1a5b --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle @@ -0,0 +1,22 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +//group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle 
b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle new file mode 100644 index 0000000000000..2cc67c2e98954 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle @@ -0,0 +1,22 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + } + } +} diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4af1acfed0ab2..072dcc4578977 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -11,11 +11,11 @@ spatial4j = 0.7 jts = 1.15.0 jackson = 2.13.3 jackson_databind = 2.13.3 -snakeyaml = 1.26 +snakeyaml = 1.31 icu4j = 70.1 supercsv = 2.4.0 log4j = 2.17.1 -slf4j = 1.6.2 +slf4j = 1.7.36 asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle @@ -26,10 +26,10 @@ joda = 2.10.13 # client dependencies httpclient = 4.5.13 -httpcore = 4.4.12 -httpasyncclient = 4.1.4 +httpcore = 4.4.15 +httpasyncclient = 4.1.5 commonslogging = 1.2 -commonscodec = 1.13 +commonscodec = 1.15 # plugin dependencies aws = 1.12.270 @@ -42,7 +42,7 @@ bouncycastle=1.70 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 4.6.1 +mockito = 4.7.0 objenesis = 3.2 bytebuddy = 1.12.12 diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 6fa57295f48e4..eedc27d1d2ea7 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -54,6 +54,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -92,6 +94,7 @@ import org.opensearch.index.reindex.ReindexRequest; import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -433,9 +436,19 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); - params.withIndicesOptions(searchRequest.indicesOptions()); + if (searchRequest.pointInTimeBuilder() == null) { + params.withIndicesOptions(searchRequest.indicesOptions()); + } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + /** + * Merging search responses as part of 
CCS flow to reduce roundtrips is not supported for point in time - + * refer to org.opensearch.action.search.SearchResponseMerger + */ + if (searchRequest.pointInTimeBuilder() != null) { + params.putParam("ccs_minimize_roundtrips", "false"); + } else { + params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + } if (searchRequest.getPreFilterShardSize() != null) { params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); } @@ -464,6 +477,27 @@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep return request; } + static Request createPit(CreatePitRequest createPitRequest) throws IOException { + Params params = new Params(); + params.putParam(RestCreatePitAction.ALLOW_PARTIAL_PIT_CREATION, Boolean.toString(createPitRequest.shouldAllowPartialPitCreation())); + params.putParam(RestCreatePitAction.KEEP_ALIVE, createPitRequest.getKeepAlive()); + params.withIndicesOptions(createPitRequest.indicesOptions()); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(createPitRequest.indices(), "_search/point_in_time")); + request.addParameters(params.asMap()); + request.setEntity(createEntity(createPitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deletePit(DeletePitRequest deletePitRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time"); + request.setEntity(createEntity(deletePitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteAllPits() { + return new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time/_all"); + } + static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 7ae8f8826c5a4..0c73c65f6175f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -59,6 +59,10 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -108,10 +112,6 @@ import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.opensearch.search.aggregations.bucket.filter.ParsedFilter; import org.opensearch.search.aggregations.bucket.filter.ParsedFilters; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; -import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.global.ParsedGlobal; import 
org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -1254,6 +1254,120 @@ public final Cancellable scrollAsync( ); } + /** + * Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final CreatePitResponse createPit(CreatePitRequest createPitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return a {@link Cancellable} that may be used to cancel the request + */ + public final Cancellable createPitAsync( + CreatePitRequest createPitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete point in time searches using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously delete point in time searches using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return a {@link Cancellable} that may be used to cancel the request + */ + public final Cancellable deletePitAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete all point in time searches using delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deleteAllPits(RequestOptions options) throws IOException { + return performRequestAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously delete all point in time searches using delete all PITs API + * + * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return a {@link Cancellable} that may be used to cancel the request + */ + public final Cancellable deleteAllPitsAsync(RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. * @@ -2130,8 +2244,6 @@ static List getDefaultNamedXContents() { map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); - map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c)); map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java new file mode 100644 index 0000000000000..395ec6e46a7b3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.junit.Before; +import org.opensearch.OpenSearchStatusException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Tests point in time API with rest high level client + */ +public class PitIT extends OpenSearchRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); + doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); + client().performRequest(doc1); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); + doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); + client().performRequest(doc2); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); + doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); + client().performRequest(doc3); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); + doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); + client().performRequest(doc4); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); + doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); + client().performRequest(doc5); + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCreateAndDeletePit() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertEquals(1, pitResponse.getTotalShards()); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(0, pitResponse.getFailedShards()); + assertEquals(0, pitResponse.getSkippedShards()); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + + public void testDeleteAllPits() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + DeletePitResponse deletePitResponse = highLevelClient().deleteAllPits(RequestOptions.DEFAULT); + for (DeletePitInfo deletePitInfo : deletePitResponse.getDeletePitResults()) { + 
assertTrue(deletePitInfo.isSuccessful()); + } + pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + ActionListener deletePitListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + } + + @Override + public void onFailure(Exception e) { + if (!(e instanceof OpenSearchStatusException)) { + throw new AssertionError("Delete all failed"); + } + } + }; + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + // validate no pits case + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + } +} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 97c0f2f475826..ee5795deb165d 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -53,6 +53,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -131,6 +133,7 @@ import java.util.Locale; import java.util.Map; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1303,6 +1306,47 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testCreatePit() throws IOException { + String[] indices = randomIndicesNames(0, 5); + Map expectedParams = new HashMap<>(); + expectedParams.put("keep_alive", "1d"); + expectedParams.put("allow_partial_pit_creation", "true"); + CreatePitRequest createPitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, indices); + setRandomIndicesOptions(createPitRequest::indicesOptions, createPitRequest::indicesOptions, expectedParams); + Request request = RequestConverters.createPit(createPitRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/point_in_time"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(createPitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeletePit() throws IOException { + List pitIdsList = new ArrayList<>(); + pitIdsList.add("pitId1"); + pitIdsList.add("pitId2"); + DeletePitRequest deletePitRequest = new 
DeletePitRequest(pitIdsList); + Request request = RequestConverters.deletePit(deletePitRequest); + String endpoint = "/_search/point_in_time"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertToXContentBody(deletePitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeleteAllPits() { + Request request = RequestConverters.deleteAllPits(); + String endpoint = "/_search/point_in_time/_all"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + public void testSearchTemplate() throws Exception { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 3da0f81023f72..1384e99019793 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -134,6 +134,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { // core "ping", "info", + "delete_all_pits", // security "security.get_ssl_certificates", "security.authenticate", @@ -886,7 +887,9 @@ public void testApiNamingConventions() throws Exception { "nodes.usage", "nodes.reload_secure_settings", "search_shards", - "remote_store.restore", }; + "remote_store.restore", + "cluster.put_decommission_awareness", + "cluster.get_decommission_awareness", }; List booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); Set deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 19e287fb91be5..8b509e5d19e92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -43,6 +43,10 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -89,6 +93,7 @@ import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -100,11 +105,13 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import 
java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -762,6 +769,46 @@ public void testSearchScroll() throws Exception { } } + public void testSearchWithPit() throws Exception { + for (int i = 0; i < 100; i++) { + XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject(); + Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i)); + doc.setJsonEntity(Strings.toString(builder)); + client().performRequest(doc); + } + client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh")); + + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "test"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35) + .sort("field", SortOrder.ASC) + .pointInTimeBuilder(new PointInTimeBuilder(pitResponse.getId())); + SearchRequest searchRequest = new SearchRequest().source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + + try { + long counter = 0; + assertSearchHeader(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); + } + } finally { + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + deletePitRequest, + highLevelClient()::deletePit, + highLevelClient()::deletePitAsync + ); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + } + public void testMultiSearch() throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); SearchRequest searchRequest1 = new SearchRequest("index1"); diff --git a/client/rest/licenses/commons-codec-1.13.jar.sha1 b/client/rest/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/client/rest/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/client/rest/licenses/commons-codec-1.15.jar.sha1 b/client/rest/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/client/rest/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 deleted file mode 100644 index 8360ab45c7ab3..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3a3240681faae3fa46b573a4c7e50cec9db0d86 \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 new file mode 100644 index 0000000000000..366a9e31069a6 --- /dev/null +++ 
b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 @@ -0,0 +1 @@ +cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/client/rest/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/client/rest/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 deleted file mode 100644 index 4de932dc5aca0..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84cd29eca842f31db02987cfedea245af020198b \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..251b35ab6a1a5 --- /dev/null +++ b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 @@ -0,0 +1 @@ +85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 b/client/sniffer/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index ac70ee04444c7..1376b8d419f6e 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -151,6 +151,13 @@ distribution_archives { } } + linuxS390xTar { + archiveClassifier = 'linux-s390x' + content { + archiveFiles(modulesFiles('linux-s390x'), 'tar', 'linux', 's390x', false) + } + } + windowsZip { archiveClassifier = 'windows-x64' content { diff --git a/distribution/build.gradle b/distribution/build.gradle index 21b7d85a7ef2b..ee9016210efc7 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -280,7 +280,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { // Setup all required JDKs project.jdks { ['darwin', 'linux', 'windows'].each 
{ platform -> - (platform == 'linux' || platform == 'darwin' ? ['x64', 'aarch64'] : ['x64']).each { architecture -> + (platform == 'linux' || platform == 'darwin' ? ['x64', 'aarch64', 's390x'] : ['x64']).each { architecture -> "bundled_${platform}_${architecture}" { it.platform = platform it.version = VersionProperties.getBundledJdk(platform) @@ -353,7 +353,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } def buildModules = buildModulesTaskProvider - List excludePlatforms = ['darwin-x64', 'freebsd-x64', 'linux-x64', 'linux-arm64', 'windows-x64', 'darwin-arm64'] + List excludePlatforms = ['darwin-x64', 'freebsd-x64', 'linux-x64', 'linux-arm64', 'linux-s390x', 'windows-x64', 'darwin-arm64'] if (platform != null) { excludePlatforms.remove(excludePlatforms.indexOf(platform)) } else { diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index f5d8048a06276..7e0007f04c940 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -27,11 +27,13 @@ testFixtures.useFixture() configurations { arm64DockerSource + s390xDockerSource dockerSource } dependencies { arm64DockerSource project(path: ":distribution:archives:linux-arm64-tar", configuration:"default") + s390xDockerSource project(path: ":distribution:archives:linux-s390x-tar", configuration:"default") dockerSource project(path: ":distribution:archives:linux-tar", configuration:"default") } @@ -42,6 +44,8 @@ ext.expansions = { Architecture architecture, DockerBase base, boolean local -> classifier = "linux-arm64" } else if (architecture == Architecture.X64) { classifier = "linux-x64" + } else if (architecture == Architecture.S390X) { + classifier = "linux-s390x" } else { throw new IllegalArgumentException("Unsupported architecture [" + architecture + "]") } @@ -85,12 +89,14 @@ RUN curl --retry 8 -S -L \\ private static String buildPath(Architecture architecture, DockerBase base) { return 'build/' + (architecture == Architecture.ARM64 ? 'arm64-' : '') + + (architecture == Architecture.S390X ? 's390x-' : '') + 'docker' } private static String taskName(String prefix, Architecture architecture, DockerBase base, String suffix) { return prefix + (architecture == Architecture.ARM64 ? 'Arm64' : '') + + (architecture == Architecture.S390X ? 'S390x' : '') + suffix } @@ -127,6 +133,8 @@ void addCopyDockerContextTask(Architecture architecture, DockerBase base) { if (architecture == Architecture.ARM64) { from configurations.arm64DockerSource + } else if (architecture == Architecture.S390X) { + from configurations.s390xDockerSource } else { from configurations.dockerSource } diff --git a/distribution/docker/docker-s390x-export/build.gradle b/distribution/docker/docker-s390x-export/build.gradle new file mode 100644 index 0000000000000..3506c4e39c234 --- /dev/null +++ b/distribution/docker/docker-s390x-export/build.gradle @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// export is done in the parent project. 
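[Editorial note, not part of the patch] The s390x additions above (distribution archives, bundled JDKs, and the Docker source/task wiring) all rely on the build-time Architecture enum gaining an S390X constant that is resolved from the JVM's os.arch property, which is what the currentArchitecture(...) test helper and the updated JdkDownloadPluginTests error message near the top of this section exercise. A minimal sketch of the expected behavior, assuming the enum lives at org.opensearch.gradle.Architecture and that Architecture.current() still throws IllegalArgumentException for unmapped values (suggested by this diff, not guaranteed); the class name S390xMappingSketch is hypothetical:

    import org.opensearch.gradle.Architecture;

    public class S390xMappingSketch {
        public static void main(String[] args) {
            String realArch = System.getProperty("os.arch");
            try {
                // Simulate running the build on an s390x host.
                System.setProperty("os.arch", "s390x");
                // With this change the value resolves to the new constant instead of
                // throwing IllegalArgumentException for an unmapped architecture.
                System.out.println(Architecture.current() == Architecture.S390X); // expected: true
            } finally {
                // Restore the real value so later build logic is unaffected,
                // mirroring the test helper shown earlier in this diff.
                System.setProperty("os.arch", realArch);
            }
        }
    }
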
diff --git a/distribution/src/bin/opensearch-cli.bat b/distribution/src/bin/opensearch-cli.bat index 734669e1f9349..f080346a4478a 100644 --- a/distribution/src/bin/opensearch-cli.bat +++ b/distribution/src/bin/opensearch-cli.bat @@ -16,7 +16,7 @@ rem use a small heap size for the CLI tools, and thus the serial collector to rem avoid stealing many CPU cycles; a user can override by setting OPENSEARCH_JAVA_OPTS set OPENSEARCH_JAVA_OPTS=-Xms4m -Xmx64m -XX:+UseSerialGC %OPENSEARCH_JAVA_OPTS% -%JAVA% ^ +"%JAVA%" ^ %OPENSEARCH_JAVA_OPTS% ^ -Dopensearch.path.home="%OPENSEARCH_HOME%" ^ -Dopensearch.path.conf="%OPENSEARCH_PATH_CONF%" ^ diff --git a/distribution/src/bin/opensearch-env.bat b/distribution/src/bin/opensearch-env.bat index 96770f72f35c8..95088aaee7d3d 100644 --- a/distribution/src/bin/opensearch-env.bat +++ b/distribution/src/bin/opensearch-env.bat @@ -43,14 +43,14 @@ rem comparing to empty string makes this equivalent to bash -v check on env var rem and allows to effectively force use of the bundled jdk when launching OpenSearch rem by setting OPENSEARCH_JAVA_HOME= and JAVA_HOME= if not "%OPENSEARCH_JAVA_HOME%" == "" ( - set JAVA="%OPENSEARCH_JAVA_HOME%\bin\java.exe" + set "JAVA=%OPENSEARCH_JAVA_HOME%\bin\java.exe" set JAVA_TYPE=OPENSEARCH_JAVA_HOME ) else if not "%JAVA_HOME%" == "" ( - set JAVA="%JAVA_HOME%\bin\java.exe" + set "JAVA=%JAVA_HOME%\bin\java.exe" set JAVA_TYPE=JAVA_HOME ) else ( - set JAVA="%OPENSEARCH_HOME%\jdk\bin\java.exe" - set JAVA_HOME="%OPENSEARCH_HOME%\jdk" + set "JAVA=%OPENSEARCH_HOME%\jdk\bin\java.exe" + set "JAVA_HOME=%OPENSEARCH_HOME%\jdk" set JAVA_TYPE=bundled jdk ) @@ -73,4 +73,4 @@ if defined JAVA_OPTS ( ) rem check the Java version -%JAVA% -cp "%OPENSEARCH_CLASSPATH%" "org.opensearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 +"%JAVA%" -cp "%OPENSEARCH_CLASSPATH%" "org.opensearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 diff --git a/distribution/src/bin/opensearch-service.bat b/distribution/src/bin/opensearch-service.bat index 4dd8356340d10..c1f3f264ec4a0 100644 --- a/distribution/src/bin/opensearch-service.bat +++ b/distribution/src/bin/opensearch-service.bat @@ -8,6 +8,10 @@ if /i "%1" == "install" set NOJAVA= call "%~dp0opensearch-env.bat" %NOJAVA% || exit /b 1 +rem opensearch-service-x64.exe is based off of the Apache Commons Daemon procrun service application. +rem Run "opensearch-service-x64.exe version" for version information. +rem Run "opensearch-service-x64.exe help" for command options. +rem See https://commons.apache.org/proper/commons-daemon/procrun.html for more information. set EXECUTABLE=%OPENSEARCH_HOME%\bin\opensearch-service-x64.exe if "%SERVICE_ID%" == "" set SERVICE_ID=opensearch-service-x64 set ARCH=64-bit @@ -20,6 +24,10 @@ exit /B 1 set OPENSEARCH_VERSION=${project.version} if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs +rem The logs directory must exist for the service to start. 
+if not exist "%SERVICE_LOG_DIR%" ( + mkdir "%SERVICE_LOG_DIR%" +) if "x%1x" == "xx" goto displayUsage set SERVICE_CMD=%1 @@ -45,7 +53,8 @@ echo Usage: opensearch-service.bat install^|remove^|start^|stop^|manager [SERVIC goto:eof :doStart -"%EXECUTABLE%" //OPENSEARCH//%SERVICE_ID% %LOG_OPTS% +rem //ES == Execute Service +"%EXECUTABLE%" //ES//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto started echo Failed starting '%SERVICE_ID%' service exit /B 1 @@ -55,6 +64,7 @@ echo The service '%SERVICE_ID%' has been started goto:eof :doStop +rem //SS == Stop Service "%EXECUTABLE%" //SS//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto stopped echo Failed stopping '%SERVICE_ID%' service @@ -65,8 +75,11 @@ echo The service '%SERVICE_ID%' has been stopped goto:eof :doManagment +rem opensearch-service-mgr.exe is based off of the Apache Commons Daemon procrun monitor application. +rem See https://commons.apache.org/proper/commons-daemon/procrun.html for more information. set EXECUTABLE_MGR=%OPENSEARCH_HOME%\bin\opensearch-service-mgr -"%EXECUTABLE_MGR%" //OPENSEARCH//%SERVICE_ID% +rem //ES == Edit Service +"%EXECUTABLE_MGR%" //ES//%SERVICE_ID% if not errorlevel 1 goto managed echo Failed starting service manager for '%SERVICE_ID%' exit /B 1 @@ -77,6 +90,7 @@ goto:eof :doRemove rem Remove the service +rem //DS == Delete Service "%EXECUTABLE%" //DS//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto removed echo Failed removing '%SERVICE_ID%' service @@ -107,7 +121,7 @@ if exist "%JAVA_HOME%\bin\server\jvm.dll" ( :foundJVM if not defined OPENSEARCH_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a + for /f "tokens=* usebackq" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a ) rem The JVM options parser produces the final JVM options to start @@ -121,7 +135,7 @@ rem - third, JVM options from OPENSEARCH_JAVA_OPTS are applied rem - fourth, ergonomic JVM options are applied @setlocal -for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a +for /F "usebackq delims=" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" 
^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%OPENSEARCH_JAVA_OPTS%" & set OPENSEARCH_JAVA_OPTS=%OPENSEARCH_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( @@ -207,6 +221,7 @@ if not "%SERVICE_USERNAME%" == "" ( set SERVICE_PARAMS=%SERVICE_PARAMS% --ServiceUser "%SERVICE_USERNAME%" --ServicePassword "%SERVICE_PASSWORD%" ) ) +rem //IS == Install Service "%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %OPENSEARCH_START_TYPE% --StopTimeout %OPENSEARCH_STOP_TIMEOUT% --StartClass org.opensearch.bootstrap.OpenSearch --StartMethod main ++StartParams --quiet --StopClass org.opensearch.bootstrap.OpenSearch --StopMethod close --Classpath "%OPENSEARCH_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %OTHER_JAVA_OPTS% ++JvmOptions %OPENSEARCH_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%JAVA_HOME%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%OPENSEARCH_HOME%" %SERVICE_PARAMS% ++Environment HOSTNAME="%%COMPUTERNAME%%" if not errorlevel 1 goto installed diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index 49a12aa5c968d..cce21504c55b7 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -56,6 +56,12 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) +if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs +rem The logs directory must exist for the service to start. +if not exist "%SERVICE_LOG_DIR%" ( + mkdir "%SERVICE_LOG_DIR%" +) + SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent @@ -69,7 +75,7 @@ IF "%checkpassword%"=="Y" ( ) if not defined OPENSEARCH_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a + for /f "tokens=* usebackq" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a ) rem The JVM options parser produces the final JVM options to start @@ -82,7 +88,7 @@ rem jvm.options.d/*.options rem - third, JVM options from OPENSEARCH_JAVA_OPTS are applied rem - fourth, ergonomic JVM options are applied @setlocal -for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a +for /F "usebackq delims=" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%OPENSEARCH_JAVA_OPTS%" & set OPENSEARCH_JAVA_OPTS=%OPENSEARCH_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( @@ -97,7 +103,7 @@ SET KEYSTORE_PASSWORD=!KEYSTORE_PASSWORD:^<=^^^=^^^>! SET KEYSTORE_PASSWORD=!KEYSTORE_PASSWORD:^\=^^^\! 
-ECHO.!KEYSTORE_PASSWORD!| %JAVA% %OPENSEARCH_JAVA_OPTS% -Dopensearch ^ +ECHO.!KEYSTORE_PASSWORD!| "%JAVA%" %OPENSEARCH_JAVA_OPTS% -Dopensearch ^ -Dopensearch.path.home="%OPENSEARCH_HOME%" -Dopensearch.path.conf="%OPENSEARCH_PATH_CONF%" ^ -Dopensearch.distribution.type="%OPENSEARCH_DISTRIBUTION_TYPE%" ^ -Dopensearch.bundled_jdk="%OPENSEARCH_BUNDLED_JDK%" ^ diff --git a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 deleted file mode 100644 index fde3aba8edad0..0000000000000 --- a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a78a8747147d2c5807683e76ec2b633e95c14fe9 \ No newline at end of file diff --git a/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 new file mode 100644 index 0000000000000..1ac9b78b88687 --- /dev/null +++ b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 @@ -0,0 +1 @@ +cf26b7b05fef01e7bec00cb88ab4feeeba743e12 \ No newline at end of file diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index 0b8e623c24ac6..7f687a414e566 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -37,9 +37,16 @@ opensearchplugin { restResources { restApi { - includeCore '_common', 'indices', 'index', 'search' + includeCore '_common', 'indices', 'index', 'search', 'bulk' } } artifacts { restTests(project.file('src/yamlRestTest/resources/rest-api-spec/test')) } +/** + * These compiler arguments need to be removed, as there are raw types being used in the GeoGrid and GeoTile aggregations. + */ +tasks.withType(JavaCompile).configureEach { + options.compilerArgs -= '-Xlint:rawtypes' + options.compilerArgs -= '-Xlint:unchecked' +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoHashGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java similarity index 89% rename from server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoHashGridIT.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java index 56d918feef9d8..6ab7dd5254679 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java @@ -29,7 +29,7 @@ * GitHub history for details.
*/ -package org.opensearch.search.aggregations.bucket; +package org.opensearch.geo.search.aggregations.bucket; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; @@ -41,12 +41,12 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; +import org.opensearch.geo.tests.common.AggregationBuilders; import org.opensearch.index.query.GeoBoundingBoxQueryBuilder; -import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid.Bucket; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; @@ -57,17 +57,16 @@ import java.util.Random; import java.util.Set; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.geometry.utils.Geohash.PRECISION; import static org.opensearch.geometry.utils.Geohash.stringEncode; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoHashGridIT extends OpenSearchIntegTestCase { +public class GeoHashGridIT extends GeoModulePluginIntegTestCase { @Override protected boolean forbidPrivateIndexSettings() { @@ -158,13 +157,13 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - List buckets = geoGrid.getBuckets(); + List buckets = geoGrid.getBuckets(); Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); for (int i = 0; i < buckets.size(); i++) { @@ -185,7 +184,7 @@ public void testSimple() throws Exception { public void testMultivalued() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("multi_valued_idx") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); @@ -208,8 +207,8 @@ public void testFiltered() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = 
client().prepareSearch("idx") .addAggregation( - AggregationBuilders.filter("filtered", bbox) - .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + org.opensearch.search.aggregations.AggregationBuilders.filter("filtered", bbox) + .subAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) ) .get(); @@ -233,7 +232,7 @@ public void testFiltered() throws Exception { public void testUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); @@ -247,7 +246,7 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); @@ -267,7 +266,9 @@ public void testPartiallyUnmapped() throws Exception { public void testTopMatch() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision)) + .addAggregation( + AggregationBuilders.geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision) + ) .get(); assertSearchResponse(response); @@ -296,7 +297,7 @@ public void testSizeIsZero() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) .get() ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. Found [0] in [geohashgrid]")); @@ -308,7 +309,7 @@ public void testShardSizeIsZero() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) .get() ); assertThat(exception.getMessage(), containsString("[shardSize] must be greater than 0. Found [0] in [geohashgrid]")); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java new file mode 100644 index 0000000000000..5b4dd052a2f65 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.geo.search.aggregations.bucket; + +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; +import org.opensearch.geo.tests.common.AggregationBuilders; +import org.opensearch.geometry.utils.Geohash; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +/** + * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard. + * These tests are based on the date histogram in combination with min_doc_count=0. In order for the date histogram to + * compute empty buckets, its {@code reduce()} method must be called. So by adding the date histogram under other buckets, + * we can make sure that the reduce is properly propagated by checking that empty buckets were created. + */ +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class ShardReduceIT extends GeoModulePluginIntegTestCase { + + private IndexRequestBuilder indexDoc(String date, int value) throws Exception { + return client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("value", value) + .field("ip", "10.0.0."
+ value) + .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION)) + .field("date", date) + .field("term-l", 1) + .field("term-d", 1.5) + .field("term-s", "term") + .startObject("nested") + .field("date", date) + .endObject() + .endObject() + ); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + assertAcked( + prepareCreate("idx").setMapping( + "nested", + "type=nested", + "ip", + "type=ip", + "location", + "type=geo_point", + "term-s", + "type=keyword" + ) + ); + + indexRandom(true, indexDoc("2014-01-01", 1), indexDoc("2014-01-02", 2), indexDoc("2014-01-04", 3)); + ensureSearchable(); + } + + public void testGeoHashGrid() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + AggregationBuilders.geohashGrid("grid") + .field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); + + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + + public void testGeoTileGrid() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + AggregationBuilders.geotileGrid("grid") + .field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); + + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java similarity index 99% rename from modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java index 0065cca7d6101..92987d407f51d 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java @@ -42,7 +42,7 @@ * to copy the code as we cannot depend on this class. 
* GitHub issue */ -public abstract class AbstractGeoAggregatorTestCaseModulePlugin extends GeoModulePluginIntegTestCase { +public abstract class AbstractGeoAggregatorModulePluginTestCase extends GeoModulePluginIntegTestCase { protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java similarity index 99% rename from modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java index 5cbd98a4936e4..8cc82da12d69a 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java @@ -57,7 +57,7 @@ import static org.opensearch.geo.tests.common.AggregationBuilders.geoBounds; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoBoundsIT extends AbstractGeoAggregatorTestCaseModulePlugin { +public class GeoBoundsITTestCase extends AbstractGeoAggregatorModulePluginTestCase { private static final String aggName = "geoBounds"; public void testSingleValuedField() throws Exception { diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java new file mode 100644 index 0000000000000..e6d45e27b8f70 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.geo.search.aggregations.metrics; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; +import org.opensearch.geo.tests.common.AggregationBuilders; +import org.opensearch.search.aggregations.metrics.GeoCentroid; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class GeoCentroidITTestCase extends AbstractGeoAggregatorModulePluginTestCase { + private static final String aggName = "geoCentroid"; + + public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { + SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) + .addAggregation( + AggregationBuilders.geohashGrid("geoGrid") + .field(SINGLE_VALUED_FIELD_NAME) + .subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("geoGrid"); + assertThat(grid, notNullValue()); + assertThat(grid.getName(), equalTo("geoGrid")); + List buckets = grid.getBuckets(); + for (GeoGrid.Bucket cell : buckets) { + String geohash = cell.getKeyAsString(); + GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); + GeoCentroid centroidAgg = cell.getAggregations().get(aggName); + assertThat( + "Geohash " + geohash + " has wrong centroid latitude ", + expectedCentroid.lat(), + closeTo(centroidAgg.centroid().lat(), GEOHASH_TOLERANCE) + ); + assertThat( + "Geohash " + geohash + " has wrong centroid longitude", + expectedCentroid.lon(), + closeTo(centroidAgg.centroid().lon(), GEOHASH_TOLERANCE) + ); + } + } +} diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 64aac66b7eef3..25dcf8db2c407 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -32,6 +32,12 @@ package org.opensearch.geo; +import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; @@ -40,6 +46,7 @@ import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregation; import java.util.Collections; import java.util.List; @@ -57,11 +64,42 @@ public Map getMappers() { */ @Override public List getAggregations() { 
- final AggregationSpec spec = new AggregationSpec( + final AggregationSpec geoBounds = new AggregationSpec( GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new, GeoBoundsAggregationBuilder.PARSER ).addResultReader(InternalGeoBounds::new).setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators); - return Collections.singletonList(spec); + + final AggregationSpec geoHashGrid = new AggregationSpec( + GeoHashGridAggregationBuilder.NAME, + GeoHashGridAggregationBuilder::new, + GeoHashGridAggregationBuilder.PARSER + ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + + final AggregationSpec geoTileGrid = new AggregationSpec( + GeoTileGridAggregationBuilder.NAME, + GeoTileGridAggregationBuilder::new, + GeoTileGridAggregationBuilder.PARSER + ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + return List.of(geoBounds, geoHashGrid, geoTileGrid); + } + + /** + * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * + * @return a {@link List} of {@link CompositeAggregationSpec} + */ + @Override + public List getCompositeAggregations() { + return Collections.singletonList( + new CompositeAggregationSpec( + GeoTileGridValuesSourceBuilder::register, + GeoTileGridValuesSourceBuilder.class, + GeoTileGridValuesSourceBuilder.COMPOSITE_AGGREGATION_SERIALISATION_BYTE_CODE, + GeoTileGridValuesSourceBuilder::new, + GeoTileGridValuesSourceBuilder::parse, + GeoTileGridValuesSourceBuilder.TYPE + ) + ); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java similarity index 87% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index 4b01a08d29a43..84d5943da287f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. 
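Once the geohash and geotile grid aggregations are registered by the GeoModulePlugin as shown above, client code builds them through the relocated builders exactly as the moved integration tests do. The following is a rough, hypothetical sketch mirroring those tests; the class name, index name "idx", field name "location" and precision value are illustrative only, and it assumes the geo test helper AggregationBuilders stays available on the test classpath.

import org.opensearch.action.search.SearchResponse;
import org.opensearch.geo.GeoModulePluginIntegTestCase;
import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid;
import org.opensearch.geo.tests.common.AggregationBuilders;

import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid;

// Hypothetical example class; not part of this change.
public class GeoGridUsageSketchIT extends GeoModulePluginIntegTestCase {

    public void testGeoHashGridWithCentroid() {
        SearchResponse response = client().prepareSearch("idx")
            .addAggregation(
                AggregationBuilders.geohashGrid("grid")     // geo module helper, as used in GeoHashGridIT
                    .field("location")
                    .precision(5)
                    .subAggregation(geoCentroid("centroid").field("location")) // core helper, as in GeoCentroidITTestCase
            )
            .get();

        GeoGrid grid = response.getAggregations().get("grid");
        for (GeoGrid.Bucket cell : grid.getBuckets()) {
            String geohash = cell.getKeyAsString(); // cell key at the requested precision
            long docCount = cell.getDocCount();     // number of documents falling into the cell
        }
    }
}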
*/ -package org.opensearch.search.aggregations.bucket.composite; +package org.opensearch.geo.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; import org.opensearch.LegacyESVersion; @@ -43,12 +43,15 @@ import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.geo.search.aggregations.bucket.geogrid.CellIdSource; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.aggregations.bucket.geogrid.CellIdSource; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceParserHelper; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; @@ -88,13 +91,19 @@ CompositeValuesSourceConfig apply( ); } - static final String TYPE = "geotile_grid"; + public static final String TYPE = "geotile_grid"; + /* + Use the TYPE parameter instead of the byte code. The byte code is added for backward compatibility and will be + removed in the next version.
+ */ + @Deprecated + public static final Byte COMPOSITE_AGGREGATION_SERIALISATION_BYTE_CODE = 3; static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey( TYPE, GeoTileCompositeSuppier.class ); - private static final ObjectParser PARSER; + static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(GeoTileGridValuesSourceBuilder.TYPE); PARSER.declareInt(GeoTileGridValuesSourceBuilder::precision, new ParseField("precision")); @@ -106,11 +115,11 @@ CompositeValuesSourceConfig apply( CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER); } - static GeoTileGridValuesSourceBuilder parse(String name, XContentParser parser) throws IOException { + public static GeoTileGridValuesSourceBuilder parse(String name, XContentParser parser) throws IOException { return PARSER.parse(parser, new GeoTileGridValuesSourceBuilder(name), null); } - static void register(ValuesSourceRegistry.Builder builder) { + public static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, @@ -163,7 +172,7 @@ static void register(ValuesSourceRegistry.Builder builder) { super(name); } - GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { + public GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.precision = in.readInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { @@ -203,7 +212,7 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE } @Override - String type() { + protected String type() { return TYPE; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java similarity index 88% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java index 819dfc573bbe4..303e577e99e7b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.composite; +package org.opensearch.geo.search.aggregations.bucket.composite; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; @@ -38,7 +38,9 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.LongValuesSource; +import org.opensearch.search.aggregations.bucket.composite.SingleDimensionValuesSource; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; @@ -68,7 +70,7 @@ class GeoTileValuesSource extends LongValuesSource { } @Override - void setAfter(Comparable value) { + protected void setAfter(Comparable value) { if (missingBucket && value == null) { afterValue = null; } else if (value instanceof Number) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BoundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BoundedCellValues.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BoundedCellValues.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BoundedCellValues.java index ba824fc8f21dd..06d2dcaee3932 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BoundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BoundedCellValues.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.index.fielddata.MultiGeoPointValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index d6cfde0c46eae..70d0552b3e80b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.util.PriorityQueue; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellIdSource.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index 12d9043a2fd5f..d40029e9a762d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellValues.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellValues.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellValues.java index 9dc357659aae8..d01896c8136fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellValues.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.index.fielddata.AbstractSortingNumericDocValues; import org.opensearch.index.fielddata.MultiGeoPointValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index cfdb08f9ee3d7..4ae888640efc8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index b08c40268c5cf..4a904b3aa2b16 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 1ef8ba6c697f4..909772c61a960 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index 4049bf2c73640..bbaf9613fb216 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoUtils; @@ -40,7 +40,7 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; -import org.opensearch.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.opensearch.geo.search.aggregations.metrics.GeoGridAggregatorSupplier; import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 1106320c7431f..6ca7a4d8a9cb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index cdc801aaedffb..1914c07e831f7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.geometry.utils.Geohash; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java similarity index 95% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index f73360e3cb826..76ad515f34fe5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.io.stream.StreamInput; @@ -39,7 +39,8 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; -import org.opensearch.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.opensearch.geo.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index 7a2b908148c4c..a205a9afde41e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index ef8cd11a22498..b830988a3d410 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.index.query.QueryShardContext; @@ -40,6 +40,7 @@ import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.NonCollectingAggregator; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java index 94a5ad5717854..9dbed7b27307a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index a187bfefb661f..93fcdbd098400 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 7811b8774d04f..ff1247300939a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index f9c45dc41ceb1..659909e868651 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java index efbd9a05d6a4d..fa544b5893f0c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java similarity index 94% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java index f200f55232e00..65d736cfceb32 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java @@ -30,11 +30,12 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGrid.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGrid.java index 3f85cf350c89c..adfffeddba59d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGrid.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.CheckedFunction; import org.opensearch.common.xcontent.ObjectParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 08e5c15188ee6..80124cda50b19 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index f20f972c1ce0a..109524e755c4d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 05c7a1c8d1663..4e6e454b08324 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 06915cc4210e1..8734c96a15578 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java similarity index 93% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java index c8dec16f322ef..fd47c35f13de1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java @@ -30,10 +30,11 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/UnboundedCellValues.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/UnboundedCellValues.java index f5a139cdb8d9d..c628c7bfdc8ec 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/UnboundedCellValues.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.index.fielddata.MultiGeoPointValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/package-info.java similarity index 79% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/package-info.java index c59685e06cf79..d9183a0f742ef 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/package-info.java @@ -7,4 +7,4 @@ */ /** geo_grid Aggregation package. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoGridAggregatorSupplier.java similarity index 93% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoGridAggregatorSupplier.java index 183c64f4e4af2..43ccb8b89545a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoGridAggregatorSupplier.java @@ -30,13 +30,13 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.geo.GeoBoundingBox; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregator; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.CardinalityUpperBound; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregator; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java similarity index 72% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java index 5e230a445ec98..00cb162e64c19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java @@ -30,14 +30,23 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket; +package org.opensearch.geo.search.aggregations.bucket; -import org.opensearch.common.geo.GeoBoundingBoxTests; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -public class GeoHashGridTests extends BaseAggregationTestCase { +import java.util.Collection; +import java.util.Collections; + +public class GeoHashGridAggregationBuilderTests extends BaseAggregationTestCase { + + protected Collection> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } @Override protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { @@ -55,7 +64,7 @@ protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - factory.setGeoBoundingBox(GeoBoundingBoxTests.randomBBox()); + factory.setGeoBoundingBox(RandomGeoGenerator.randomBBox()); } return factory; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java similarity index 70% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java index d54667fb4f1a6..c7c0be21273bd 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java @@ -30,15 +30,24 @@ * GitHub history for details. 
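With the grid aggregations relocated into the geo module, the builder tests above now load GeoModulePlugin and construct the builders from their new packages. As a rough sketch of what that construction looks like from calling code (the wrapper class name, the "location" field and the bounding-box corners below are illustrative assumptions, not part of the patch):

import org.opensearch.common.geo.GeoBoundingBox;
import org.opensearch.common.geo.GeoPoint;
import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;

public class GridAggregationExamples {
    public static GeoHashGridAggregationBuilder geohashCells() {
        GeoHashGridAggregationBuilder builder = new GeoHashGridAggregationBuilder("cells");
        builder.field("location");          // geo_point field to bucket (hypothetical field name)
        builder.precision(5);               // geohash length, 1-12
        builder.size(1000);                 // maximum number of buckets returned
        // restrict bucketing to a box: top-left then bottom-right corner,
        // the same shape the tests randomize via RandomGeoGenerator.randomBBox()
        builder.setGeoBoundingBox(new GeoBoundingBox(new GeoPoint(55.0, -10.0), new GeoPoint(45.0, 10.0)));
        return builder;
    }

    public static GeoTileGridAggregationBuilder geotileCells() {
        GeoTileGridAggregationBuilder builder = new GeoTileGridAggregationBuilder("tiles");
        builder.field("location");
        builder.precision(7);               // geotile zoom level, 0-29
        return builder;
    }
}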
*/ -package org.opensearch.search.aggregations.bucket; +package org.opensearch.geo.search.aggregations.bucket; -import org.opensearch.common.geo.GeoBoundingBoxTests; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; -public class GeoTileGridTests extends BaseAggregationTestCase { +import java.util.Collection; +import java.util.Collections; + +public class GeoTileGridAggregationBuilderTests extends BaseAggregationTestCase { + + protected Collection> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } @Override protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { @@ -55,7 +64,7 @@ protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - factory.setGeoBoundingBox(GeoBoundingBoxTests.randomBBox()); + factory.setGeoBoundingBox(RandomGeoGenerator.randomBBox()); } return factory; } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java new file mode 100644 index 0000000000000..3c7c292f9d193 --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.bucket.composite; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.junit.Before; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; +import org.opensearch.index.mapper.GeoPointFieldMapper; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; +import org.opensearch.search.aggregations.composite.BaseCompositeAggregatorTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation. 
+ */ +public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { + + protected List getSearchPlugins() { + return Collections.singletonList(new GeoModulePlugin()); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + FIELD_TYPES.add(new GeoPointFieldMapper.GeoPointFieldType("geo_point")); + } + + public void testUnmappedFieldWithGeopoint() throws Exception { + final List>> dataset = new ArrayList<>(); + final String mappedFieldName = "geo_point"; + dataset.addAll( + Arrays.asList( + createDocument(mappedFieldName, new GeoPoint(48.934059, 41.610741)), + createDocument(mappedFieldName, new GeoPoint(-23.065941, 113.610741)), + createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)), + createDocument(mappedFieldName, new GeoPoint(37.2343, -115.8067)), + createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)) + ) + ); + + // just unmapped = no results + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder("name", Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped"))), + (result) -> assertEquals(0, result.getBuckets().size()) + ); + + // unmapped missing bucket = one result + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true)) + ), + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{unmapped=null}", result.afterKey().toString()); + assertEquals("{unmapped=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(5L, result.getBuckets().get(0).getDocCount()); + } + ); + + // field + unmapped, no missing bucket = no results + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), + new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped") + ) + ), + (result) -> assertEquals(0, result.getBuckets().size()) + ); + + // field + unmapped with missing bucket = multiple results + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), + new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true) + ) + ), + (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{geo_point=7/64/56, unmapped=null}", result.afterKey().toString()); + assertEquals("{geo_point=7/32/56, unmapped=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{geo_point=7/64/56, unmapped=null}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + } + ); + + } + + public void testWithGeoPoint() throws Exception { + final List>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("geo_point", new GeoPoint(48.934059, 41.610741)), + createDocument("geo_point", new GeoPoint(-23.065941, 113.610741)), + createDocument("geo_point", new GeoPoint(90.0, 0.0)), + 
createDocument("geo_point", new GeoPoint(37.2343, -115.8067)), + createDocument("geo_point", new GeoPoint(90.0, 0.0)) + ) + ); + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { + GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); + return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)); + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); + assertEquals("{geo_point=7/32/56}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{geo_point=7/64/56}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + }); + + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { + GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); + return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)).aggregateAfter( + Collections.singletonMap("geo_point", "7/32/56") + ); + }, (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); + assertEquals("{geo_point=7/64/56}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(0).getDocCount()); + }); + } + + @Override + protected boolean addValueToDocument(final Document doc, final String name, final Object value) { + if (value instanceof GeoPoint) { + GeoPoint point = (GeoPoint) value; + doc.add( + new SortedNumericDocValuesField( + name, + GeoTileUtils.longEncode(point.lon(), point.lat(), GeoTileGridAggregationBuilder.DEFAULT_PRECISION) + ) + ); + doc.add(new LatLonPoint(name, point.lat(), point.lon())); + return true; + } + return false; + } +} diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java new file mode 100644 index 0000000000000..ea7a2a83945c2 --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.geo.search.aggregations.bucket.composite; + +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.aggregations.BaseAggregationTestCase; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class GeoTileGridCompositeAggregationBuilderTests extends BaseAggregationTestCase { + + protected Collection> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } + + private GeoTileGridValuesSourceBuilder randomGeoTileGridValuesSourceBuilder() { + GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); + if (randomBoolean()) { + geoTile.precision(randomIntBetween(0, GeoTileUtils.MAX_ZOOM)); + } + if (randomBoolean()) { + geoTile.geoBoundingBox(RandomGeoGenerator.randomBBox()); + } + return geoTile; + } + + @Override + protected CompositeAggregationBuilder createTestAggregatorBuilder() { + int numSources = randomIntBetween(1, 10); + List> sources = new ArrayList<>(); + for (int i = 0; i < numSources; i++) { + sources.add(randomGeoTileGridValuesSourceBuilder()); + } + return new CompositeAggregationBuilder(randomAlphaOfLength(10), sources); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java similarity index 90% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java index 2b1700676f549..c6276c06c4511 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java @@ -30,8 +30,9 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.composite; +package org.opensearch.geo.search.aggregations.bucket.composite; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.test.OpenSearchTestCase; public class GeoTileGridValuesSourceBuilderTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java similarity index 95% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 17fddb8978499..d6153637f656d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; @@ -45,17 +45,19 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.geo.GeoBoundingBox; -import org.opensearch.common.geo.GeoBoundingBoxTests; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.tests.common.AggregationInspectionHelper; +import org.opensearch.geo.tests.common.RandomGeoGenerator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.opensearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.ArrayList; @@ -91,6 +93,16 @@ public abstract class GeoGridAggregatorTestCase */ protected abstract GeoGridAggregationBuilder createBuilder(String name); + /** + * Overriding the Search Plugins list with {@link GeoModulePlugin} so that the testcase will know that this plugin is + * to be loaded during the tests. 
+ * @return List of {@link SearchPlugin} + */ + @Override + protected List getSearchPlugins() { + return Collections.singletonList(new GeoModulePlugin()); + } + public void testNoDocs() throws IOException { testCase( new MatchAllDocsQuery(), @@ -225,7 +237,7 @@ public void testBounds() throws IOException { // only consider bounding boxes that are at least GEOHASH_TOLERANCE wide and have quantized coordinates GeoBoundingBox bbox = randomValueOtherThanMany( (b) -> Math.abs(GeoUtils.normalizeLon(b.right()) - GeoUtils.normalizeLon(b.left())) < GEOHASH_TOLERANCE, - GeoBoundingBoxTests::randomBBox + RandomGeoGenerator::randomBBox ); Function encodeDecodeLat = (lat) -> GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(lat)); Function encodeDecodeLon = (lon) -> GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(lon)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java similarity index 79% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index ce286a4443660..432736a2b43fe 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -29,9 +29,16 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.index.IndexWriter; +import org.opensearch.common.ParseField; +import org.opensearch.common.xcontent.ContextParser; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.metrics.ParsedGeoBounds; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.ParsedMultiBucketAggregation; import org.opensearch.test.InternalMultiBucketAggregationTestCase; @@ -76,6 +83,36 @@ protected int maxNumberOfBuckets() { return 3; } + /** + * Overriding the method so that tests can get the aggregation specs for namedWriteable. + * + * @return GeoPlugin + */ + @Override + protected SearchPlugin registerPlugin() { + return new GeoModulePlugin(); + } + + /** + * Overriding with the {@link ParsedGeoBounds} so that it can be parsed. We need to do this as {@link GeoModulePlugin} + * is registering this Aggregation. 
+ * + * @return a List of {@link NamedXContentRegistry.Entry} + */ + @Override + protected List getNamedXContents() { + final List namedXContents = new ArrayList<>(getDefaultNamedXContents()); + final ContextParser hashGridParser = (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c); + final ContextParser geoTileParser = (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c); + namedXContents.add( + new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoHashGridAggregationBuilder.NAME), hashGridParser) + ); + namedXContents.add( + new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoTileGridAggregationBuilder.NAME), geoTileParser) + ); + return namedXContents; + } + @Override protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java similarity index 96% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java index 5c63b15c7f614..04fa815366f6b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import static org.opensearch.geometry.utils.Geohash.stringEncode; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java similarity index 99% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index e81e22b3b562f..44f292e898a61 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -29,7 +29,7 @@ * GitHub history for details. 
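Because GeoModulePlugin, rather than the server, now registers these aggregations, GeoGridTestCase wires the ParsedGeoHashGrid and ParsedGeoTileGrid parsers into the NamedXContentRegistry itself. A sketch of the same wiring for any code that needs to parse geo-grid buckets out of a search response (the wrapper class name is illustrative; the entries match the overrides in the patch):

import java.util.ArrayList;
import java.util.List;

import org.opensearch.common.ParseField;
import org.opensearch.common.xcontent.ContextParser;
import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
import org.opensearch.geo.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
import org.opensearch.geo.search.aggregations.bucket.geogrid.ParsedGeoTileGrid;
import org.opensearch.search.aggregations.Aggregation;

public class GeoGridXContentRegistry {
    public static NamedXContentRegistry build() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
        // the context object carries the aggregation name, hence the (String) cast
        ContextParser<Object, Aggregation> hashGrid = (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c);
        ContextParser<Object, Aggregation> tileGrid = (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c);
        entries.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoHashGridAggregationBuilder.NAME), hashGrid));
        entries.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoTileGridAggregationBuilder.NAME), tileGrid));
        return new NamedXContentRegistry(entries);
    }
}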
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java similarity index 97% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index 5a26ec759281c..c84c6ef5ec076 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.geometry.utils.Geohash; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java similarity index 94% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java index 4e88111ac2dfc..f2f641ea794c0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java @@ -30,7 +30,9 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; + +import org.opensearch.search.aggregations.bucket.GeoTileUtils; public class GeoTileGridAggregatorTests extends GeoGridAggregatorTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java similarity index 97% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java index 567bcd57d23e5..a5b000d5e6ab3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.ExceptionsHelper; import org.opensearch.common.xcontent.XContentParseException; @@ -37,6 +37,7 @@ import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Rectangle; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java similarity index 94% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index 50b9a8cd762d1..ead67e0455d94 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -29,9 +29,10 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.util.List; import java.util.Map; diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c1f27b71c326d..c0d7e51047c6b 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -8,6 +8,10 @@ package org.opensearch.geo.tests.common; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -18,4 +22,18 @@ public class AggregationBuilders { public static GeoBoundsAggregationBuilder geoBounds(String name) { return new GeoBoundsAggregationBuilder(name); } + + /** + * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + */ + public static GeoHashGridAggregationBuilder geohashGrid(String name) { + return new GeoHashGridAggregationBuilder(name); + } + + /** + * Create a new {@link InternalGeoTileGrid} aggregation with the given name. 
+ */ + public static GeoTileGridAggregationBuilder geotileGrid(String name) { + return new GeoTileGridAggregationBuilder(name); + } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 208187bf34a5c..3473cf2d94b76 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,6 +8,7 @@ package org.opensearch.geo.tests.common; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -15,4 +16,8 @@ public class AggregationInspectionHelper { public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } + + public static boolean hasValue(InternalGeoGrid agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java index 2cf32c36b97ec..2fb403155e2bc 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java @@ -8,7 +8,10 @@ package org.opensearch.geo.tests.common; +import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.GeometryTestUtils; +import org.opensearch.geometry.Rectangle; import java.util.Random; @@ -83,4 +86,12 @@ private static double normalizeLongitude(double longitude) { return -180 + off; } } + + public static GeoBoundingBox randomBBox() { + Rectangle rectangle = GeometryTestUtils.randomRectangle(); + return new GeoBoundingBox( + new GeoPoint(rectangle.getMaxLat(), rectangle.getMinLon()), + new GeoPoint(rectangle.getMinLat(), rectangle.getMaxLon()) + ); + } } diff --git a/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml new file mode 100644 index 0000000000000..211f3c3f46b88 --- /dev/null +++ b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml @@ -0,0 +1,168 @@ +--- +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + date: + type: date + keyword: + type: keyword + long: + type: long + geo_point: + type: geo_point + nested: + type: nested + properties: + nested_long: + type: long + + - do: + indices.create: + index: other + body: + mappings: + properties: + date: + type: date + long: + type: long + nested: + type: nested + properties: + nested_long: + type: long + + - do: + index: + index: test + id: 1 + body: { "keyword": "foo", "long": [10, 20], "geo_point": "37.2343,-115.8067", "nested": [{"nested_long": 10}, {"nested_long": 20}] } + + - do: + index: + index: test + id: 2 + body: { "keyword": ["foo", "bar"], "geo_point": "41.12,-71.34" } + + - do: + index: + index: test + id: 3 + body: { "keyword": "bar", "long": [100, 0], "geo_point": "90.0,0.0", "nested": [{"nested_long": 10}, {"nested_long": 0}] } + + - do: + index: + index: test + id: 4 + body: { "keyword": "bar", "long": [1000, 0], "geo_point": 
"41.12,-71.34", "nested": [{"nested_long": 1000}, {"nested_long": 20}] } + + - do: + index: + index: test + id: 5 + body: { "date": "2017-10-20T03:08:45" } + + - do: + index: + index: test + id: 6 + body: { "date": "2017-10-21T07:00:00" } + + - do: + index: + index: other + id: 0 + body: { "date": "2017-10-20T03:08:45" } + + - do: + indices.refresh: + index: [test, other] +--- +"Simple Composite aggregation with GeoTile grid": + - skip: + version: " - 7.4.99" + reason: geotile_grid is not supported until 7.5.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "geo": { + "geotile_grid": { + "field": "geo_point", + "precision": 12 + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 4 } + - match: { aggregations.test.buckets.0.key.geo: "12/730/1590" } + - match: { aggregations.test.buckets.0.key.kw: "foo" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.1.key.kw: "bar" } + - match: { aggregations.test.buckets.1.doc_count: 2 } + - match: { aggregations.test.buckets.2.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.2.key.kw: "foo" } + - match: { aggregations.test.buckets.2.doc_count: 1 } + - match: { aggregations.test.buckets.3.key.geo: "12/2048/0" } + - match: { aggregations.test.buckets.3.key.kw: "bar" } + - match: { aggregations.test.buckets.3.doc_count: 1 } + +--- +"Simple Composite aggregation with geotile grid add aggregate after": + - skip: + version: " - 7.4.99" + reason: geotile_grid is not supported until 7.5.0 + - do: + search: + index: test + body: + aggregations: + test: + composite: + sources: [ + "geo": { + "geotile_grid": { + "field": "geo_point", + "precision": 12 + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + after: { "geo": "12/730/1590", "kw": "foo" } + + - match: { hits.total.value: 6 } + - match: { hits.total.relation: "eq" } + - length: { aggregations.test.buckets: 3 } + - match: { aggregations.test.buckets.0.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.0.key.kw: "bar" } + - match: { aggregations.test.buckets.0.doc_count: 2 } + - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.1.key.kw: "foo" } + - match: { aggregations.test.buckets.1.doc_count: 1 } + - match: { aggregations.test.buckets.2.key.geo: "12/2048/0" } + - match: { aggregations.test.buckets.2.key.kw: "bar" } + - match: { aggregations.test.buckets.2.doc_count: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_geohash_grid.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/280_geohash_grid.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_geohash_grid.yml rename to modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/280_geohash_grid.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml rename to modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml diff --git 
a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 b/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 deleted file mode 100644 index f40f0242448e8..0000000000000 --- a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index b72cb6d868d79..5d2047d7f18a2 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -58,6 +58,7 @@ dependencies { api "io.netty:netty-buffer:${versions.netty}" api "io.netty:netty-codec:${versions.netty}" api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" api "io.netty:netty-common:${versions.netty}" api "io.netty:netty-handler:${versions.netty}" api "io.netty:netty-resolver:${versions.netty}" diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..f2989024cfce1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java new file mode 100644 index 0000000000000..1424b392af8e7 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; +import org.opensearch.OpenSearchNetty4IntegTestCase; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.Collection; +import java.util.Locale; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class Netty4Http2IT extends OpenSearchNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + public void testThatNettyHttpServerSupportsHttp2() throws Exception { + String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http2()) { + Collection responses = nettyHttpClient.get(transportAddress.address(), requests); + try { + assertThat(responses, hasSize(5)); + + Collection opaqueIds = Netty4HttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInAnyOrder(opaqueIds); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertOpaqueIdsInAnyOrder(Collection opaqueIds) { + // check if opaque ids are present in any order, since for HTTP/2 we use streaming (no head of line blocking) + // and responses may come back at any order + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be in any order, got [%s]", opaqueIds); + assertThat(msg, opaqueIds, containsInAnyOrder(IntStream.range(0, 5).mapToObj(Integer::toString).toArray())); + } + +} diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index 08df9259d475f..db76c0b145840 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -100,7 +100,7 @@ public void testLimitsInFlightRequests() throws Exception { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1)); try { assertThat(singleResponse, hasSize(1)); @@ -130,7 +130,7 @@ public void testDoesNotLimitExcludedRequests() throws Exception { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try 
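The new Netty4Http2IT above exercises the cleartext HTTP/2 (h2c) upgrade path end to end through the Netty4HttpClient.http2() factory added later in this patch. A condensed sketch of that client-side usage, assuming the sketch lives in the same test package as the package-private client and that a bound TransportAddress is already at hand:

package org.opensearch.http.netty4;

import java.util.Collection;

import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.util.ReferenceCounted;
import org.opensearch.common.transport.TransportAddress;

public class Http2ClientSketch {
    // transportAddress would come from HttpServerTransport#boundAddress() in a real test
    static void fetchOverHttp2(TransportAddress transportAddress) throws InterruptedException {
        try (Netty4HttpClient client = Netty4HttpClient.http2()) {
            Collection<FullHttpResponse> responses = client.get(transportAddress.address(), "/", "/_nodes/stats");
            try {
                // HTTP/2 streams are multiplexed, so responses may arrive in any order;
                // correlate them via the X-Opaque-ID header the client sets per request
                responses.forEach(r -> System.out.println(r.status()));
            } finally {
                responses.forEach(ReferenceCounted::release);
            }
        }
    }
}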
(Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.put(transportAddress.address(), requestUris); try { assertThat(responses, hasSize(requestUris.size())); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java index 2bd1fa07f8afc..96193b0ecb954 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java @@ -61,7 +61,7 @@ public void testThatNettyHttpServerSupportsPipelining() throws Exception { TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); TransportAddress transportAddress = randomFrom(boundAddresses); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests); try { assertThat(responses, hasSize(5)); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 66d60032d11a8..2dd7aaf41986f 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -33,7 +33,10 @@ package org.opensearch.http.netty4; import io.netty.channel.Channel; +import io.netty.channel.ChannelPipeline; + import org.opensearch.action.ActionListener; +import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.CompletableContext; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; @@ -45,9 +48,15 @@ public class Netty4HttpChannel implements HttpChannel { private final Channel channel; private final CompletableContext closeContext = new CompletableContext<>(); + private final ChannelPipeline inboundPipeline; Netty4HttpChannel(Channel channel) { + this(channel, null); + } + + Netty4HttpChannel(Channel channel, ChannelPipeline inboundPipeline) { this.channel = channel; + this.inboundPipeline = inboundPipeline; Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @@ -81,6 +90,10 @@ public void close() { channel.close(); } + public @Nullable ChannelPipeline inboundPipeline() { + return inboundPipeline; + } + public Channel getNettyChannel() { return channel; } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index decab45ffca38..1e0a4d89f2fd5 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -40,18 +40,36 @@ import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; import io.netty.channel.FixedRecvByteBufAllocator; import io.netty.channel.RecvByteBufAllocator; +import 
io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.nio.NioChannelOption; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpMessage; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory; +import io.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler; +import io.netty.handler.codec.http2.Http2CodecUtil; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2ServerUpgradeCodec; +import io.netty.handler.codec.http2.Http2StreamFrameToHttpObjectCodec; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; +import io.netty.util.AsciiString; import io.netty.util.AttributeKey; +import io.netty.util.ReferenceCountUtil; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; @@ -335,38 +353,152 @@ protected HttpChannelHandler(final Netty4HttpServerTransport transport, final Ht this.responseCreator = new Netty4HttpResponseCreator(); } + public ChannelHandler getRequestHandler() { + return requestHandler; + } + @Override protected void initChannel(Channel ch) throws Exception { Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(ch); ch.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); ch.pipeline().addLast("byte_buf_sizer", byteBufSizer); ch.pipeline().addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); + + configurePipeline(ch); + transport.serverAcceptedChannel(nettyHttpChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + super.exceptionCaught(ctx, cause); + } + + protected void configurePipeline(Channel ch) { + final UpgradeCodecFactory upgradeCodecFactory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + if (AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)) { + return new Http2ServerUpgradeCodec( + Http2FrameCodecBuilder.forServer().build(), + new Http2MultiplexHandler(createHttp2ChannelInitializer(ch.pipeline())) + ); + } else { + return null; + } + } + }; + + final HttpServerCodec sourceCodec = new HttpServerCodec( + handlingSettings.getMaxInitialLineLength(), + handlingSettings.getMaxHeaderSize(), + handlingSettings.getMaxChunkSize() + ); + + final HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(sourceCodec, upgradeCodecFactory); + final CleartextHttp2ServerUpgradeHandler cleartextUpgradeHandler = new CleartextHttp2ServerUpgradeHandler( + sourceCodec, + upgradeHandler, + createHttp2ChannelInitializerPriorKnowledge() + ); + + ch.pipeline().addLast(cleartextUpgradeHandler).addLast(new SimpleChannelInboundHandler() { + @Override + 
protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws Exception { + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP + final ChannelPipeline pipeline = ctx.pipeline(); + pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); + pipeline.replace(this, "aggregator", aggregator); + + ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); + ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + if (handlingSettings.isCompression()) { + ch.pipeline() + .addAfter("aggregator", "encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + ch.pipeline().addBefore("handler", "request_creator", requestCreator); + ch.pipeline().addBefore("handler", "response_creator", responseCreator); + ch.pipeline() + .addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + + ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); + } + }); + } + + protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { final HttpRequestDecoder decoder = new HttpRequestDecoder( handlingSettings.getMaxInitialLineLength(), handlingSettings.getMaxHeaderSize(), handlingSettings.getMaxChunkSize() ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); - ch.pipeline().addLast("decoder", decoder); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + pipeline.addLast("decoder", decoder); + pipeline.addLast("decoder_compress", new HttpContentDecompressor()); + pipeline.addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - ch.pipeline().addLast("aggregator", aggregator); + pipeline.addLast("aggregator", aggregator); if (handlingSettings.isCompression()) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } - ch.pipeline().addLast("request_creator", requestCreator); - ch.pipeline().addLast("response_creator", responseCreator); - ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); - ch.pipeline().addLast("handler", requestHandler); - transport.serverAcceptedChannel(nettyHttpChannel); + pipeline.addLast("request_creator", requestCreator); + pipeline.addLast("response_creator", responseCreator); + pipeline.addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + pipeline.addLast("handler", requestHandler); } - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - super.exceptionCaught(ctx, cause); + protected void configureDefaultHttp2Pipeline(ChannelPipeline pipeline) { + pipeline.addLast(Http2FrameCodecBuilder.forServer().build()) + .addLast(new Http2MultiplexHandler(createHttp2ChannelInitializer(pipeline))); + } + + private ChannelInitializer createHttp2ChannelInitializerPriorKnowledge() { + return new 
ChannelInitializer() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + configureDefaultHttp2Pipeline(childChannel.pipeline()); + } + }; + } + + /** + * Http2MultiplexHandler creates new pipeline, we are preserving the old one in case some handlers need to be + * access (like for example opensearch-security plugin which accesses SSL handlers). + */ + private ChannelInitializer createHttp2ChannelInitializer(ChannelPipeline inboundPipeline) { + return new ChannelInitializer() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + final Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(childChannel, inboundPipeline); + childChannel.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); + + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + childChannel.pipeline() + .addLast(new LoggingHandler(LogLevel.DEBUG)) + .addLast(new Http2StreamFrameToHttpObjectCodec(true)) + .addLast("byte_buf_sizer", byteBufSizer) + .addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)) + .addLast("decoder_decompress", new HttpContentDecompressor()); + + if (handlingSettings.isCompression()) { + childChannel.pipeline() + .addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + + childChannel.pipeline() + .addLast("aggregator", aggregator) + .addLast("request_creator", requestCreator) + .addLast("response_creator", responseCreator) + .addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)) + .addLast("handler", getRequestHandler()); + } + }; } } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index a0100930c7dcb..c18fe6efc4736 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -117,7 +117,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, httpServerTransport.start(); final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { final Collection responses = nettyHttpClient.get( transportAddress.address(), "/_cluster/settings?pretty=%" diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java index 57f95a022a33f..6fdd698c117f2 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java @@ -37,14 +37,19 @@ import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import 
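The transport changes above wire two cleartext HTTP/2 paths: an HttpServerUpgradeHandler for clients that negotiate h2c via an Upgrade request, and a prior-knowledge pipeline built from Http2FrameCodecBuilder, Http2MultiplexHandler and Http2StreamFrameToHttpObjectCodec, so each HTTP/2 stream becomes a child channel carrying ordinary HTTP objects. A stripped-down, standalone sketch of that prior-knowledge wiring outside the transport (the bootstrap, class name and aggregator size are illustrative assumptions, not the transport's actual setup):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http2.Http2FrameCodecBuilder;
import io.netty.handler.codec.http2.Http2MultiplexHandler;
import io.netty.handler.codec.http2.Http2StreamFrameToHttpObjectCodec;

public class PriorKnowledgeH2cSketch {
    public static ServerBootstrap bootstrap() {
        return new ServerBootstrap().group(new NioEventLoopGroup())
            .channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) {
                    ch.pipeline()
                        // frame codec + multiplexer: every HTTP/2 stream is surfaced as a child channel
                        .addLast(Http2FrameCodecBuilder.forServer().build())
                        .addLast(new Http2MultiplexHandler(new ChannelInitializer<Channel>() {
                            @Override
                            protected void initChannel(Channel streamChannel) {
                                streamChannel.pipeline()
                                    // translate stream frames back into HttpRequest/HttpContent objects
                                    .addLast(new Http2StreamFrameToHttpObjectCodec(true))
                                    .addLast(new HttpObjectAggregator(1024 * 1024));
                                // the application handlers (request_creator, response_creator,
                                // pipelining, handler in the patch) would be appended here
                            }
                        }));
                }
            });
    }
}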
io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpClientUpgradeHandler; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; @@ -55,6 +60,17 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.DefaultHttp2Connection; +import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener; +import io.netty.handler.codec.http2.Http2ClientUpgradeCodec; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder; +import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder; +import io.netty.util.AttributeKey; + import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; @@ -70,6 +86,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; @@ -97,11 +114,32 @@ static Collection returnOpaqueIds(Collection responses } private final Bootstrap clientBootstrap; + private final BiFunction, AwaitableChannelInitializer> handlerFactory; + + Netty4HttpClient( + Bootstrap clientBootstrap, + BiFunction, AwaitableChannelInitializer> handlerFactory + ) { + this.clientBootstrap = clientBootstrap; + this.handlerFactory = handlerFactory; + } + + static Netty4HttpClient http() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp::new + ); + } - Netty4HttpClient() { - clientBootstrap = new Bootstrap().channel(NettyAllocator.getChannelType()) - .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) - .group(new NioEventLoopGroup(1)); + static Netty4HttpClient http2() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp2::new + ); } public List get(SocketAddress remoteAddress, String... uris) throws InterruptedException { @@ -110,6 +148,7 @@ public List get(SocketAddress remoteAddress, String... 
uris) t final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); httpRequest.headers().add(HOST, "localhost"); httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(httpRequest); } return sendRequests(remoteAddress, requests); @@ -143,6 +182,7 @@ private List processRequestsWithBody( request.headers().add(HttpHeaderNames.HOST, "localhost"); request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(request); } return sendRequests(remoteAddress, requests); @@ -153,12 +193,14 @@ private synchronized List sendRequests(final SocketAddress rem final CountDownLatch latch = new CountDownLatch(requests.size()); final List content = Collections.synchronizedList(new ArrayList<>(requests.size())); - clientBootstrap.handler(new CountDownLatchHandler(latch, content)); + final AwaitableChannelInitializer handler = handlerFactory.apply(latch, content); + clientBootstrap.handler(handler); ChannelFuture channelFuture = null; try { channelFuture = clientBootstrap.connect(remoteAddress); channelFuture.sync(); + handler.await(); for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); @@ -184,12 +226,12 @@ public void close() { /** * helper factory which adds returned data to a list and uses a count down latch to decide when done */ - private static class CountDownLatchHandler extends ChannelInitializer { + private static class CountDownLatchHandlerHttp extends AwaitableChannelInitializer { private final CountDownLatch latch; private final Collection content; - CountDownLatchHandler(final CountDownLatch latch, final Collection content) { + CountDownLatchHandlerHttp(final CountDownLatch latch, final Collection content) { this.latch = latch; this.content = content; } @@ -222,4 +264,145 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } + /** + * A channel initializer with the ability to wait for initialization to complete + * + */ + private static abstract class AwaitableChannelInitializer extends ChannelInitializer { + void await() { + // do nothing + } + } + + /** + * helper factory which adds returned data to a list and uses a count down latch to decide when done + */ + private static class CountDownLatchHandlerHttp2 extends AwaitableChannelInitializer { + + private final CountDownLatch latch; + private final Collection content; + private Http2SettingsHandler settingsHandler; + + CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection content) { + this.latch = latch; + this.content = content; + } + + @Override + protected void initChannel(SocketChannel ch) { + final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(); + final Http2Connection connection = new DefaultHttp2Connection(false); + settingsHandler = new Http2SettingsHandler(ch.newPromise()); + + final ChannelInboundHandler responseHandler = new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { + final FullHttpResponse response = (FullHttpResponse) msg; + + // this is the upgrade request, skipping it over + if (Boolean.TRUE.equals(ctx.channel().attr(AttributeKey.valueOf("upgrade")).getAndRemove())) { + return; + } + + // We copy the 
buffer manually to avoid a huge allocation on a pooled allocator. We have + // a test that tracks huge allocations, so we want to avoid them in this test code. + ByteBuf newContent = Unpooled.copiedBuffer(((FullHttpResponse) msg).content()); + content.add(response.replace(newContent)); + latch.countDown(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + latch.countDown(); + } + }; + + final HttpToHttp2ConnectionHandler connectionHandler = new HttpToHttp2ConnectionHandlerBuilder().connection(connection) + .frameListener( + new DelegatingDecompressorFrameListener( + connection, + new InboundHttp2ToHttpAdapterBuilder(connection).maxContentLength(maxContentLength).propagateSettings(true).build() + ) + ) + .build(); + + final HttpClientCodec sourceCodec = new HttpClientCodec(); + final Http2ClientUpgradeCodec upgradeCodec = new Http2ClientUpgradeCodec(connectionHandler); + final HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(sourceCodec, upgradeCodec, maxContentLength); + + ch.pipeline().addLast(sourceCodec); + ch.pipeline().addLast(upgradeHandler); + ch.pipeline().addLast(new HttpContentDecompressor()); + ch.pipeline().addLast(new UpgradeRequestHandler(settingsHandler, responseHandler)); + } + + @Override + void await() { + try { + // Wait for the HTTP/2 settings to be sent over before moving on to sending the requests + settingsHandler.awaitSettings(5, TimeUnit.SECONDS); + } catch (final Exception ex) { + throw new RuntimeException(ex); + } + } + } + + /** + * A handler that triggers the cleartext upgrade to HTTP/2 (h2c) by sending an + * initial HTTP request. + */ + private static class UpgradeRequestHandler extends ChannelInboundHandlerAdapter { + private final ChannelInboundHandler settingsHandler; + private final ChannelInboundHandler responseHandler; + + UpgradeRequestHandler(final ChannelInboundHandler settingsHandler, final ChannelInboundHandler responseHandler) { + this.settingsHandler = settingsHandler; + this.responseHandler = responseHandler; + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + // The first request is an HTTP/2 protocol upgrade (since we support only h2c there) + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + + ctx.channel().attr(AttributeKey.newInstance("upgrade")).set(true); + ctx.writeAndFlush(request); + ctx.fireChannelActive(); + + ctx.pipeline().remove(this); + ctx.pipeline().addLast(settingsHandler); + ctx.pipeline().addLast(responseHandler); + } + } + + private static class Http2SettingsHandler extends SimpleChannelInboundHandler { + private ChannelPromise promise; + + Http2SettingsHandler(ChannelPromise promise) { + this.promise = promise; + } + + /** + * Wait for this handler to be added after the upgrade to HTTP/2, and for the initial preface + * handshake to complete. 
+ */ + void awaitSettings(long timeout, TimeUnit unit) throws Exception { + if (!promise.awaitUninterruptibly(timeout, unit)) { + throw new IllegalStateException("Timed out waiting for HTTP/2 settings"); + } + if (!promise.isSuccess()) { + throw new RuntimeException(promise.cause()); + } + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Settings msg) throws Exception { + promise.setSuccess(); + ctx.pipeline().remove(this); + } + } + } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java index 029aed1f3cc89..cda66b8d828fa 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -109,7 +109,7 @@ public void testThatHttpPipeliningWorks() throws Exception { } } - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[] {})); try { Collection responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses); @@ -163,9 +163,12 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha @Override protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - ch.pipeline().replace("handler", "handler", new PossiblySlowUpstreamHandler(executorService)); } + @Override + public ChannelHandler getRequestHandler() { + return new PossiblySlowUpstreamHandler(executorService); + } } class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index ec879e538fe20..eb96f14f10c70 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -202,7 +202,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); request.headers().set(HttpHeaderNames.EXPECT, expectation); HttpUtil.setContentLength(request, contentLength); @@ -322,7 +322,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); @@ -384,7 +384,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final 
TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); long numOfHugeAllocations = getHugeAllocationCount(); @@ -454,7 +454,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); // Test pre-flight request - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); @@ -471,7 +471,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } // Test short-circuited request - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); request.headers().add(CorsHandler.ORIGIN, "google.com"); diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ 
b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 86694b9bc9da7..af9485c991f0c 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.1.0' + api 'org.apache.xmlbeans:xmlbeans:5.1.1' api 'org.apache.commons:commons-collections4:4.4' // MS Office api 
"org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 deleted file mode 100644 index 85f757b61048c..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3534ab896663e6f6d8a2cf46882d7407641d7a31 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 new file mode 100644 index 0000000000000..4d1d2ad0807e7 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 @@ -0,0 +1 @@ +48a369df0eccb509d46203104e4df9cb00f0f68b \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 227d7d1b68977..08cd32e80a7ca 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,9 +44,9 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.27.0' - api 'com.azure:azure-storage-common:12.16.0' - api 'com.azure:azure-core-http-netty:1.12.0' + api 'com.azure:azure-core:1.31.0' + api 'com.azure:azure-storage-common:12.18.0' + api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -58,7 +58,7 @@ dependencies { api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.18' api 'io.projectreactor.netty:reactor-netty:1.0.18' - api 'io.projectreactor.netty:reactor-netty-core:1.0.19' + api 'io.projectreactor.netty:reactor-netty-core:1.0.22' api 'io.projectreactor.netty:reactor-netty-http:1.0.18' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" @@ -137,7 +137,7 @@ thirdPartyAudit { 'javax.xml.bind.annotation.XmlAccessOrder', 'javax.xml.bind.annotation.XmlAccessType', 'javax.xml.bind.annotation.XmlAccessorOrder', - 
'javax.xml.bind.annotation.XmlAccessorType', + 'javax.xml.bind.annotation.XmlAccessorType', 'javax.xml.bind.annotation.XmlAttribute', 'javax.xml.bind.annotation.XmlElement', 'javax.xml.bind.annotation.XmlElement$DEFAULT', diff --git a/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 deleted file mode 100644 index 9206b697ca648..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75a2db538d218e2bd3c2cbdf04c955b8f6db6626 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..6a5076b3da301 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 @@ -0,0 +1 @@ +39f18dae02237f90f1cd23b56701d7f9d9525531 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 deleted file mode 100644 index 1b5d162c004de..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4381e4e2801ee190ae76b61dbd992e94b40272e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 new file mode 100644 index 0000000000000..5cb180b20cf8b --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 @@ -0,0 +1 @@ +70dcc08887f2d70a8f812bf00d4fa10390fab3fd \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 deleted file mode 100644 index ebf328aa69ee8..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f652b89a30269bdff6644468632726d4ba4fbd1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 new file mode 100644 index 0000000000000..f824d6cdf4f18 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 @@ -0,0 +1 @@ +cb6fa5863f5cd8406934baec739285209165ef4b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 deleted file mode 100644 index 74df264a2b908..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adb58ba62d297b56d6b7915a50f048eddcfc81a6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 new file mode 100644 index 0000000000000..4c82e37d27043 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 @@ -0,0 +1 @@ +5c2a258ac71e525c65f2e3a0bcf458b6c79bbc16 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null 
@@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-1.3.5.md b/release-notes/opensearch.release-notes-1.3.5.md new file mode 100644 index 0000000000000..fbf866bb6e112 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.5.md @@ -0,0 +1,9 @@ +## 2022-08-30 Version 1.3.5 Release Notes + +### Upgrades +* OpenJDK Update (July 2022 Patch releases) ([#4097](https://github.com/opensearch-project/OpenSearch/pull/4097)) +* Update Netty to 4.1.79.Final ([#3868](https://github.com/opensearch-project/OpenSearch/pull/3868)) + +### Bug Fixes +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/OpenSearch/pull/4143)) +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) diff --git a/release-notes/opensearch.release-notes-2.2.1.md b/release-notes/opensearch.release-notes-2.2.1.md new file mode 100644 index 0000000000000..974ff8e09a426 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.2.1.md @@ -0,0 +1,7 @@ +## 2022-08-30 Version 2.2.1 Release Notes + +### Upgrades +* Update Gradle to 7.5.1 ([#4211](https://github.com/opensearch-project/OpenSearch/pull/4211)) + +### Bug Fixes +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) diff --git a/release-notes/opensearch.release-notes-2.3.0.md b/release-notes/opensearch.release-notes-2.3.0.md new file mode 100644 index 0000000000000..1532ab31106f7 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.3.0.md @@ -0,0 +1,55 @@ +## 2022-09-08 Version 2.3.0 Release Notes + +### Features/Enhancements +* [Backport to 2.x] [Segment Replication] - Update replicas to commit SegmentInfos instead of relying on segments_N from primary shards. ([#4450](https://github.com/opensearch-project/opensearch/pull/4450)) +* [Segment Replication] [Backport] Fix timeout issue by calculating time needed to process getSegmentFiles. ([#4434](https://github.com/opensearch-project/opensearch/pull/4434)) +* [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/opensearch/pull/4414)) ([#4425](https://github.com/opensearch-project/opensearch/pull/4425)) +* [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/opensearch/pull/4386)) ([#4424](https://github.com/opensearch-project/opensearch/pull/4424)) +* Segment Replication - Fix NoSuchFileException errors caused when computing metadata snapshot on primary shards. 
([#4366](https://github.com/opensearch-project/opensearch/pull/4366)) ([#4422](https://github.com/opensearch-project/opensearch/pull/4422)) +* [Remote Store] Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/opensearch/pull/4253)) ([#4418](https://github.com/opensearch-project/opensearch/pull/4418)) +* [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/opensearch/pull/4363)) ([#4396](https://github.com/opensearch-project/opensearch/pull/4396)) +* [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/opensearch/pull/4365)) ([#4397](https://github.com/opensearch-project/opensearch/pull/4397)) +* Segment Replication - Implement segment replication event cancellation. ([#4225](https://github.com/opensearch-project/opensearch/pull/4225)) ([#4387](https://github.com/opensearch-project/opensearch/pull/4387)) +* [Backport 2.x] [Remote Store] Backport remote segment store changes ([#4380](https://github.com/opensearch-project/opensearch/pull/4380)) +* [Backport 2.x] Added timing data and more granular stages to SegmentReplicationState ([#4367](https://github.com/opensearch-project/opensearch/pull/4367)) +* [Backport 2.x] Support shard promotion with Segment Replication. ([#4135](https://github.com/opensearch-project/opensearch/pull/4135)) ([#4325](https://github.com/opensearch-project/opensearch/pull/4325)) +* [Segment Replication] Update PrimaryShardAllocator to prefer replicas with higher replication checkpoint ([#4041](https://github.com/opensearch-project/opensearch/pull/4041)) ([#4252](https://github.com/opensearch-project/opensearch/pull/4252)) +* [Backport 2.x] [Segment Replication] Backport all PR's containing remaining segment replication changes ([#4243](https://github.com/opensearch-project/opensearch/pull/4243)) +* [Backport 2.x] [Segment Replication] Backport PR's : #3525 #3533 #3540 #3943 #3963 From main branch ([#4181](https://github.com/opensearch-project/opensearch/pull/4181)) +* [Backport 2.x] [Segment Replication] Added source-side classes for orchestrating replication events. 
([#4128](https://github.com/opensearch-project/opensearch/pull/4128)) + +### Bug Fixes +* [Bug]: gradle check failing with java heap OutOfMemoryError ([#4328](https://github.com/opensearch-project/opensearch/pull/4328)) ([#4442](https://github.com/opensearch-project/opensearch/pull/4442)) +* [Backport 2.x] Revert to Netty 4.1.79.Final ([#4432](https://github.com/opensearch-project/opensearch/pull/4432)) +* Bug fixes for dependabot changelog verifier ([#4364](https://github.com/opensearch-project/opensearch/pull/4364)) ([#4395](https://github.com/opensearch-project/opensearch/pull/4395)) +* [BUG] Create logs directory before running OpenSearch on Windows ([#4305](https://github.com/opensearch-project/opensearch/pull/4305)) ([#4335](https://github.com/opensearch-project/opensearch/pull/4335)) +* [BUG] Running "opensearch-service.bat start" and "opensearch-service.bat manager" ([#4289](https://github.com/opensearch-project/opensearch/pull/4289)) ([#4293](https://github.com/opensearch-project/opensearch/pull/4293)) +* [Backport 2.x] Do not fail replica shard due to primary closure ([#4309](https://github.com/opensearch-project/opensearch/pull/4309)) +* [Bug]: gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/opensearch/pull/4150)) ([#4167](https://github.com/opensearch-project/opensearch/pull/4167)) +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/opensearch/pull/4143)) ([#4144](https://github.com/opensearch-project/opensearch/pull/4144)) + +### Infrastructure +* Add workflow for changelog verification ([#4085](https://github.com/opensearch-project/opensearch/pull/4085)) ([#4284](https://github.com/opensearch-project/opensearch/pull/4284)) +* Add 2.x version to CHANGELOG ([#4297](https://github.com/opensearch-project/opensearch/pull/4297)) ([#4303](https://github.com/opensearch-project/opensearch/pull/4303)) +* Update the head ref to changelog verifier ([#4296](https://github.com/opensearch-project/opensearch/pull/4296)) ([#4298](https://github.com/opensearch-project/opensearch/pull/4298)) +* Publish transport-netty4 module to central repository ([#4054](https://github.com/opensearch-project/opensearch/pull/4054)) ([#4078](https://github.com/opensearch-project/opensearch/pull/4078)) + +### Maintenance +* Add bwcVersion 1.3.6 to 2.x ([#4452](https://github.com/opensearch-project/opensearch/pull/4452)) +* [AUTO] [2.x] Added bwc version 2.2.2. ([#4385](https://github.com/opensearch-project/opensearch/pull/4385)) +* Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/opensearch/pull/4359)) ([#4374](https://github.com/opensearch-project/opensearch/pull/4374)) +* Adding @dreamer-89 to Opensearch maintainers. 
([#4342](https://github.com/opensearch-project/opensearch/pull/4342)) ([#4345](https://github.com/opensearch-project/opensearch/pull/4345)) +* [CVE] Update snakeyaml dependency ([#4341](https://github.com/opensearch-project/opensearch/pull/4341)) ([#4347](https://github.com/opensearch-project/opensearch/pull/4347)) +* Some dependency updates ([#4308](https://github.com/opensearch-project/opensearch/pull/4308)) ([#4311](https://github.com/opensearch-project/opensearch/pull/4311)) +* Added bwc version 2.2.1 ([#4193](https://github.com/opensearch-project/opensearch/pull/4193)) +* Update Gradle to 7.5.1 ([#4211](https://github.com/opensearch-project/opensearch/pull/4211)) ([#4213](https://github.com/opensearch-project/opensearch/pull/4213)) +* [Backport] Upgrade dependencies ([#4165](https://github.com/opensearch-project/opensearch/pull/4165)) +* Bumping 2.x to 2.3.0 ([#4098](https://github.com/opensearch-project/opensearch/pull/4098)) + +### Refactoring +* Refactored the src and test of GeoHashGrid and GeoTileGrid Aggregations on GeoPoint from server folder to geo module.([#4071](https://github.com/opensearch-project/opensearch/pull/4071)) ([#4072](https://github.com/opensearch-project/opensearch/pull/4072)) ([#4180](https://github.com/opensearch-project/opensearch/pull/4180)) ([#4281](https://github.com/opensearch-project/opensearch/pull/4281)) +* Update the head ref to changelog verifier ([#4296](https://github.com/opensearch-project/opensearch/pull/4296)) ([#4298](https://github.com/opensearch-project/opensearch/pull/4298)) +* [2.x] Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/opensearch/pull/4307)) ([#4324](https://github.com/opensearch-project/opensearch/pull/4324)) +* Refactored the src and test of GeoHashGrid and GeoTileGrid Aggregations on GeoPoint from server folder to geo module.([#4071](https://github.com/opensearch-project/opensearch/pull/4071)) ([#4072](https://github.com/opensearch-project/opensearch/pull/4072)) ([#4180](https://github.com/opensearch-project/opensearch/pull/4180)) ([#4281](https://github.com/opensearch-project/opensearch/pull/4281)) +* Refactors the GeoBoundsAggregation for geo_point types from the core server to the geo module. 
([#4179](https://github.com/opensearch-project/opensearch/pull/4179)) +* Backporting multiple 2.* release notes from main to the 2.x branch ([#4154](https://github.com/opensearch-project/opensearch/pull/4154)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json new file mode 100644 index 0000000000000..19cb03aaf20c4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json @@ -0,0 +1,29 @@ +{ + "cluster.get_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Get details and status of decommissioned attribute" + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/_status", + "methods": [ + "GET" + ] + } + ] + }, + "params": { + "timeout": { + "type": "time", + "description": "Explicit operation timeout" + }, + "local": { + "type": "boolean", + "description": "Return local information, do not retrieve the state from master node (default: false)" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json new file mode 100644 index 0000000000000..815964e49e7a0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json @@ -0,0 +1,35 @@ +{ + "cluster.put_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Decommissions an awareness attribute" + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}", + "methods": [ + "PUT" + ], + "parts": { + "awareness_attribute_name": { + "type": "string", + "description": "Awareness attribute name" + }, + "awareness_attribute_value": { + "type": "string", + "description": "Awareness attribute value" + } + } + } + ] + }, + "params": { + "timeout": { + "type": "time", + "description": "Explicit operation timeout" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json new file mode 100644 index 0000000000000..d3a2104c01bc0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -0,0 +1,44 @@ + +{ + "create_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Creates point in time context." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/{index}/_search/point_in_time", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params":{ + "allow_partial_pit_creation":{ + "type":"boolean", + "description":"Allow if point in time can be created with partial failures" + }, + "keep_alive":{ + "type":"string", + "description":"Specify the keep alive for point in time" + }, + "preference":{ + "type":"string", + "description":"Specify the node or shard the operation should be performed on (default: random)" + }, + "routing":{ + "type":"list", + "description":"A comma-separated list of specific routing values" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json new file mode 100644 index 0000000000000..5ff01aa746df9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -0,0 +1,19 @@ +{ + "delete_all_pits":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes all active point in time searches." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time/_all", + "methods":[ + "DELETE" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json new file mode 100644 index 0000000000000..b54d9f76204f4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -0,0 +1,23 @@ +{ + "delete_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes one or more point in time searches based on the IDs passed." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time", + "methods":[ + "DELETE" + ] + } + ] + }, + "body":{ + "description":"A comma-separated list of pit IDs to clear", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index aa4abc7a11eae..6ebe273d552cc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,11 +1,14 @@ --- "Help": - skip: - version: " - 7.99.99" - reason: shard path stats were added in 8.0.0 + version: " - 2.9.99" + reason: point in time stats were added in 3.0.0 + features: node_selector - do: cat.shards: help: true + node_selector: + version: "3.0.0 - " - match: $body: | @@ -67,6 +70,9 @@ search.scroll_current .+ \n search.scroll_time .+ \n search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n segments.count .+ \n segments.memory .+ \n segments.index_writer_memory .+ \n @@ -82,6 +88,92 @@ path.state .+ \n $/ --- +"Help before - 3.0.0": + - skip: + version: "3.0.0 - " + reason: point in time stats were added in 3.0.0 + features: node_selector + - do: + cat.shards: + help: true + node_selector: + version: " - 2.9.99" + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + $/ +--- "Test cat shards output": - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml new file mode 100644 index 0000000000000..2023bcc8f5c87 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -0,0 
+1,130 @@ +"Create PIT, Search with PIT ID and Delete": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_pit + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id", "keep_alive":"10m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + + - do: + search: + rest_total_hits_as_int: true + index: test_pit + size: 1 + sort: foo + body: + query: + match_all: {} + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + + - do: + delete_pit: + body: + "pit_id": [$pit_id] + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + +--- +"Delete all": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + + - do: + delete_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + + - do: + catch: missing + delete_all_pits: { } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 2e298441918bc..09278690f5d05 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -651,92 +651,6 @@ setup: } ] ---- -"Simple Composite aggregation with GeoTile grid": - - skip: - version: " - 7.4.99" - reason: geotile_grid is not supported until 7.5.0 - - do: - search: - rest_total_hits_as_int: true - index: test - body: - aggregations: - test: - composite: - sources: [ - "geo": { - "geotile_grid": { - "field": "geo_point", - "precision": 12 - } - }, - { - "kw": { - "terms": { - "field": "keyword" - } - } - } - ] - - - match: {hits.total: 6} - - length: { aggregations.test.buckets: 4 } - - match: { aggregations.test.buckets.0.key.geo: "12/730/1590" } - - match: { aggregations.test.buckets.0.key.kw: "foo" } - - match: { aggregations.test.buckets.0.doc_count: 1 } - - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.1.key.kw: "bar" } - - match: { aggregations.test.buckets.1.doc_count: 2 } - - match: { aggregations.test.buckets.2.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.2.key.kw: "foo" } - - match: { aggregations.test.buckets.2.doc_count: 1 } - - match: { aggregations.test.buckets.3.key.geo: "12/2048/0" } - - match: { 
aggregations.test.buckets.3.key.kw: "bar" } - - match: { aggregations.test.buckets.3.doc_count: 1 } - ---- -"Simple Composite aggregation with geotile grid add aggregate after": - - skip: - version: " - 7.4.99" - reason: geotile_grid is not supported until 7.5.0 - - do: - search: - index: test - body: - aggregations: - test: - composite: - sources: [ - "geo": { - "geotile_grid": { - "field": "geo_point", - "precision": 12 - } - }, - { - "kw": { - "terms": { - "field": "keyword" - } - } - } - ] - after: { "geo": "12/730/1590", "kw": "foo" } - - - match: { hits.total.value: 6 } - - match: { hits.total.relation: "eq" } - - length: { aggregations.test.buckets: 3 } - - match: { aggregations.test.buckets.0.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.0.key.kw: "bar" } - - match: { aggregations.test.buckets.0.doc_count: 2 } - - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.1.key.kw: "foo" } - - match: { aggregations.test.buckets.1.doc_count: 1 } - - match: { aggregations.test.buckets.2.key.geo: "12/2048/0" } - - match: { aggregations.test.buckets.2.key.kw: "bar" } - - match: { aggregations.test.buckets.2.doc_count: 1 } - --- "Mixed ip and unmapped fields": - skip: diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index ee2067c591cef..960e17b76acb5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -40,6 +40,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.BytesRef; +import org.hamcrest.MatcherAssert; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -48,6 +49,7 @@ import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -108,6 +110,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; @@ -698,6 +701,104 @@ public void testReplicaCorruption() throws Exception { ensureGreen(TimeValue.timeValueSeconds(60)); } + public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); + final List dataNodeStats = nodeStats.getNodes() + .stream() + .filter(stat -> stat.getNode().isDataNode()) + .collect(Collectors.toUnmodifiableList()); + MatcherAssert.assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); + + final NodeStats primaryNode = dataNodeStats.get(0); + final NodeStats replicaNode = dataNodeStats.get(1); + assertAcked( + prepareCreate("test").setSettings( + 
Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.routing.allocation.include._name", primaryNode.getNode().getName()) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + .put("index.allocation.max_retries", Integer.MAX_VALUE) // keep on retrying + + ) + ); + ensureGreen(); + + // Add custom send behavior between primary and replica that will + // count down a latch to indicate that a replication operation is + // currently in flight, and then block on a second latch that will + // be released once the primary shard has been corrupted. + final CountDownLatch indexingInFlight = new CountDownLatch(1); + final CountDownLatch corruptionHasHappened = new CountDownLatch(1); + final MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode.getNode().getName() + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode.getNode().getName()), + (connection, requestId, action, request, options) -> { + if (request instanceof TransportReplicationAction.ConcreteShardRequest) { + indexingInFlight.countDown(); + try { + corruptionHasHappened.await(); + } catch (InterruptedException e) { + logger.info("Interrupted while waiting for corruption"); + } + } + connection.sendRequest(requestId, action, request, options); + } + ); + + // Configure the modified data node as a replica + final Settings build = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put("index.routing.allocation.include._name", primaryNode.getNode().getName() + "," + replicaNode.getNode().getName()) + .build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(build).get(); + client().admin().cluster().prepareReroute().get(); + ensureGreen(); + + // Create a snapshot repository. This repo is used to take a snapshot after + // corrupting a file, which causes the node to notice the corrupt data and + // close the shard. + assertAcked( + client().admin() + .cluster() + .preparePutRepository("test-repo") + .setType("fs") + .setSettings( + Settings.builder() + .put("location", randomRepoPath().toAbsolutePath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + ) + ); + + client().prepareIndex("test").setSource("field", "value").execute(); + indexingInFlight.await(); + + // Corrupt a file on the primary then take a snapshot. Snapshot should + // finish in the PARTIAL state since the corrupted file will cause a checksum + // validation failure. 
+ final ShardRouting corruptedShardRouting = corruptRandomPrimaryFile(); + logger.info("--> {} corrupted", corruptedShardRouting); + final CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test") + .get(); + final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); + MatcherAssert.assertThat("Expect file corruption to cause PARTIAL snapshot state", snapshotState, equalTo(SnapshotState.PARTIAL)); + + // Unblock the blocked indexing thread now that corruption on the primary has been confirmed + corruptionHasHappened.countDown(); + + // Assert the cluster returns to green status because the replica will be promoted to primary + ensureGreen(); + } + private int numShards(String... index) { ClusterState state = client().admin().cluster().prepareState().get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 8566cc5556861..9b2ab753832d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -9,13 +9,14 @@ package org.opensearch.indices.replication; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.lucene.index.SegmentInfos; import org.junit.BeforeClass; import org.opensearch.action.admin.indices.segments.IndexShardSegments; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.ShardSegments; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -31,23 +32,31 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.plugins.Plugin; import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Collection; import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationIT extends OpenSearchIntegTestCase { @@ -61,6 +70,11 @@ public static void assumeFeatureFlag() { assumeTrue("Segment replication Feature flag is enabled", Boolean.parseBoolean(System.getProperty(FeatureFlags.REPLICATION_TYPE))); } + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + @Override public Settings indexSettings() { return Settings.builder() @@ -314,6 +328,65 @@ public void testReplicationAfterForceMerge() throws Exception { } } + public void testCancellation() throws Exception { + final String primaryNode = internalCluster().startNode(); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); + ensureYellow(INDEX_NAME); + + final String replicaNode = internalCluster().startNode(); + + final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance( + SegmentReplicationSourceService.class, + primaryNode + ); + final IndexShard primaryShard = getIndexShard(primaryNode); + + CountDownLatch latch = new CountDownLatch(1); + + MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) { + FileChunkRequest req = (FileChunkRequest) request; + logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk()); + if (req.name().endsWith("cfs") && req.lastChunk()) { + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + connection.sendRequest(requestId, action, request, options); + } + ); + + final int docCount = scaledRandomIntBetween(0, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(docCount); + waitForDocs(docCount, indexer); + + flush(INDEX_NAME); + } + segmentReplicationSourceService.beforeIndexShardClosed(primaryShard.shardId(), primaryShard, indexSettings()); + latch.countDown(); + assertDocCounts(docCount, primaryNode); + } + public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { final String primaryNode = internalCluster().startNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); @@ -419,6 +492,60 @@ public void testDeleteOperations() throws Exception { } } + public void testUpdateOperations() throws Exception { + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellow(INDEX_NAME); + final String replica = internalCluster().startNode(); + + final int initialDocCount = scaledRandomIntBetween(0, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + // wait a short amount of time to give replication a 
chance to complete. + assertHitCount(client(primary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + waitForReplicaUpdate(); + + assertHitCount(client(primary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + Set ids = indexer.getIds(); + String id = ids.toArray()[0].toString(); + UpdateResponse updateResponse = client(primary).prepareUpdate(INDEX_NAME, id) + .setDoc(Requests.INDEX_CONTENT_TYPE, "foo", "baz") + .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) + .get(); + assertFalse("request shouldn't have forced a refresh", updateResponse.forcedRefresh()); + assertEquals(2, updateResponse.getVersion()); + + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertSearchHits(client(primary).prepareSearch(INDEX_NAME).setQuery(matchQuery("foo", "baz")).get(), id); + assertSearchHits(client(replica).prepareSearch(INDEX_NAME).setQuery(matchQuery("foo", "baz")).get(), id); + + } + } + private void assertSegmentStats(int numberOfReplicas) throws IOException { final IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().segments(new IndicesSegmentsRequest()).actionGet(); @@ -458,13 +585,56 @@ private void assertSegmentStats(int numberOfReplicas) throws IOException { ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); final DiscoveryNode replicaNode = state.nodes().resolveNode(replicaShardRouting.currentNodeId()); IndexShard indexShard = getIndexShard(replicaNode.getName()); - final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(indexShard.store().directory()); // calls to readCommit will fail if a valid commit point and all its segments are not in the store. - SegmentInfos.readCommit(indexShard.store().directory(), lastCommitSegmentsFileName); + indexShard.store().readLastCommittedSegmentsInfo(); } } } + public void testDropPrimaryDuringReplication() throws Exception { + final Settings settings = Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 6) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(Settings.EMPTY); + createIndex(INDEX_NAME, settings); + internalCluster().startDataOnlyNodes(6); + ensureGreen(INDEX_NAME); + + int initialDocCount = scaledRandomIntBetween(100, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + // don't wait for replication to complete, stop the primary immediately. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellow(INDEX_NAME); + + // start another replica. 
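As an aside to the update test above: the WAIT_UNTIL refresh policy makes the update call return only once the change is searchable, which is why the version assertion and the follow-up matchQuery checks can run immediately afterwards. A stand-alone sketch of that call pattern (not part of this patch), assuming a Client handle and using only APIs that already appear in the test; the field values are placeholders:

import org.opensearch.action.support.WriteRequest;
import org.opensearch.action.update.UpdateResponse;
import org.opensearch.client.Client;
import org.opensearch.client.Requests;

public final class UpdateWaitUntilSketch {
    private UpdateWaitUntilSketch() {}

    // Updates a single field and only returns once the change is visible to
    // search; the returned version is 2 for a doc indexed once and updated once.
    public static long updateAndWait(Client client, String index, String id) {
        UpdateResponse response = client.prepareUpdate(index, id)
            .setDoc(Requests.INDEX_CONTENT_TYPE, "foo", "baz")
            .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL)
            .get();
        return response.getVersion();
    }
}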
+ internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + // index another doc and refresh - without this the new replica won't catch up. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + assertSegmentStats(6); + } + } + /** * Waits until the replica is caught up to the latest primary segments gen. * @throws Exception if assertion fails @@ -483,10 +653,12 @@ private void waitForReplicaUpdate() throws Exception { final List replicaShardSegments = segmentListMap.get(false); // if we don't have any segments yet, proceed. final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get(); + logger.debug("Primary Segments: {}", primaryShardSegments.getSegments()); if (primaryShardSegments.getSegments().isEmpty() == false) { final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); for (ShardSegments shardSegments : replicaShardSegments) { + logger.debug("Replica {} Segments: {}", shardSegments.getShardRouting(), shardSegments.getSegments()); final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() .stream() .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java index 7352dc7170a21..faa6a54394b00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java @@ -37,7 +37,6 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.filter.Filter; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -51,8 +50,6 @@ import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; -import static org.opensearch.search.aggregations.AggregationBuilders.geohashGrid; -import static org.opensearch.search.aggregations.AggregationBuilders.geotileGrid; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.ipRange; @@ -338,36 +335,4 @@ public void testDateHistogram() throws Exception { } - public void testGeoHashGrid() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geohashGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertSearchResponse(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram histo = 
grid.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); - } - - public void testGeoTileGrid() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geotileGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertSearchResponse(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java index 7cd8b3ed39051..ffc31b7cdb7c4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java @@ -35,15 +35,11 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; import org.opensearch.search.aggregations.InternalAggregation; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.test.OpenSearchIntegTestCase; -import java.util.List; - import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; -import static org.opensearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; @@ -168,33 +164,4 @@ public void testMultiValuedField() throws Exception { assertThat(centroid.lon(), closeTo(multiCentroid.lon(), GEOHASH_TOLERANCE)); assertEquals(2 * numDocs, geoCentroid.count()); } - - public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { - SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) - .addAggregation( - geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME).subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) - ) - .get(); - assertSearchResponse(response); - - GeoGrid grid = response.getAggregations().get("geoGrid"); - assertThat(grid, notNullValue()); - assertThat(grid.getName(), equalTo("geoGrid")); - List buckets = grid.getBuckets(); - for (GeoGrid.Bucket cell : buckets) { - String geohash = cell.getKeyAsString(); - GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); - GeoCentroid centroidAgg = cell.getAggregations().get(aggName); - assertThat( - "Geohash " + geohash + " has wrong centroid latitude ", - expectedCentroid.lat(), - closeTo(centroidAgg.centroid().lat(), GEOHASH_TOLERANCE) - ); - assertThat( - "Geohash " + geohash + " has wrong centroid longitude", - expectedCentroid.lon(), - closeTo(centroidAgg.centroid().lon(), GEOHASH_TOLERANCE) - ); - } - } } diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4ebcd9622ce38..34d7509c7afb2 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ 
b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -68,6 +68,7 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.Version.V_2_1_0; +import static org.opensearch.Version.V_3_0_0; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.common.xcontent.XContentParserUtils.ensureFieldName; @@ -1601,6 +1602,24 @@ private enum OpenSearchExceptionHandle { org.opensearch.indices.replication.common.ReplicationFailedException::new, 161, V_2_1_0 + ), + PRIMARY_SHARD_CLOSED_EXCEPTION( + org.opensearch.index.shard.PrimaryShardClosedException.class, + org.opensearch.index.shard.PrimaryShardClosedException::new, + 162, + V_3_0_0 + ), + DECOMMISSIONING_FAILED_EXCEPTION( + org.opensearch.cluster.decommission.DecommissioningFailedException.class, + org.opensearch.cluster.decommission.DecommissioningFailedException::new, + 163, + V_3_0_0 + ), + NODE_DECOMMISSIONED_EXCEPTION( + org.opensearch.cluster.decommission.NodeDecommissionedException.class, + org.opensearch.cluster.decommission.NodeDecommissionedException::new, + 164, + V_3_0_0 ); final Class exceptionClass; diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index ba512d3fbcdd9..978f0ee2186f2 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -96,7 +96,9 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 052d2ec2b5764..0ca5c1b0fc398 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -40,6 +40,12 @@ import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction; +import 
org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.TransportDeleteDecommissionAction; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; import org.opensearch.action.admin.cluster.health.TransportClusterHealthAction; import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction; @@ -165,7 +171,9 @@ import org.opensearch.action.admin.indices.rollover.RolloverAction; import org.opensearch.action.admin.indices.rollover.TransportRolloverAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; import org.opensearch.action.admin.indices.segments.TransportIndicesSegmentsAction; +import org.opensearch.action.admin.indices.segments.TransportPitSegmentsAction; import org.opensearch.action.admin.indices.settings.get.GetSettingsAction; import org.opensearch.action.admin.indices.settings.get.TransportGetSettingsAction; import org.opensearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; @@ -238,12 +246,14 @@ import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.MultiSearchAction; +import org.opensearch.action.search.NodesGetAllPitsAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportDeletePitAction; import org.opensearch.action.search.TransportGetAllPitsAction; +import org.opensearch.action.search.TransportNodesGetAllPitsAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -298,9 +308,11 @@ import org.opensearch.rest.action.admin.cluster.RestClusterStatsAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteDecommissionAction; import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestGetDecommissionStateAction; import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.opensearch.rest.action.admin.cluster.RestGetScriptContextAction; import org.opensearch.rest.action.admin.cluster.RestGetScriptLanguageAction; @@ -313,6 +325,7 @@ import org.opensearch.rest.action.admin.cluster.RestNodesStatsAction; import org.opensearch.rest.action.admin.cluster.RestNodesUsageAction; import org.opensearch.rest.action.admin.cluster.RestPendingClusterTasksAction; +import org.opensearch.rest.action.admin.cluster.RestDecommissionAction; import org.opensearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; @@ -406,6 +419,8 @@ import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; import 
org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; @@ -671,10 +686,17 @@ public void reg actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); + actions.register(PitSegmentsAction.INSTANCE, TransportPitSegmentsAction.class); + actions.register(NodesGetAllPitsAction.INSTANCE, TransportNodesGetAllPitsAction.class); // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); + // Decommission actions + actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class); + actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class); + actions.register(DeleteDecommissionAction.INSTANCE, TransportDeleteDecommissionAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -846,6 +868,11 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + + // Point in time API + registerHandler.accept(new RestCreatePitAction()); + registerHandler.accept(new RestDeletePitAction()); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, @@ -860,6 +887,9 @@ public void initRestHandlers(Supplier nodesInCluster) { } } registerHandler.accept(new RestCatAction(catActions)); + registerHandler.accept(new RestDecommissionAction()); + registerHandler.accept(new RestGetDecommissionStateAction()); + registerHandler.accept(new RestDeleteDecommissionAction()); // Remote Store APIs if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index a2a77a1316898..739bfaf2a3fb1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -157,7 +157,7 @@ Set resolveVotingConfigExclusions(ClusterState currentSta } else { assert nodeNames.length >= 1; Map existingNodes = StreamSupport.stream(allNodes.spliterator(), false) - .collect(Collectors.toMap(DiscoveryNode::getName, Function.identity())); + .collect(Collectors.toMap(DiscoveryNode::getName, Function.identity(), (r1, r2) -> r1)); for (String nodeName : nodeNames) { if (existingNodes.containsKey(nodeName)) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionAction.java new file mode 100644 index 0000000000000..4bb276197a912 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionAction.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionType; + +public class DeleteDecommissionAction extends ActionType { + public static final DeleteDecommissionAction INSTANCE = new DeleteDecommissionAction(); + public static final String NAME = "cluster:admin/decommission/awareness/delete"; + + private DeleteDecommissionAction() { + super(NAME, DeleteDecommissionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionRequest.java new file mode 100644 index 0000000000000..8804895d9a6a1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class DeleteDecommissionRequest extends ClusterManagerNodeRequest { + + public DeleteDecommissionRequest() {} + + public DeleteDecommissionRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + + @Override + public String toString() { + return "DeleteDecommissionRequest"; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionRequestBuilder.java new file mode 100644 index 0000000000000..50e282450e820 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionRequestBuilder.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
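DeleteDecommissionAction and its request above, together with the actions.register(...) call added to ActionModule earlier in this patch, expose recommission as an ordinary transport action. A stand-alone sketch (not part of this patch) of invoking it synchronously, assuming a Client handle; it uses only classes introduced by this patch plus the standard Client.execute/actionGet calls:

import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionAction;
import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequest;
import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionResponse;
import org.opensearch.client.Client;

public final class DeleteDecommissionSketch {
    private DeleteDecommissionSketch() {}

    // Clears any previously decommissioned awareness attribute and reports
    // whether the cluster manager acknowledged the state update.
    public static boolean recommission(Client client) {
        DeleteDecommissionResponse response = client.execute(DeleteDecommissionAction.INSTANCE, new DeleteDecommissionRequest())
            .actionGet();
        return response.isAcknowledged();
    }
}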
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +public class DeleteDecommissionRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + DeleteDecommissionRequest, + DeleteDecommissionResponse, + DeleteDecommissionRequestBuilder> { + + public DeleteDecommissionRequestBuilder(OpenSearchClient client, DeleteDecommissionAction action) { + super(client, action, new DeleteDecommissionRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionResponse.java new file mode 100644 index 0000000000000..a523a025b51f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionResponse.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; + +import java.io.IOException; + +public class DeleteDecommissionResponse extends AcknowledgedResponse implements ToXContentObject { + + DeleteDecommissionResponse(StreamInput in) throws IOException { + super(in); + } + + DeleteDecommissionResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java new file mode 100644 index 0000000000000..8db770afc720c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionAction.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionService; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +public class TransportDeleteDecommissionAction extends TransportClusterManagerNodeAction< + DeleteDecommissionRequest, + DeleteDecommissionResponse> { + + private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionAction.class); + + DecommissionService decommissionService; + + @Inject + public TransportDeleteDecommissionAction( + TransportService transportService, + ClusterService clusterService, + DecommissionService decommissionService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + DeleteDecommissionAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteDecommissionRequest::new, + indexNameExpressionResolver + ); + this.decommissionService = decommissionService; + + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected DeleteDecommissionResponse read(StreamInput in) throws IOException { + return new DeleteDecommissionResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteDecommissionRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void masterOperation( + DeleteDecommissionRequest request, + ClusterState state, + ActionListener listener + ) { + // TODO: Enable when service class change is merged + logger.info("Received delete decommission Request"); + decommissionService.clearDecommissionStatus(new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + listener.onResponse(new DeleteDecommissionResponse(true)); + } + + @Override + public void onFailure(Exception e) { + logger.error("Recommission failed with exception " + e.getMessage()); + listener.onFailure(e); + } + }); + + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java new file mode 100644 index 0000000000000..72fd1a26cb860 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionType; + +/** + * Get decommission action + * + * @opensearch.internal + */ +public class GetDecommissionStateAction extends ActionType { + + public static final GetDecommissionStateAction INSTANCE = new GetDecommissionStateAction(); + public static final String NAME = "cluster:admin/decommission/awareness/get"; + + private GetDecommissionStateAction() { + super(NAME, GetDecommissionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java new file mode 100644 index 0000000000000..90150c71bf3f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Get Decommissioned attribute request + * + * @opensearch.internal + */ +public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest { + + public GetDecommissionStateRequest() {} + + public GetDecommissionStateRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java new file mode 100644 index 0000000000000..2b8616d0511cd --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
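GetDecommissionStateAction and GetDecommissionStateRequest above give callers a read-only view of the decommissioned attribute; the matching response type is added further down in this patch and may carry null for both the attribute and the status when nothing has been decommissioned. A stand-alone sketch (not part of this patch) of issuing the call asynchronously, assuming a Client handle and the ActionListener.wrap helper:

import org.opensearch.action.ActionListener;
import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction;
import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest;
import org.opensearch.client.Client;

public final class GetDecommissionStateSketch {
    private GetDecommissionStateSketch() {}

    // Prints the currently decommissioned attribute and its status; both
    // getters return null when no decommission has been requested.
    public static void printDecommissionState(Client client) {
        client.execute(
            GetDecommissionStateAction.INSTANCE,
            new GetDecommissionStateRequest(),
            ActionListener.wrap(
                response -> System.out.println(
                    "attribute=" + response.getDecommissionedAttribute() + ", status=" + response.getDecommissionStatus()
                ),
                e -> System.err.println("failed to read decommission state: " + e.getMessage())
            )
        );
    }
}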
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +/** + * Get decommission request builder + * + * @opensearch.internal + */ +public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< + GetDecommissionStateRequest, + GetDecommissionStateResponse, + GetDecommissionStateRequestBuilder> { + + /** + * Creates new get decommissioned attributes request builder + */ + public GetDecommissionStateRequestBuilder(OpenSearchClient client, GetDecommissionStateAction action) { + super(client, action, new GetDecommissionStateRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java new file mode 100644 index 0000000000000..f7b7dc5e09b0b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.ActionResponse; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Response for decommission status + * + * @opensearch.internal + */ +public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { + + private final DecommissionAttribute decommissionedAttribute; + private final DecommissionStatus status; + + GetDecommissionStateResponse() { + this(null, null); + } + + GetDecommissionStateResponse(DecommissionAttribute decommissionedAttribute, DecommissionStatus status) { + this.decommissionedAttribute = decommissionedAttribute; + this.status = status; + } + + GetDecommissionStateResponse(StreamInput in) throws IOException { + if (in.readBoolean()) { + this.decommissionedAttribute = new DecommissionAttribute(in); + } else { + this.decommissionedAttribute = null; + } + if (in.readBoolean()) { + this.status = DecommissionStatus.fromString(in.readString()); + } else { + this.status = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + boolean isNotNullDecommissionAttribute = this.decommissionedAttribute != null; + boolean isNotNullStatus = this.status != null; + out.writeBoolean(isNotNullDecommissionAttribute); + if (isNotNullDecommissionAttribute) { + decommissionedAttribute.writeTo(out); + } + out.writeBoolean(isNotNullStatus); + if (isNotNullStatus) { + out.writeString(status.status()); + } + } + + public 
DecommissionAttribute getDecommissionedAttribute() { + return decommissionedAttribute; + } + + public DecommissionStatus getDecommissionStatus() { + return status; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("awareness"); + if (decommissionedAttribute != null) { + builder.field(decommissionedAttribute.attributeName(), decommissionedAttribute.attributeValue()); + } + builder.endObject(); + if (status != null) { + builder.field("status", status); + } + builder.endObject(); + return builder; + } + + public static GetDecommissionStateResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + String attributeType = "awareness"; + XContentParser.Token token; + DecommissionAttribute decommissionAttribute = null; + DecommissionStatus status = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (attributeType.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException( + "failed to parse decommission attribute type [{}], expected object", + attributeType + ); + } + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String fieldName = parser.currentName(); + String value; + token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_STRING) { + value = parser.text(); + } else { + throw new OpenSearchParseException( + "failed to parse attribute [{}], expected string for attribute value", + fieldName + ); + } + decommissionAttribute = new DecommissionAttribute(fieldName, value); + token = parser.nextToken(); + } else { + throw new OpenSearchParseException("failed to parse attribute type [{}], unexpected type", attributeType); + } + } else { + throw new OpenSearchParseException("failed to parse attribute type [{}]", attributeType); + } + } else if ("status".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException( + "failed to parse status of decommissioning, expected string but found unknown type" + ); + } + status = DecommissionStatus.fromString(parser.text()); + } else { + throw new OpenSearchParseException( + "unknown field found [{}], failed to parse the decommission attribute", + currentFieldName + ); + } + } + } + return new GetDecommissionStateResponse(decommissionAttribute, status); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetDecommissionStateResponse that = (GetDecommissionStateResponse) o; + return decommissionedAttribute.equals(that.decommissionedAttribute) && status == that.status; + } + + @Override + public int hashCode() { + return Objects.hash(decommissionedAttribute, status); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java new file mode 100644 index 0000000000000..db8af85e37c07 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for getting decommission status + * + * @opensearch.internal + */ +public class TransportGetDecommissionStateAction extends TransportClusterManagerNodeReadAction< + GetDecommissionStateRequest, + GetDecommissionStateResponse> { + + @Inject + public TransportGetDecommissionStateAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetDecommissionStateAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetDecommissionStateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected GetDecommissionStateResponse read(StreamInput in) throws IOException { + return new GetDecommissionStateResponse(in); + } + + @Override + protected void clusterManagerOperation( + GetDecommissionStateRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + Metadata metadata = state.metadata(); + DecommissionAttributeMetadata decommissionedAttributes = metadata.custom(DecommissionAttributeMetadata.TYPE); + if (decommissionedAttributes!=null) { + listener.onResponse(new GetDecommissionStateResponse(decommissionedAttributes.decommissionAttribute(), + decommissionedAttributes.status())); + } + else { + listener.onResponse(new GetDecommissionStateResponse()); + } + } + + @Override + protected ClusterBlockException checkBlock(GetDecommissionStateRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java new file mode 100644 index 0000000000000..5b88e91cf4f9d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handlers for getting status of decommission request */ +package org.opensearch.action.admin.cluster.decommission.awareness.get; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java new file mode 100644 index 0000000000000..e1260e638c91d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Decommission transport handlers. */ +package org.opensearch.action.admin.cluster.decommission.awareness; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java new file mode 100644 index 0000000000000..56678650f6e35 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionType; + +/** + * Register decommission action + * + * @opensearch.internal + */ +public final class DecommissionAction extends ActionType { + public static final DecommissionAction INSTANCE = new DecommissionAction(); + public static final String NAME = "cluster:admin/decommission/awareness/put"; + + private DecommissionAction() { + super(NAME, DecommissionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java new file mode 100644 index 0000000000000..f835f9368c41e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Register decommission request. + *

+ * Registers a decommission request with decommission attribute and timeout + * + * @opensearch.internal + */ +public class DecommissionRequest extends ClusterManagerNodeRequest { + + private DecommissionAttribute decommissionAttribute; + private TimeValue timeout; + + public DecommissionRequest() {} + + public DecommissionRequest(DecommissionAttribute decommissionAttribute, TimeValue timeout) { + this.decommissionAttribute = decommissionAttribute; + this.timeout = timeout; + } + + public DecommissionRequest(StreamInput in) throws IOException { + super(in); + decommissionAttribute = new DecommissionAttribute(in); + timeout = in.readTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + decommissionAttribute.writeTo(out); + out.writeTimeValue(timeout); + } + + /** + * Sets decommission attribute for decommission request + * + * @param decommissionAttribute attribute key-value that needs to be decommissioned + * @return this request + */ + public DecommissionRequest setDecommissionAttribute(DecommissionAttribute decommissionAttribute) { + this.decommissionAttribute = decommissionAttribute; + return this; + } + + /** + * Sets the timeout for the request + * + * @param timeout time out for the request + * @return this request + */ + public DecommissionRequest setTimeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + /** + * @return Returns the decommission attribute key-value + */ + public DecommissionAttribute getDecommissionAttribute() { + return this.decommissionAttribute; + } + + public TimeValue getTimeout() { + return this.timeout; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (decommissionAttribute.attributeName() == null || decommissionAttribute.attributeName().isEmpty()) { + validationException = addValidationError("attribute name is missing", validationException); + } + if (decommissionAttribute.attributeValue() == null || decommissionAttribute.attributeValue().isEmpty()) { + validationException = addValidationError("attribute value is missing", validationException); + } + return validationException; + } + + @Override + public String toString() { + return "DecommissionRequest{" + "decommissionAttribute=" + decommissionAttribute + ", timeout=" + timeout + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java new file mode 100644 index 0000000000000..2a9de91056785 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
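DecommissionRequest.validate() above rejects a request whose attribute name or value is missing or empty, and returns null when the request is well formed. A stand-alone sketch (not part of this patch) of how that surfaces to a caller; the "zone" attribute name and its values are placeholders:

import org.opensearch.action.ActionRequestValidationException;
import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest;
import org.opensearch.cluster.decommission.DecommissionAttribute;
import org.opensearch.common.unit.TimeValue;

public final class DecommissionRequestValidationSketch {
    public static void main(String[] args) {
        // Empty attribute value -> validate() reports "attribute value is missing".
        DecommissionRequest invalid = new DecommissionRequest()
            .setDecommissionAttribute(new DecommissionAttribute("zone", ""))
            .setTimeout(TimeValue.timeValueSeconds(30));
        ActionRequestValidationException error = invalid.validate();
        System.out.println(error != null ? error.getMessage() : "valid");

        // Fully populated request -> validate() returns null.
        DecommissionRequest valid = new DecommissionRequest()
            .setDecommissionAttribute(new DecommissionAttribute("zone", "zone-1"))
            .setTimeout(TimeValue.timeValueSeconds(30));
        System.out.println(valid.validate() == null ? "valid" : "invalid");
    }
}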
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.unit.TimeValue; + +/** + * Register decommission request builder + * + * @opensearch.internal + */ +public class DecommissionRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + DecommissionRequest, + DecommissionResponse, + DecommissionRequestBuilder> { + + public DecommissionRequestBuilder(OpenSearchClient client, ActionType action, DecommissionRequest request) { + super(client, action, request); + } + + /** + * @param decommissionAttribute decommission attribute + * @return current object + */ + public DecommissionRequestBuilder setDecommissionedAttribute(DecommissionAttribute decommissionAttribute) { + request.setDecommissionAttribute(decommissionAttribute); + return this; + } + + /** + * @param timeout time out for the request + * @return current object + */ + public DecommissionRequestBuilder setTimeout(TimeValue timeout) { + request.setTimeout(timeout); + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java new file mode 100644 index 0000000000000..499f403c8cd64 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; + +import java.io.IOException; + +/** + * Response for decommission request + * + * @opensearch.internal + */ +public class DecommissionResponse extends AcknowledgedResponse implements ToXContentObject { + + public DecommissionResponse(StreamInput in) throws IOException { + super(in); + } + + public DecommissionResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java new file mode 100644 index 0000000000000..4ed90e4fb13f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
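DecommissionRequestBuilder above is the fluent counterpart to constructing DecommissionRequest by hand. A stand-alone sketch (not part of this patch) of driving a decommission through the builder; the awareness attribute name and the five-minute timeout are placeholder values, and the Client handle is assumed to come from the surrounding code:

import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction;
import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest;
import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder;
import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.decommission.DecommissionAttribute;
import org.opensearch.common.unit.TimeValue;

public final class DecommissionBuilderSketch {
    private DecommissionBuilderSketch() {}

    // Submits a decommission request for one awareness attribute value and
    // blocks until the cluster manager acknowledges (or rejects) it.
    public static boolean decommissionZone(Client client, String zone) {
        DecommissionResponse response = new DecommissionRequestBuilder(client, DecommissionAction.INSTANCE, new DecommissionRequest())
            .setDecommissionedAttribute(new DecommissionAttribute("zone", zone))
            .setTimeout(TimeValue.timeValueMinutes(5))
            .get();
        return response.isAcknowledged();
    }
}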
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.put;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.decommission.DecommissionService;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Transport action for registering decommission
+ *
+ * @opensearch.internal
+ */
+public class TransportDecommissionAction extends TransportClusterManagerNodeAction<DecommissionRequest, DecommissionResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportDecommissionAction.class);
+    private DecommissionService decommissionService;
+
+    @Inject
+    public TransportDecommissionAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        DecommissionService decommissionService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            DecommissionAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            DecommissionRequest::new,
+            indexNameExpressionResolver
+        );
+        this.decommissionService = decommissionService;
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected DecommissionResponse read(StreamInput in) throws IOException {
+        return new DecommissionResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(DecommissionRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+
+    @Override
+    protected void clusterManagerOperation(DecommissionRequest request, ClusterState state, ActionListener<DecommissionResponse> listener)
+        throws Exception {
+        logger.info("initiating awareness attribute [{}] decommissioning", request.getDecommissionAttribute().toString());
+        decommissionService.startDecommissionAction(request.getDecommissionAttribute(), listener);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java
new file mode 100644
index 0000000000000..c361f4b95a484
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +/** Transport handlers for putting a new decommission request */ +package org.opensearch.action.admin.cluster.decommission.awareness.put; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index ee0b204c77aa3..9a7fae9f84a98 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.action.support.master.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.ArrayUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index ed106c44ea36a..3019191e5570e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index 1fd9323edd2f8..2c9bec8398b66 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.action.support.master.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 0a6d7cac79133..85bf8c2ffd9c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.mapping.get; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java new file mode 100644 index 0000000000000..b52ef32a91b16 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.segments; + +import org.opensearch.action.ActionType; + +/** + * Action for retrieving segment information for PITs + */ +public class PitSegmentsAction extends ActionType { + + public static final PitSegmentsAction INSTANCE = new PitSegmentsAction(); + public static final String NAME = "indices:monitor/point_in_time/segments"; + + private PitSegmentsAction() { + super(NAME, IndicesSegmentResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java new file mode 100644 index 0000000000000..84f5e5ad6a1e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.segments; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Transport request for retrieving PITs segment information + */ +public class PitSegmentsRequest extends BroadcastRequest { + private boolean verbose = false; + private final List pitIds = new ArrayList<>(); + + public PitSegmentsRequest() { + this(Strings.EMPTY_ARRAY); + } + + public PitSegmentsRequest(StreamInput in) throws IOException { + super(in); + pitIds.addAll(Arrays.asList(in.readStringArray())); + verbose = in.readBoolean(); + } + + public PitSegmentsRequest(String... pitIds) { + super(pitIds); + this.pitIds.addAll(Arrays.asList(pitIds)); + } + + /** + * true if detailed information about each segment should be returned, + * false otherwise. + */ + public boolean isVerbose() { + return verbose; + } + + /** + * Sets the verbose option. + * @see #isVerbose() + */ + public void setVerbose(boolean v) { + verbose = v; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable((pitIds == null) ? 
null : pitIds.toArray(new String[pitIds.size()])); + out.writeBoolean(verbose); + } + + public List getPitIds() { + return Collections.unmodifiableList(pitIds); + } + + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pitIds == null || pitIds.isEmpty()) { + validationException = addValidationError("no pit ids specified", validationException); + } + return validationException; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java new file mode 100644 index 0000000000000..9d4ece74a7270 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java @@ -0,0 +1,261 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.action.admin.indices.segments; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.ListPitInfo; +import org.opensearch.action.search.PitService; +import org.opensearch.action.search.SearchContextId; +import org.opensearch.action.search.SearchContextIdForNode; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.DefaultShardOperationFailedException; +import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.PlainShardsIterator; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.search.SearchService; +import org.opensearch.search.internal.PitReaderContext; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.action.search.SearchContextId.decode; + +/** + * Transport action for retrieving segment information of PITs + */ +public class TransportPitSegmentsAction extends TransportBroadcastByNodeAction { + private final ClusterService clusterService; + private final IndicesService indicesService; + private final 
SearchService searchService; + private final NamedWriteableRegistry namedWriteableRegistry; + private final TransportService transportService; + private final PitService pitService; + + @Inject + public TransportPitSegmentsAction( + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + SearchService searchService, + NamedWriteableRegistry namedWriteableRegistry, + PitService pitService + ) { + super( + PitSegmentsAction.NAME, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + PitSegmentsRequest::new, + ThreadPool.Names.MANAGEMENT + ); + this.clusterService = clusterService; + this.indicesService = indicesService; + this.searchService = searchService; + this.namedWriteableRegistry = namedWriteableRegistry; + this.transportService = transportService; + this.pitService = pitService; + } + + /** + * Execute PIT segments flow for all PITs or request PIT IDs + */ + @Override + protected void doExecute(Task task, PitSegmentsRequest request, ActionListener listener) { + List pitIds = request.getPitIds(); + if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + pitService.getAllPits(ActionListener.wrap(response -> { + request.clearAndSetPitIds(response.getPitInfos().stream().map(ListPitInfo::getPitId).collect(Collectors.toList())); + super.doExecute(task, request, listener); + }, listener::onFailure)); + } else { + super.doExecute(task, request, listener); + } + } + + /** + * This adds list of shards on which we need to retrieve pit segments details + * @param clusterState the cluster state + * @param request the underlying request + * @param concreteIndices the concrete indices on which to execute the operation + */ + @Override + protected ShardsIterator shards(ClusterState clusterState, PitSegmentsRequest request, String[] concreteIndices) { + final ArrayList iterators = new ArrayList<>(); + for (String pitId : request.getPitIds()) { + SearchContextId searchContext = decode(namedWriteableRegistry, pitId); + for (Map.Entry entry : searchContext.shards().entrySet()) { + final SearchContextIdForNode perNode = entry.getValue(); + // check if node is part of local cluster + if (Strings.isEmpty(perNode.getClusterAlias())) { + final ShardId shardId = entry.getKey(); + iterators.add( + new PitAwareShardRouting( + pitId, + shardId, + perNode.getNode(), + null, + true, + ShardRoutingState.STARTED, + null, + null, + null, + -1L + ) + ); + } + } + } + return new PlainShardsIterator(iterators); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, PitSegmentsRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, PitSegmentsRequest countRequest, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); + } + + @Override + protected ShardSegments readShardResult(StreamInput in) throws IOException { + return new ShardSegments(in); + } + + @Override + protected IndicesSegmentResponse newResponse( + PitSegmentsRequest request, + int totalShards, + int successfulShards, + int failedShards, + List results, + List shardFailures, + ClusterState clusterState + ) { + return new IndicesSegmentResponse( + results.toArray(new ShardSegments[results.size()]), + totalShards, + successfulShards, + failedShards, + 
shardFailures + ); + } + + @Override + protected PitSegmentsRequest readRequestFrom(StreamInput in) throws IOException { + return new PitSegmentsRequest(in); + } + + @Override + public List getShardRoutingsFromInputStream(StreamInput in) throws IOException { + return in.readList(PitAwareShardRouting::new); + } + + /** + * This retrieves segment details of PIT context + * @param request the node-level request + * @param shardRouting the shard on which to execute the operation + */ + @Override + protected ShardSegments shardOperation(PitSegmentsRequest request, ShardRouting shardRouting) { + assert shardRouting instanceof PitAwareShardRouting; + PitAwareShardRouting pitAwareShardRouting = (PitAwareShardRouting) shardRouting; + SearchContextIdForNode searchContextIdForNode = decode(namedWriteableRegistry, pitAwareShardRouting.getPitId()).shards() + .get(shardRouting.shardId()); + PitReaderContext pitReaderContext = searchService.getPitReaderContext(searchContextIdForNode.getSearchContextId()); + if (pitReaderContext == null) { + return new ShardSegments(shardRouting, Collections.emptyList()); + } + return new ShardSegments(pitReaderContext.getShardRouting(), pitReaderContext.getSegments()); + } + + /** + * This holds PIT id which is used to perform broadcast operation in PIT shards to retrieve segments information + */ + public class PitAwareShardRouting extends ShardRouting { + + private final String pitId; + + public PitAwareShardRouting(StreamInput in) throws IOException { + super(in); + this.pitId = in.readString(); + } + + public PitAwareShardRouting( + String pitId, + ShardId shardId, + String currentNodeId, + String relocatingNodeId, + boolean primary, + ShardRoutingState state, + RecoverySource recoverySource, + UnassignedInfo unassignedInfo, + AllocationId allocationId, + long expectedShardSize + ) { + super( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + recoverySource, + unassignedInfo, + allocationId, + expectedShardSize + ); + this.pitId = pitId; + } + + public String getPitId() { + return pitId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(pitId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + super.toXContent(builder, params); + builder.field("pit_id", pitId); + return builder.endObject(); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java index 25eb9aff9e3d7..dd197a37f8616 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java @@ -28,7 +28,7 @@ * Create point in time response with point in time id and shard success / failures */ public class CreatePitResponse extends ActionResponse implements StatusToXContentObject { - private static final ParseField ID = new ParseField("id"); + private static final ParseField ID = new ParseField("pit_id"); private static final ParseField CREATION_TIME = new ParseField("creation_time"); // point in time id diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 943199812771a..5a167c5a6f160 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ 
-65,11 +65,11 @@ public void writeTo(StreamOutput out) throws IOException { static { PARSER.declareBoolean(constructorArg(), new ParseField("successful")); - PARSER.declareString(constructorArg(), new ParseField("pitId")); + PARSER.declareString(constructorArg(), new ParseField("pit_id")); } private static final ParseField SUCCESSFUL = new ParseField("successful"); - private static final ParseField PIT_ID = new ParseField("pitId"); + private static final ParseField PIT_ID = new ParseField("pit_id"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 945fcfd17eb6c..926e9c19a33f5 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -48,6 +48,11 @@ public DeletePitRequest(List pitIds) { this.pitIds.addAll(pitIds); } + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + public DeletePitRequest() {} public List getPitIds() { diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java index b4ad2f6641087..340f9b842adbf 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -21,11 +21,22 @@ */ public class GetAllPitNodesRequest extends BaseNodesRequest { + // Security plugin intercepts and sets the response with permitted PIT contexts + private GetAllPitNodesResponse getAllPitNodesResponse; + @Inject public GetAllPitNodesRequest(DiscoveryNode... 
concreteNodes) { super(concreteNodes); } + public void setGetAllPitNodesResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + this.getAllPitNodesResponse = getAllPitNodesResponse; + } + + public GetAllPitNodesResponse getGetAllPitNodesResponse() { + return getAllPitNodesResponse; + } + public GetAllPitNodesRequest(StreamInput in) throws IOException { super(in); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java index 4a454e7145eff..091447798cf5f 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -52,6 +52,14 @@ public GetAllPitNodesResponse( ); } + /** + * Copy constructor that explicitly sets the list pit infos + */ + public GetAllPitNodesResponse(List listPitInfos, GetAllPitNodesResponse response) { + super(response.getClusterName(), response.getNodes(), response.failures()); + pitInfos.addAll(listPitInfos); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java new file mode 100644 index 0000000000000..af41f7d49551c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for retrieving all PIT reader contexts from nodes + */ +public class NodesGetAllPitsAction extends ActionType { + public static final NodesGetAllPitsAction INSTANCE = new NodesGetAllPitsAction(); + public static final String NAME = "cluster:admin/point_in_time/read_from_nodes"; + + private NodesGetAllPitsAction() { + super(NAME, GetAllPitNodesResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index 0b79b77fd6014..ff068397ad94e 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -15,6 +15,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; @@ -47,12 +48,19 @@ public class PitService { private final ClusterService clusterService; private final SearchTransportService searchTransportService; private final TransportService transportService; + private final NodeClient nodeClient; @Inject - public PitService(ClusterService clusterService, SearchTransportService searchTransportService, TransportService transportService) { + public PitService( + ClusterService clusterService, + SearchTransportService searchTransportService, + TransportService transportService, + NodeClient nodeClient + ) { this.clusterService = clusterService; this.searchTransportService = 
searchTransportService; this.transportService = transportService; + this.nodeClient = nodeClient; } /** @@ -144,6 +152,17 @@ public void onFailure(final Exception e) { }, size); } + /** + * This method returns indices associated for each pit + */ + public Map getIndicesForPits(List pitIds) { + Map pitToIndicesMap = new HashMap<>(); + for (String pitId : pitIds) { + pitToIndicesMap.put(pitId, SearchContextId.decode(nodeClient.getNamedWriteableRegistry(), pitId).getActualIndices()); + } + return pitToIndicesMap; + } + /** * Get all active point in time contexts */ @@ -156,7 +175,7 @@ public void getAllPits(ActionListener getAllPitsListener DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); transportService.sendRequest( transportService.getLocalNode(), - GetAllPitsAction.NAME, + NodesGetAllPitsAction.NAME, new GetAllPitNodesRequest(disNodesArr), new TransportResponseHandler() { diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index f9e36c479dd54..19abe2361290d 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -57,7 +57,11 @@ public TransportDeletePitAction( @Override protected void doExecute(Task task, DeletePitRequest request, ActionListener listener) { List pitIds = request.getPitIds(); - if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + // when security plugin intercepts the request, if PITs are not present in the cluster the PIT IDs in request will be empty + // and in this case return empty response + if (pitIds.isEmpty()) { + listener.onResponse(new DeletePitResponse(new ArrayList<>())); + } else if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { deleteAllPits(listener); } else { deletePits(listener, request); diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java index 21a64e388fa7b..c8529c5b02bd4 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java @@ -8,79 +8,31 @@ package org.opensearch.action.search; -import org.opensearch.action.FailedNodeException; +import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.TransportNodesAction; -import org.opensearch.cluster.service.ClusterService; +import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.SearchService; -import org.opensearch.threadpool.ThreadPool; +import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; -import java.io.IOException; -import java.util.List; - /** - * Transport action to get all active PIT contexts across all nodes + * Transport action to get all active PIT contexts across the cluster */ -public class TransportGetAllPitsAction extends TransportNodesAction< - GetAllPitNodesRequest, - GetAllPitNodesResponse, - GetAllPitNodeRequest, - GetAllPitNodeResponse> { - private final SearchService searchService; +public class TransportGetAllPitsAction extends HandledTransportAction { + private final PitService pitService; @Inject - public 
TransportGetAllPitsAction( - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - ActionFilters actionFilters, - SearchService searchService - ) { - super( - GetAllPitsAction.NAME, - threadPool, - clusterService, - transportService, - actionFilters, - GetAllPitNodesRequest::new, - GetAllPitNodeRequest::new, - ThreadPool.Names.SAME, - GetAllPitNodeResponse.class - ); - this.searchService = searchService; - } - - @Override - protected GetAllPitNodesResponse newResponse( - GetAllPitNodesRequest request, - List getAllPitNodeRespons, - List failures - ) { - return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); - } - - @Override - protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { - return new GetAllPitNodeRequest(); - } - - @Override - protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { - return new GetAllPitNodeResponse(in); + public TransportGetAllPitsAction(ActionFilters actionFilters, TransportService transportService, PitService pitService) { + super(GetAllPitsAction.NAME, transportService, actionFilters, in -> new GetAllPitNodesRequest(in)); + this.pitService = pitService; } - /** - * This retrieves all active PITs in the node - */ - @Override - protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { - GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( - transportService.getLocalNode(), - searchService.getAllPITReaderContexts() - ); - return nodeResponse; + protected void doExecute(Task task, GetAllPitNodesRequest request, ActionListener listener) { + // If security plugin intercepts the request, it'll replace all PIT IDs with permitted PIT IDs + if (request.getGetAllPitNodesResponse() != null) { + listener.onResponse(request.getGetAllPitNodesResponse()); + } else { + pitService.getAllPits(listener); + } } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java new file mode 100644 index 0000000000000..520830cd293f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportNodesGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportNodesGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + NodesGetAllPitsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List getAllPitNodeRespons, + List failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs in the node + */ + @Override + protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index f849be4db4e2b..9e353a35831d0 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -532,6 +532,13 @@ private void onShardOperation( } } + /** + * This method reads ShardRouting from input stream + */ + public List getShardRoutingsFromInputStream(StreamInput in) throws IOException { + return in.readList(ShardRouting::new); + } + /** * A node request * @@ -547,7 +554,7 @@ public class NodeRequest extends TransportRequest implements IndicesRequest { public NodeRequest(StreamInput in) throws IOException { super(in); indicesLevelRequest = readRequestFrom(in); - shards = in.readList(ShardRouting::new); + shards = getShardRoutingsFromInputStream(in); nodeId = in.readString(); } diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index b305c4c8c83a7..7087b64758888 100644 --- 
a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.RetryableAction; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.index.shard.ShardId; @@ -45,6 +46,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Consumer; +import java.util.function.Supplier; /** * Pending Replication Actions @@ -121,7 +123,7 @@ synchronized void acceptNewTrackedAllocationIds(Set trackedAllocationIds } } - cancelActions(toCancel, "Replica left ReplicationGroup"); + cancelActions(toCancel, () -> new IndexShardClosedException(shardId, "Replica left ReplicationGroup")); } @Override @@ -129,15 +131,11 @@ public synchronized void close() { ArrayList>> toCancel = new ArrayList<>(onGoingReplicationActions.values()); onGoingReplicationActions.clear(); - cancelActions(toCancel, "Primary closed."); + cancelActions(toCancel, () -> new PrimaryShardClosedException(shardId)); } - private void cancelActions(ArrayList>> toCancel, String message) { + private void cancelActions(ArrayList>> toCancel, Supplier exceptionSupplier) { threadPool.executor(ThreadPool.Names.GENERIC) - .execute( - () -> toCancel.stream() - .flatMap(Collection::stream) - .forEach(action -> action.cancel(new IndexShardClosedException(shardId, message))) - ); + .execute(() -> toCancel.stream().flatMap(Collection::stream).forEach(action -> action.cancel(exceptionSupplier.get()))); } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 39fb89bc48568..7fc810808f560 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -52,6 +52,7 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperParsingException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.translog.Translog; @@ -514,15 +515,20 @@ public void failShardIfNeeded( if (TransportActions.isShardNotAvailableException(exception) == false) { logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); } - shardStateAction.remoteShardFailed( - replica.shardId(), - replica.allocationId().getId(), - primaryTerm, - true, - message, - exception, - listener - ); + // If a write action fails due to the closure of the primary shard + // then the replicas should not be marked as failed since they are + // still up-to-date with the (now closed) primary shard + if (exception instanceof PrimaryShardClosedException == false) { + shardStateAction.remoteShardFailed( + replica.shardId(), + replica.allocationId().getId(), + primaryTerm, + true, + message, + exception, + listener + ); + } } @Override diff --git a/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java 
b/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java index f8baee06c4315..7d567d73851a9 100644 --- a/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java +++ b/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java @@ -259,6 +259,7 @@ static class Arch { Map m = new HashMap<>(); m.put("amd64", new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317)); m.put("aarch64", new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277)); + m.put("s390x", new Arch(0x80000016, 0xFFFFFFFF, 2, 190, 11, 354, 348)); ARCHITECTURES = Collections.unmodifiableMap(m); } diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index 1d3bbfcba43f9..94043d5c3c89f 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -34,6 +34,8 @@ import org.opensearch.action.ActionFuture; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.bulk.BulkResponse; @@ -339,6 +341,11 @@ public interface Client extends OpenSearchClient, Releasable { */ void deletePits(DeletePitRequest deletePITRequest, ActionListener listener); + /** + * Get information of segments of one or more PITs + */ + void pitSegments(PitSegmentsRequest pitSegmentsRequest, ActionListener listener); + /** * Performs multiple search requests. */ diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 7a7b98bf724f6..550659788a8e1 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -37,6 +37,15 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -791,4 +800,46 @@ public interface ClusterAdminClient extends OpenSearchClient { * Delete specified dangling indices. 
*/ ActionFuture deleteDanglingIndex(DeleteDanglingIndexRequest request); + + /** + * Decommission a node + */ + ActionFuture decommission(DecommissionRequest request); + + /** + * Decommission a node + */ + void decommission(DecommissionRequest request, ActionListener listener); + + /** + * Decommission a node + */ + DecommissionRequestBuilder prepareDecommission(DecommissionRequest request); + + /** + * Get Decommissioned attribute + */ + ActionFuture getDecommission(GetDecommissionStateRequest request); + + /** + * Get Decommissioned attribute + */ + void getDecommission(GetDecommissionStateRequest request, ActionListener listener); + + /** + * Get Decommissioned attribute + */ + GetDecommissionStateRequestBuilder prepareGetDecommission(); + + ActionFuture deleteDecommission(DeleteDecommissionRequest request); + + /** + * Decommission a node + */ + void deleteDecommission(DeleteDecommissionRequest request, ActionListener listener); + + /** + * Decommission a node + */ + DeleteDecommissionRequestBuilder prepareDeleteDecommission(); } diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index b04de7830a780..8acae2788843f 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -32,6 +32,9 @@ package org.opensearch.client; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; @@ -548,4 +551,29 @@ public static DeleteSnapshotRequest deleteSnapshotRequest(String repository, Str public static SnapshotsStatusRequest snapshotsStatusRequest(String repository) { return new SnapshotsStatusRequest(repository); } + + /** + * Creates a new decommission request. + * + * @return returns put decommission request + */ + public static DecommissionRequest decommissionRequest() { + return new DecommissionRequest(); + } + + /** + * Get decommissioned attribute from metadata + * + * @return returns get decommission request + */ + public static GetDecommissionStateRequest getDecommissionStateRequest() { + return new GetDecommissionStateRequest(); + } + + /** + * Creates a new delete decommission request. 
+ */ + public static DeleteDecommissionRequest deleteDecommissionRequest() { + return new DeleteDecommissionRequest(); + } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 7084a856ab3d1..71fd224e2247c 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -43,6 +43,18 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; @@ -240,6 +252,8 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; import org.opensearch.action.admin.indices.settings.get.GetSettingsAction; import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; import org.opensearch.action.admin.indices.settings.get.GetSettingsRequestBuilder; @@ -593,6 +607,11 @@ public void deletePits(final DeletePitRequest deletePITRequest, final ActionList execute(DeletePitAction.INSTANCE, deletePITRequest, listener); } + @Override + public void pitSegments(final PitSegmentsRequest request, final ActionListener listener) { + execute(PitSegmentsAction.INSTANCE, request, listener); + } + @Override public ActionFuture multiSearch(MultiSearchRequest request) { return execute(MultiSearchAction.INSTANCE, request); @@ -1307,6 +1326,53 @@ public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript() { public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript(String id) { return prepareDeleteStoredScript().setId(id); } + + @Override + public ActionFuture 
decommission(DecommissionRequest request) { + return execute(DecommissionAction.INSTANCE, request); + } + + @Override + public void decommission(DecommissionRequest request, ActionListener listener) { + execute(DecommissionAction.INSTANCE, request, listener); + } + + @Override + public DecommissionRequestBuilder prepareDecommission(DecommissionRequest request) { + return new DecommissionRequestBuilder(this, DecommissionAction.INSTANCE, request); + } + + @Override + public ActionFuture getDecommission(GetDecommissionStateRequest request) { + return execute(GetDecommissionStateAction.INSTANCE, request); + } + + @Override + public void getDecommission(GetDecommissionStateRequest request, ActionListener listener) { + execute(GetDecommissionStateAction.INSTANCE, request, listener); + } + + @Override + public GetDecommissionStateRequestBuilder prepareGetDecommission() { + return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); + } + + @Override + public ActionFuture deleteDecommission(DeleteDecommissionRequest request) { + return execute(DeleteDecommissionAction.INSTANCE, request); + } + + @Override + public void deleteDecommission(DeleteDecommissionRequest request, ActionListener listener) { + execute(DeleteDecommissionAction.INSTANCE, request, listener); + } + + @Override + public DeleteDecommissionRequestBuilder prepareDeleteDecommission() { + return new DeleteDecommissionRequestBuilder(this, DeleteDecommissionAction.INSTANCE); + } + + } static class IndicesAdmin implements IndicesAdminClient { @@ -1849,6 +1915,7 @@ public void resolveIndex(ResolveIndexAction.Request request, ActionListener resolveIndex(ResolveIndexAction.Request request) { return execute(ResolveIndexAction.INSTANCE, request); } + } @Override diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index f8ba520e465e2..892e65e2ee5b4 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.action.index.MappingUpdatedAction; import org.opensearch.cluster.action.index.NodeMappingRefreshAction; import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.cluster.metadata.ComponentTemplateMetadata; import org.opensearch.cluster.metadata.ComposableIndexTemplateMetadata; import org.opensearch.cluster.metadata.DataStreamMetadata; @@ -191,6 +192,12 @@ public static List getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom ); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); + registerMetadataCustom( + entries, + DecommissionAttributeMetadata.TYPE, + DecommissionAttributeMetadata::new, + DecommissionAttributeMetadata::readDiffFrom + ); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); return entries; @@ -274,6 +281,13 @@ public static List getNamedXWriteables() { DataStreamMetadata::fromXContent ) ); + entries.add( + new NamedXContentRegistry.Entry( + Metadata.Custom.class, + new ParseField(DecommissionAttributeMetadata.TYPE), + DecommissionAttributeMetadata::fromXContent + ) + ); return entries; } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java 
b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1c7e7cd0419e2..dd928dd911304 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -208,19 +208,6 @@ public Coordinator( this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); this.electionStrategy = electionStrategy; - this.joinHelper = new JoinHelper( - settings, - allocationService, - clusterManagerService, - transportService, - this::getCurrentTerm, - this::getStateForClusterManagerService, - this::handleJoinRequest, - this::joinLeaderInTerm, - this.onJoinValidators, - rerouteService, - nodeHealthService - ); this.persistedStateSupplier = persistedStateSupplier; this.noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); this.lastKnownLeader = Optional.empty(); @@ -244,6 +231,20 @@ public Coordinator( new HandshakingTransportAddressConnector(settings, transportService), configuredHostsResolver ); + this.joinHelper = new JoinHelper( + settings, + allocationService, + clusterManagerService, + transportService, + this::getCurrentTerm, + this::getStateForClusterManagerService, + this::handleJoinRequest, + this::joinLeaderInTerm, + this.onJoinValidators, + rerouteService, + nodeHealthService, + peerFinder.nodeCommissionedListener() + ); this.publicationHandler = new PublicationTransportHandler( transportService, namedWriteableRegistry, @@ -1438,6 +1439,11 @@ private void startElectionScheduler() { public void run() { synchronized (mutex) { if (mode == Mode.CANDIDATE) { + if(peerFinder.localNodeDecommissioned()) { + logger.debug("skip prevoting as local node is decommissioned"); + return; + } + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); if (localNodeMayWinElection(lastAcceptedState) == false) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 656e6d220720f..9e30b9a3f7ee3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.coordination.Coordinator.Mode; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; @@ -57,6 +58,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; @@ -113,6 +115,9 @@ public class JoinHelper { private final TimeValue joinTimeout; // only used for Zen1 joining private final NodeHealthService nodeHealthService; + public boolean isDecommissioned; + private final ActionListener nodeCommissionedListener; + private final Set> pendingOutgoingJoins = Collections.synchronizedSet(new HashSet<>()); private final AtomicReference lastFailedJoinAttempt = 
new AtomicReference<>(); @@ -130,12 +135,14 @@ public class JoinHelper { Function joinLeaderInTerm, Collection> joinValidators, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + ActionListener nodeCommissionedListener ) { this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); + this.nodeCommissionedListener = nodeCommissionedListener; this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { private final long term = currentTermSupplier.getAsLong(); @@ -343,11 +350,22 @@ public void handleResponse(Empty response) { logger.debug("successfully joined {} with {}", destination, joinRequest); lastFailedJoinAttempt.set(null); onCompletion.run(); + if (isDecommissioned) { + isDecommissioned = false; + nodeCommissionedListener.onResponse(null); + } } @Override public void handleException(TransportException exp) { pendingOutgoingJoins.remove(dedupKey); + if (exp instanceof RemoteTransportException && (exp.getCause() instanceof NodeDecommissionedException)) { + logger.info("local node is decommissioned. Will not be able to join the cluster"); + if (!isDecommissioned) { + isDecommissioned = true; + nodeCommissionedListener.onFailure(exp); + } + } logger.info(() -> new ParameterizedMessage("failed to join {} with {}", destination, joinRequest), exp); FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); attempt.logNow(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 5afdb5b12db23..7410efc9ab60f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -39,6 +39,10 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -358,6 +362,7 @@ public boolean runOnlyOnClusterManager() { /** * a task indicates that the current node should become master + * * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #newBecomeClusterManagerTask()} */ @Deprecated @@ -384,8 +389,9 @@ public static Task newFinishElectionTask() { * Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata * will not be created with a newer version of opensearch as well as that all indices are newer or equal to the minimum index * compatibility version. 
- * @see Version#minimumIndexCompatibilityVersion() + * * @throws IllegalStateException if any index is incompatible with the given version + * @see Version#minimumIndexCompatibilityVersion() */ public static void ensureIndexCompatibility(final Version nodeVersion, Metadata metadata) { Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion(); @@ -415,14 +421,18 @@ public static void ensureIndexCompatibility(final Version nodeVersion, Metadata } } - /** ensures that the joining node has a version that's compatible with all current nodes*/ + /** + * ensures that the joining node has a version that's compatible with all current nodes + */ public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) { final Version minNodeVersion = currentNodes.getMinNodeVersion(); final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion); } - /** ensures that the joining node has a version that's compatible with a given version range */ + /** + * ensures that the joining node has a version that's compatible with a given version range + */ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) { assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion; if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) { @@ -466,6 +476,25 @@ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version } } + public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) { + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.custom(DecommissionAttributeMetadata.TYPE); + if (decommissionAttributeMetadata != null) { + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + DecommissionStatus status = decommissionAttributeMetadata.status(); + if (decommissionAttribute != null && status != null) { + // We will let the node join the cluster if the current status is not IN_PROGRESS or SUCCESSFUL + if (node.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()) + && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { + throw new NodeDecommissionedException( + "node [{}] has decommissioned attribute [{}].", + node.toString(), + decommissionAttribute.toString() + ); + } + } + } + } + public static Collection> addBuiltInJoinValidators( Collection> onJoinValidators ) { @@ -473,6 +502,7 @@ public static Collection> addBuiltInJoin validators.add((node, state) -> { ensureNodesCompatibility(node.getVersion(), state.getNodes()); ensureIndexCompatibility(node.getVersion(), state.getMetadata()); + ensureNodeCommissioned(node, state.getMetadata()); }); validators.addAll(onJoinValidators); return Collections.unmodifiableCollection(validators); diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java new file mode 100644 index 0000000000000..bf2487a1a0e18 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the 
Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.decommission; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Objects; + +/** + * {@link DecommissionAttribute} encapsulates information about decommissioned node attribute like attribute name, attribute value. + * + * @opensearch.internal + */ +public final class DecommissionAttribute implements Writeable { + private final String attributeName; + private final String attributeValue; + + /** + * Constructs new decommission attribute name value pair + * + * @param attributeName attribute name + * @param attributeValue attribute value + */ + public DecommissionAttribute(String attributeName, String attributeValue) { + this.attributeName = attributeName; + this.attributeValue = attributeValue; + } + + /** + * Returns attribute name + * + * @return attributeName + */ + public String attributeName() { + return this.attributeName; + } + + /** + * Returns attribute value + * + * @return attributeValue + */ + public String attributeValue() { + return this.attributeValue; + } + + public DecommissionAttribute(StreamInput in) throws IOException { + attributeName = in.readString(); + attributeValue = in.readString(); + } + + /** + * Writes decommission attribute name value to stream output + * + * @param out stream output + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(attributeName); + out.writeString(attributeValue); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DecommissionAttribute that = (DecommissionAttribute) o; + + if (!attributeName.equals(that.attributeName)) return false; + return attributeValue.equals(that.attributeValue); + } + + @Override + public int hashCode() { + return Objects.hash(attributeName, attributeValue); + } + + @Override + public String toString() { + return "DecommissionAttribute{" + "attributeName='" + attributeName + '\'' + ", attributeValue='" + attributeValue + '\'' + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java new file mode 100644 index 0000000000000..009161ce66fc6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -0,0 +1,227 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
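Editor's note: as a quick illustration of the Writeable contract of the DecommissionAttribute class introduced above, here is a minimal sketch (test-style, class name hypothetical) of a stream round trip; it is not part of the patch.

    import org.opensearch.cluster.decommission.DecommissionAttribute;
    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.common.io.stream.StreamInput;

    public class DecommissionAttributeRoundTrip {
        public static void main(String[] args) throws Exception {
            DecommissionAttribute original = new DecommissionAttribute("zone", "zone-1");
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                original.writeTo(out);                       // serialize the name/value pair
                try (StreamInput in = out.bytes().streamInput()) {
                    DecommissionAttribute copy = new DecommissionAttribute(in);
                    assert original.equals(copy);            // equality is by attribute name and value
                }
            }
        }
    }
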
+ */ + +package org.opensearch.cluster.decommission; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; +import org.opensearch.cluster.AbstractNamedDiffable; +import org.opensearch.cluster.NamedDiff; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Objects; + +/** + * Contains metadata about decommission attribute + * + * @opensearch.internal + */ +public class DecommissionAttributeMetadata extends AbstractNamedDiffable implements Custom { + + public static final String TYPE = "decommissionedAttribute"; + + private final DecommissionAttribute decommissionAttribute; + private final DecommissionStatus status; + public static final String attributeType = "awareness"; + + /** + * Constructs new decommission attribute metadata with given status + * + * @param decommissionAttribute attribute details + * @param status current status of the attribute decommission + */ + public DecommissionAttributeMetadata(DecommissionAttribute decommissionAttribute, DecommissionStatus status) { + this.decommissionAttribute = decommissionAttribute; + this.status = status; + } + + /** + * Constructs new decommission attribute metadata with status as {@link DecommissionStatus#INIT} + * + * @param decommissionAttribute attribute details + */ + public DecommissionAttributeMetadata(DecommissionAttribute decommissionAttribute) { + this(decommissionAttribute, DecommissionStatus.INIT); + } + + /** + * Returns the current decommissioned attribute + * + * @return decommissioned attributes + */ + public DecommissionAttribute decommissionAttribute() { + return this.decommissionAttribute; + } + + /** + * Returns the current status of the attribute decommission + * + * @return attribute type + */ + public DecommissionStatus status() { + return this.status; + } + + /** + * Creates a new instance that has the given decommission attribute moved to the given @{@link DecommissionStatus} + * @param status status to be updated with + * @return new instance with updated status + */ + public DecommissionAttributeMetadata withUpdatedStatus(DecommissionStatus status) { + return new DecommissionAttributeMetadata(decommissionAttribute(), status); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DecommissionAttributeMetadata that = (DecommissionAttributeMetadata) o; + + if (!status.equals(that.status)) return false; + return decommissionAttribute.equals(that.decommissionAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(attributeType, decommissionAttribute, status); + } + + /** + * {@inheritDoc} + */ + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_3_0_0; + } + + public DecommissionAttributeMetadata(StreamInput in) throws IOException { + this.decommissionAttribute = new DecommissionAttribute(in); + this.status = DecommissionStatus.fromString(in.readString()); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return 
readDiffFrom(Custom.class, TYPE, in); + } + + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + decommissionAttribute.writeTo(out); + out.writeString(status.status()); + } + + public static DecommissionAttributeMetadata fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + DecommissionAttribute decommissionAttribute = null; + DecommissionStatus status = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (attributeType.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException( + "failed to parse decommission attribute type [{}], expected object", + attributeType + ); + } + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String fieldName = parser.currentName(); + String value; + token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_STRING) { + value = parser.text(); + } else { + throw new OpenSearchParseException( + "failed to parse attribute [{}], expected string for attribute value", + fieldName + ); + } + decommissionAttribute = new DecommissionAttribute(fieldName, value); + parser.nextToken(); + } else { + throw new OpenSearchParseException("failed to parse attribute type [{}], unexpected type", attributeType); + } + } else { + throw new OpenSearchParseException("failed to parse attribute type [{}]", attributeType); + } + } else if ("status".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException( + "failed to parse status of decommissioning, expected string but found unknown type" + ); + } + status = DecommissionStatus.fromString(parser.text()); + } else { + throw new OpenSearchParseException( + "unknown field found [{}], failed to parse the decommission attribute", + currentFieldName + ); + } + } + } + return new DecommissionAttributeMetadata(decommissionAttribute, status); + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + toXContent(decommissionAttribute, status, attributeType, builder, params); + return builder; + } + + @Override + public EnumSet context() { + return Metadata.API_AND_GATEWAY; + } + + /** + * @param decommissionAttribute decommission attribute + * @param status decommission status + * @param attributeType attribute type + * @param builder XContent builder + * @param params serialization parameters + */ + public static void toXContent( + DecommissionAttribute decommissionAttribute, + DecommissionStatus status, + String attributeType, + XContentBuilder builder, + ToXContent.Params params + ) throws IOException { + builder.startObject(attributeType); + builder.field(decommissionAttribute.attributeName(), decommissionAttribute.attributeValue()); + builder.endObject(); + builder.field("status", status.status()); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java new file mode 100644 index 0000000000000..715235d7bff3b --- /dev/null +++ 
b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java @@ -0,0 +1,260 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.decommission; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; +import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsResponse; +import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; +import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateObserver; +import org.opensearch.cluster.ClusterStateTaskConfig; +import org.opensearch.cluster.ClusterStateTaskListener; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.coordination.NodeRemovalClusterStateTaskExecutor; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Helper controller class to remove list of nodes from the cluster and update status + * + * @opensearch.internal + */ + +public class DecommissionController { + + private static final Logger logger = LogManager.getLogger(DecommissionController.class); + + private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; + private final ClusterService clusterService; + private final TransportService transportService; + private final ThreadPool threadPool; + + DecommissionController( + ClusterService clusterService, + TransportService transportService, + AllocationService allocationService, + ThreadPool threadPool + ) { + this.clusterService = clusterService; + this.transportService = transportService; + this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); + this.threadPool = threadPool; + } + + /** + * Transport call to add nodes to voting config exclusion + * + * @param nodes set of nodes Ids to be added to voting config exclusion list + * @param listener callback for response or failure + */ + public void excludeDecommissionedNodesFromVotingConfig(Set nodes, ActionListener listener) { + transportService.sendRequest( + transportService.getLocalNode(), + AddVotingConfigExclusionsAction.NAME, + new 
AddVotingConfigExclusionsRequest( + Strings.EMPTY_ARRAY, + nodes.toArray(String[]::new), + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ), + new TransportResponseHandler() { + @Override + public void handleResponse(AddVotingConfigExclusionsResponse response) { + listener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public AddVotingConfigExclusionsResponse read(StreamInput in) throws IOException { + return new AddVotingConfigExclusionsResponse(in); + } + } + ); + } + + /** + * Transport call to clear voting config exclusion + * + * @param listener callback for response or failure + */ + public void clearVotingConfigExclusion(ActionListener listener) { + final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + transportService.sendRequest( + transportService.getLocalNode(), + ClearVotingConfigExclusionsAction.NAME, + clearVotingConfigExclusionsRequest, + new TransportResponseHandler() { + @Override + public void handleResponse(ClearVotingConfigExclusionsResponse response) { + listener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public ClearVotingConfigExclusionsResponse read(StreamInput in) throws IOException { + return new ClearVotingConfigExclusionsResponse(in); + } + } + ); + } + + /** + * This method triggers batch of tasks for nodes to be decommissioned using executor {@link NodeRemovalClusterStateTaskExecutor} + * Once the tasks are submitted, it waits for an expected cluster state to guarantee + * that the expected decommissioned nodes are removed from the cluster + * + * @param nodesToBeDecommissioned set of the node to be decommissioned + * @param reason reason of removal + * @param timeout timeout for the request + * @param nodesRemovedListener callback for the success or failure + */ + public void removeDecommissionedNodes( + Set nodesToBeDecommissioned, + String reason, + TimeValue timeout, + ActionListener nodesRemovedListener + ) { + final Map nodesDecommissionTasks = new LinkedHashMap<>( + nodesToBeDecommissioned.size() + ); + nodesToBeDecommissioned.forEach(discoveryNode -> { + final NodeRemovalClusterStateTaskExecutor.Task task = new NodeRemovalClusterStateTaskExecutor.Task(discoveryNode, reason); + nodesDecommissionTasks.put(task, nodeRemovalExecutor); + }); + clusterService.submitStateUpdateTasks( + "node-decommissioned", + nodesDecommissionTasks, + ClusterStateTaskConfig.build(Priority.URGENT), + nodeRemovalExecutor + ); + + Predicate allDecommissionedNodesRemovedPredicate = clusterState -> { + Set intersection = Arrays.stream(clusterState.nodes().getNodes().values().toArray(DiscoveryNode.class)) + .collect(Collectors.toSet()); + intersection.retainAll(nodesToBeDecommissioned); + return intersection.size() == 0; + }; + + final ClusterStateObserver observer = new ClusterStateObserver(clusterService, timeout, logger, threadPool.getThreadContext()); + + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + logger.info("successfully removed all decommissioned nodes [{}] from the cluster", nodesToBeDecommissioned.toString()); + nodesRemovedListener.onResponse(null); + } + + @Override 
+ public void onClusterServiceClose() { + logger.warn("cluster service closed while waiting for removal of decommissioned nodes."); + } + + @Override + public void onTimeout(TimeValue timeout) { + logger.info("timed out while waiting for removal of decommissioned nodes"); + nodesRemovedListener.onFailure( + new OpenSearchTimeoutException( + "timed out [{}] while waiting for removal of decommissioned nodes [{}] to take effect", + timeout.toString(), + nodesToBeDecommissioned.toString() + ) + ); + } + }, allDecommissionedNodesRemovedPredicate); + } + + /** + * This method updates the status in the currently registered metadata. + * This method also validates the status with its previous state before executing the request + * + * @param decommissionStatus status to update decommission metadata with + * @param listener listener for response and failure + */ + public void updateMetadataWithDecommissionStatus(DecommissionStatus decommissionStatus, ActionListener listener) { + clusterService.submitStateUpdateTask(decommissionStatus.status(), new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Metadata metadata = currentState.metadata(); + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.custom(DecommissionAttributeMetadata.TYPE); + assert decommissionAttributeMetadata != null && decommissionAttributeMetadata.decommissionAttribute() != null; + // we need to update the status only when the previous stage is just behind than expected stage + // if the previous stage is already ahead of expected stage, we don't need to update the stage + // For failures, we update it no matter what + int previousStage = decommissionAttributeMetadata.status().stage(); + int expectedStage = decommissionStatus.stage(); + if (previousStage >= expectedStage) return currentState; + if (expectedStage - previousStage != 1 && !decommissionStatus.equals(DecommissionStatus.FAILED)) { + throw new DecommissioningFailedException( + decommissionAttributeMetadata.decommissionAttribute(), + "invalid previous decommission status found while updating status" + ); + } + Metadata.Builder mdBuilder = Metadata.builder(metadata); + DecommissionAttributeMetadata newMetadata = decommissionAttributeMetadata.withUpdatedStatus(decommissionStatus); + mdBuilder.putCustom(DecommissionAttributeMetadata.TYPE, newMetadata); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + DecommissionAttributeMetadata decommissionAttributeMetadata = newState.metadata() + .custom(DecommissionAttributeMetadata.TYPE); + listener.onResponse(decommissionAttributeMetadata.status()); + } + }); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java new file mode 100644 index 0000000000000..5b04c97759b94 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -0,0 +1,469 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
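Editor's note: for orientation, a hedged sketch of how a caller might drive the controller's removeDecommissionedNodes API shown in this file; the class name, the source of 'controller' and 'nodesInZone', and the timeout value are illustrative, not part of the patch.

    import java.util.Set;

    import org.opensearch.action.ActionListener;
    import org.opensearch.cluster.decommission.DecommissionController;
    import org.opensearch.cluster.node.DiscoveryNode;
    import org.opensearch.common.unit.TimeValue;

    class DecommissionControllerUsageSketch {
        // Hypothetical caller; 'controller' and 'nodesInZone' would come from the service layer.
        static void drainAndRemove(DecommissionController controller, Set<DiscoveryNode> nodesInZone) {
            controller.removeDecommissionedNodes(
                nodesInZone,                     // nodes carrying the decommissioned attribute value
                "nodes-decommissioned",          // reason recorded on the node-removal task
                TimeValue.timeValueSeconds(30L), // how long to wait for the expected cluster state
                ActionListener.wrap(
                    ignored -> { /* all decommissioned nodes observed as removed */ },
                    e -> { /* timed out or cluster service closed while waiting */ }
                )
            );
        }
    }
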
+ */ + +package org.opensearch.cluster.decommission; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.NotClusterManagerException; +import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING; +import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING; + +/** + * Service responsible for entire lifecycle of decommissioning and recommissioning an awareness attribute. + *

+ * <p>
+ * Whenever a cluster manager initiates operation to decommission an awareness attribute,
+ * the service makes the best attempt to perform the following task -
+ * <ul>
+ * <li>Initiates nodes decommissioning by adding custom metadata with the attribute and state as {@link DecommissionStatus#INIT}</li>
+ * <li>Remove cluster-manager eligible nodes from voting config</li>
+ * <li>Triggers weigh away for nodes having given awareness attribute to drain. This marks the decommission status as {@link DecommissionStatus#IN_PROGRESS}</li>
+ * <li>Once weighed away, the service triggers nodes decommission</li>
+ * <li>Once the decommission is successful, the service clears the voting config and marks the status as {@link DecommissionStatus#SUCCESSFUL}</li>
+ * <li>If service fails at any step, it would mark the status as {@link DecommissionStatus#FAILED}</li>
+ * </ul>
+ * + * @opensearch.internal + */ +public class DecommissionService { + + private static final Logger logger = LogManager.getLogger(DecommissionService.class); + + private final ClusterService clusterService; + private final TransportService transportService; + private final ThreadPool threadPool; + private final DecommissionController decommissionController; + private volatile List awarenessAttributes; + private volatile Map> forcedAwarenessAttributes; + + @Inject + public DecommissionService( + Settings settings, + ClusterSettings clusterSettings, + ClusterService clusterService, + TransportService transportService, + ThreadPool threadPool, + AllocationService allocationService + ) { + this.clusterService = clusterService; + this.transportService = transportService; + this.threadPool = threadPool; + this.decommissionController = new DecommissionController(clusterService, transportService, allocationService, threadPool); + this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); + + setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + this::setForcedAwarenessAttributes + ); + } + + private void setAwarenessAttributes(List awarenessAttributes) { + this.awarenessAttributes = awarenessAttributes; + } + + private void setForcedAwarenessAttributes(Settings forceSettings) { + Map> forcedAwarenessAttributes = new HashMap<>(); + Map forceGroups = forceSettings.getAsGroups(); + for (Map.Entry entry : forceGroups.entrySet()) { + List aValues = entry.getValue().getAsList("values"); + if (aValues.size() > 0) { + forcedAwarenessAttributes.put(entry.getKey(), aValues); + } + } + this.forcedAwarenessAttributes = forcedAwarenessAttributes; + } + + /** + * Starts the new decommission request and registers the metadata with status as {@link DecommissionStatus#INIT} + * or the last known status if not {@link DecommissionStatus#FAILED} + * Once the status is updated, it tries to exclude to-be-decommissioned cluster manager nodes from Voting Configuration + * + * @param decommissionAttribute register decommission attribute in the metadata request + * @param listener register decommission listener + */ + public synchronized void startDecommissionAction( + final DecommissionAttribute decommissionAttribute, + final ActionListener listener + ) { + // validates if correct awareness attributes and forced awareness attribute set to the cluster before starting action + validateAwarenessAttribute(decommissionAttribute, awarenessAttributes, forcedAwarenessAttributes); + + // register the metadata with status as DECOMMISSION_INIT as first step + clusterService.submitStateUpdateTask("decommission [" + decommissionAttribute + "]", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Metadata metadata = currentState.metadata(); + Metadata.Builder mdBuilder = Metadata.builder(metadata); + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.custom(DecommissionAttributeMetadata.TYPE); + // check if the same attribute is requested for decommission and currently not FAILED or SUCCESS, then return the current + // state as is + if (decommissionAttributeMetadata != null + && 
decommissionAttributeMetadata.decommissionAttribute().equals(decommissionAttribute) + && !decommissionAttributeMetadata.status().equals(DecommissionStatus.FAILED) + && !decommissionAttributeMetadata.status().equals(DecommissionStatus.SUCCESSFUL)) { + logger.info("re-request received for decommissioning [{}], will not update state", decommissionAttribute); + return currentState; + } + // check the request sanity and reject the request if there's any inflight or successful request already present + ensureNoInflightRequest(decommissionAttributeMetadata, decommissionAttribute); + decommissionAttributeMetadata = new DecommissionAttributeMetadata(decommissionAttribute); + mdBuilder.putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata); + logger.info("registering decommission metadata [{}] to execute action", decommissionAttributeMetadata.toString()); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error( + () -> new ParameterizedMessage( + "failed to start decommission action for attribute [{}]", + decommissionAttribute.toString() + ), + e + ); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + DecommissionAttributeMetadata decommissionAttributeMetadata = newState.metadata() + .custom(DecommissionAttributeMetadata.TYPE); + assert decommissionAttribute.equals(decommissionAttributeMetadata.decommissionAttribute()); + decommissionClusterManagerNodes(decommissionAttributeMetadata.decommissionAttribute(), listener); + } + }); + } + + private synchronized void decommissionClusterManagerNodes( + final DecommissionAttribute decommissionAttribute, + ActionListener listener + ) { + ClusterState state = clusterService.getClusterApplierService().state(); + Set clusterManagerNodesToBeDecommissioned = filterNodesWithDecommissionAttribute(state, decommissionAttribute, true); + // This check doesn't seem to be needed as exclusion automatically shrinks the config before sending the response. + // We can guarantee that because of exclusion there wouldn't be a quorum loss and if the service gets a successful response, + // we are certain that the config is updated and nodes are ready to be kicked out. + // Please add comment if you feel there could be a edge case here. 
+ // try { + // // this is a sanity check that the cluster will not go into a quorum loss state because of exclusion + // ensureNoQuorumLossDueToDecommissioning( + // decommissionAttribute, + // clusterManagerNodesToBeDecommissioned, + // state.getLastCommittedConfiguration() + // ); + // } catch (DecommissioningFailedException dfe) { + // listener.onFailure(dfe); + // decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); + // return; + // } + + ActionListener exclusionListener = new ActionListener() { + @Override + public void onResponse(Void unused) { + if (transportService.getLocalNode().isClusterManagerNode() + && !nodeHasDecommissionedAttribute(transportService.getLocalNode(), decommissionAttribute)) { + logger.info("will attempt to fail decommissioned nodes as local node is eligible to process the request"); + // we are good here to send the response now as the request is processed by an eligible active leader + // and to-be-decommissioned cluster manager is no more part of Voting Configuration + listener.onResponse(new DecommissionResponse(true)); + failDecommissionedNodes(clusterService.getClusterApplierService().state()); + } else { + // explicitly calling listener.onFailure with NotClusterManagerException as we can certainly say that + // the local cluster manager node will be abdicated and soon will no longer be cluster manager. + // this will ensure that request is retried until cluster manager times out + logger.info( + "local node is not eligible to process the request, " + + "throwing NotClusterManagerException to attempt a retry on an eligible node" + ); + listener.onFailure( + new NotClusterManagerException( + "node [" + + transportService.getLocalNode().toString() + + "] not eligible to execute decommission request. Will retry until timeout." 
+ ) + ); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + // attempting to mark the status as FAILED + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); + } + }; + + // remove all 'to-be-decommissioned' cluster manager eligible nodes from voting config + Set nodeIdsToBeExcluded = clusterManagerNodesToBeDecommissioned.stream() + .map(DiscoveryNode::getId) + .collect(Collectors.toSet()); + + final Predicate allNodesRemoved = clusterState -> { + final Set votingConfigNodeIds = clusterState.getLastCommittedConfiguration().getNodeIds(); + return nodeIdsToBeExcluded.stream().noneMatch(votingConfigNodeIds::contains); + }; + if (allNodesRemoved.test(clusterService.getClusterApplierService().state())) { + exclusionListener.onResponse(null); + } else { + // send a transport request to exclude to-be-decommissioned cluster manager eligible nodes from voting config + decommissionController.excludeDecommissionedNodesFromVotingConfig(nodeIdsToBeExcluded, new ActionListener() { + @Override + public void onResponse(Void unused) { + logger.info( + "successfully removed decommissioned cluster manager eligible nodes [{}] from voting config ", + clusterManagerNodesToBeDecommissioned.toString() + ); + exclusionListener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + logger.debug( + new ParameterizedMessage("failure in removing decommissioned cluster manager eligible nodes from voting config"), + e + ); + exclusionListener.onFailure(e); + } + }); + } + } + + private void failDecommissionedNodes(ClusterState state) { + // this method ensures no matter what, we always exit from this function after clearing the voting config exclusion + DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().custom(DecommissionAttributeMetadata.TYPE); + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.IN_PROGRESS, new ActionListener<>() { + @Override + public void onResponse(DecommissionStatus status) { + logger.info("updated the decommission status to [{}]", status.toString()); + // execute nodes decommissioning + decommissionController.removeDecommissionedNodes( + filterNodesWithDecommissionAttribute(clusterService.getClusterApplierService().state(), decommissionAttribute, false), + "nodes-decommissioned", + TimeValue.timeValueSeconds(30L), // TODO - read timeout from request while integrating with API + new ActionListener() { + @Override + public void onResponse(Void unused) { + clearVotingConfigExclusionAndUpdateStatus(true); + } + + @Override + public void onFailure(Exception e) { + clearVotingConfigExclusionAndUpdateStatus(false); + } + } + ); + } + + @Override + public void onFailure(Exception e) { + logger.error( + () -> new ParameterizedMessage( + "failed to update decommission status for attribute [{}] to [{}]", + decommissionAttribute.toString(), + DecommissionStatus.IN_PROGRESS + ), + e + ); + // since we are not able to update the status, we will clear the voting config exclusion we have set earlier + clearVotingConfigExclusionAndUpdateStatus(false); + } + }); + } + + private void clearVotingConfigExclusionAndUpdateStatus(boolean decommissionSuccessful) { + decommissionController.clearVotingConfigExclusion(new ActionListener() { + @Override + public void onResponse(Void unused) { + logger.info( + "successfully cleared voting config 
exclusion after completing decommission action, proceeding to update metadata" + ); + DecommissionStatus updateStatusWith = decommissionSuccessful ? DecommissionStatus.SUCCESSFUL : DecommissionStatus.FAILED; + decommissionController.updateMetadataWithDecommissionStatus(updateStatusWith, statusUpdateListener()); + } + + @Override + public void onFailure(Exception e) { + logger.debug( + new ParameterizedMessage("failure in clearing voting config exclusion after processing decommission request"), + e + ); + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); + } + }); + } + + private Set filterNodesWithDecommissionAttribute( + ClusterState clusterState, + DecommissionAttribute decommissionAttribute, + boolean onlyClusterManagerNodes + ) { + Set nodesWithDecommissionAttribute = new HashSet<>(); + Iterator nodesIter = onlyClusterManagerNodes + ? clusterState.nodes().getClusterManagerNodes().valuesIt() + : clusterState.nodes().getNodes().valuesIt(); + + while (nodesIter.hasNext()) { + final DiscoveryNode node = nodesIter.next(); + if (nodeHasDecommissionedAttribute(node, decommissionAttribute)) { + nodesWithDecommissionAttribute.add(node); + } + } + return nodesWithDecommissionAttribute; + } + + private static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { + return discoveryNode.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()); + } + + private static void validateAwarenessAttribute( + final DecommissionAttribute decommissionAttribute, + List awarenessAttributes, + Map> forcedAwarenessAttributes + ) { + String msg = null; + if (awarenessAttributes == null) { + msg = "awareness attribute not set to the cluster."; + } else if (forcedAwarenessAttributes == null) { + msg = "forced awareness attribute not set to the cluster."; + } else if (!awarenessAttributes.contains(decommissionAttribute.attributeName())) { + msg = "invalid awareness attribute requested for decommissioning"; + } else if (!forcedAwarenessAttributes.containsKey(decommissionAttribute.attributeName())) { + msg = "forced awareness attribute [" + forcedAwarenessAttributes.toString() + "] doesn't have the decommissioning attribute"; + } else if (!forcedAwarenessAttributes.get(decommissionAttribute.attributeName()).contains(decommissionAttribute.attributeValue())) { + msg = "invalid awareness attribute value requested for decommissioning. Set forced awareness values before to decommission"; + } + + if (msg != null) { + throw new DecommissioningFailedException(decommissionAttribute, msg); + } + } + + private static void ensureNoInflightRequest( + DecommissionAttributeMetadata decommissionAttributeMetadata, + DecommissionAttribute decommissionAttribute + ) { + String msg = null; + if (decommissionAttributeMetadata != null) { + switch (decommissionAttributeMetadata.status()) { + case SUCCESSFUL: + // one awareness attribute is already decommissioned. We will reject the new request + msg = "one awareness attribute [" + + decommissionAttributeMetadata.decommissionAttribute().toString() + + "] already successfully decommissioned, recommission before triggering another decommission"; + break; + case IN_PROGRESS: + case INIT: + // it means the decommission has been initiated or is inflight. 
In that case, will fail new request + msg = "there's an inflight decommission request for attribute [" + + decommissionAttributeMetadata.decommissionAttribute().toString() + + "] is in progress, cannot process this request"; + break; + case FAILED: + break; + } + } + if (msg != null) { + throw new DecommissioningFailedException(decommissionAttribute, msg); + } + } + + private static void ensureNoQuorumLossDueToDecommissioning( + DecommissionAttribute decommissionAttribute, + Set clusterManagerNodesToBeDecommissioned, + CoordinationMetadata.VotingConfiguration votingConfiguration + ) { + Set clusterManagerNodesIdToBeDecommissioned = new HashSet<>(); + clusterManagerNodesToBeDecommissioned.forEach(node -> clusterManagerNodesIdToBeDecommissioned.add(node.getId())); + if (!votingConfiguration.hasQuorum( + votingConfiguration.getNodeIds() + .stream() + .filter(n -> clusterManagerNodesIdToBeDecommissioned.contains(n) == false) + .collect(Collectors.toList()) + )) { + throw new DecommissioningFailedException( + decommissionAttribute, + "cannot proceed with decommission request as cluster might go into quorum loss" + ); + } + } + + private ActionListener statusUpdateListener() { + return new ActionListener() { + @Override + public void onResponse(DecommissionStatus status) { + logger.info("updated the decommission status to [{}]", status.toString()); + } + + @Override + public void onFailure(Exception e) { + logger.error("unexpected failure during status update", e); + } + }; + } + + public void clearDecommissionStatus(final ActionListener listener) { + clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + return deleteDecommissionAttribute(currentState); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("Failed to clear decommission attribute."), e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + // Once the cluster state is processed we can try to recommission nodes by setting the weights for the zone. + // TODO Set the weights for the recommissioning zone. + listener.onResponse(new ClusterStateUpdateResponse(true)); + } + }); + } + + ClusterState deleteDecommissionAttribute(final ClusterState currentState) { + logger.info("Delete decommission request received"); + Metadata metadata = currentState.metadata(); + Metadata.Builder mdBuilder = Metadata.builder(metadata); + mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java new file mode 100644 index 0000000000000..ba3dec4ded94a --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
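Editor's note: to make the entry point of DecommissionService concrete, a hedged sketch of how a transport-action layer might invoke startDecommissionAction; the class name, attribute values, and listener wiring are assumptions, not part of the patch.

    import org.opensearch.action.ActionListener;
    import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse;
    import org.opensearch.cluster.decommission.DecommissionAttribute;
    import org.opensearch.cluster.decommission.DecommissionService;

    class DecommissionServiceUsageSketch {
        // Hypothetical glue code: decommission the awareness attribute zone=zone-2.
        static void decommissionZone(DecommissionService service, ActionListener<DecommissionResponse> listener) {
            DecommissionAttribute attribute = new DecommissionAttribute("zone", "zone-2");
            // Registers the metadata with status INIT, excludes to-be-decommissioned cluster managers
            // from the voting configuration, then drains and removes the matching nodes.
            service.startDecommissionAction(attribute, listener);
        }
    }
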
+ */ + +package org.opensearch.cluster.decommission; + +/** + * An enumeration of the states during decommissioning + */ +public enum DecommissionStatus { + /** + * Decommission process is initiated, and to-be-decommissioned leader is excluded from voting config + */ + INIT("init", 0), + /** + * Decommission process has started, decommissioned nodes should be removed + */ + IN_PROGRESS("in_progress", 1), + /** + * Decommission action completed + */ + SUCCESSFUL("successful", 2), + /** + * Decommission request failed + */ + FAILED("failed", -1); + + private final String status; + private final int stage; + + DecommissionStatus(String status, int stage) { + this.status = status; + this.stage = stage; + } + + /** + * Returns status that represents the decommission state + * + * @return status + */ + public String status() { + return status; + } + + /** + * Returns stage that represents the decommission stage + */ + public int stage() { + return stage; + } + + /** + * Generate decommission status from given string + * + * @param status status in string + * @return status + */ + public static DecommissionStatus fromString(String status) { + if (status == null) { + throw new IllegalArgumentException("decommission status cannot be null"); + } + if (status.equals(INIT.status())) { + return INIT; + } else if (status.equals(IN_PROGRESS.status())) { + return IN_PROGRESS; + } else if (status.equals(SUCCESSFUL.status())) { + return SUCCESSFUL; + } else if (status.equals(FAILED.status())) { + return FAILED; + } + throw new IllegalStateException("Decommission status [" + status + "] not recognized."); + } + + /** + * Generate decommission status from given stage + * + * @param stage stage in int + * @return status + */ + public static DecommissionStatus fromStage(int stage) { + if (stage == INIT.stage()) { + return INIT; + } else if (stage == IN_PROGRESS.stage()) { + return IN_PROGRESS; + } else if (stage == SUCCESSFUL.stage()) { + return SUCCESSFUL; + } else if (stage == FAILED.stage()) { + return FAILED; + } + throw new IllegalStateException("Decommission stage [" + stage + "] not recognized."); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java new file mode 100644 index 0000000000000..fe1b9368ac712 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
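Editor's note: a small sketch of the string and stage round trips defined by the DecommissionStatus enum above (class name hypothetical).

    import org.opensearch.cluster.decommission.DecommissionStatus;

    public class DecommissionStatusExample {
        public static void main(String[] args) {
            // String form used when the metadata custom is (de)serialized.
            assert DecommissionStatus.fromString("in_progress") == DecommissionStatus.IN_PROGRESS;
            // Stages order the happy path INIT(0) -> IN_PROGRESS(1) -> SUCCESSFUL(2); FAILED is stage -1.
            assert DecommissionStatus.fromStage(2) == DecommissionStatus.SUCCESSFUL;
            assert DecommissionStatus.FAILED.stage() == -1;
        }
    }
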
+ */ + +package org.opensearch.cluster.decommission; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * This exception is thrown whenever a failure occurs in decommission request @{@link DecommissionService} + * + * @opensearch.internal + */ + +public class DecommissioningFailedException extends OpenSearchException { + + private final DecommissionAttribute decommissionAttribute; + + public DecommissioningFailedException(DecommissionAttribute decommissionAttribute, String msg) { + this(decommissionAttribute, msg, null); + } + + public DecommissioningFailedException(DecommissionAttribute decommissionAttribute, String msg, Throwable cause) { + super("[" + (decommissionAttribute == null ? "_na" : decommissionAttribute.toString()) + "] " + msg, cause); + this.decommissionAttribute = decommissionAttribute; + } + + public DecommissioningFailedException(StreamInput in) throws IOException { + super(in); + decommissionAttribute = new DecommissionAttribute(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + decommissionAttribute.writeTo(out); + } + + /** + * Returns decommission attribute + * + * @return decommission attribute + */ + public DecommissionAttribute decommissionAttribute() { + return decommissionAttribute; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/NodeDecommissionedException.java b/server/src/main/java/org/opensearch/cluster/decommission/NodeDecommissionedException.java new file mode 100644 index 0000000000000..847d5a527b017 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/NodeDecommissionedException.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.decommission; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * This exception is thrown if the node is decommissioned by @{@link DecommissionService} + * and this nodes needs to be removed from the cluster + * + * @opensearch.internal + */ +public class NodeDecommissionedException extends OpenSearchException { + + public NodeDecommissionedException(String msg, Object... args) { + super(msg, args); + } + + public NodeDecommissionedException(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/package-info.java b/server/src/main/java/org/opensearch/cluster/decommission/package-info.java new file mode 100644 index 0000000000000..256c2f22253cc --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
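Editor's note: the JoinHelper change earlier in this diff detects decommissioning by unwrapping the join failure as a RemoteTransportException whose cause is a NodeDecommissionedException; a minimal sketch of that detection pattern follows (class and method names are illustrative).

    import org.opensearch.cluster.decommission.NodeDecommissionedException;
    import org.opensearch.transport.RemoteTransportException;
    import org.opensearch.transport.TransportException;

    class DecommissionedJoinFailureSketch {
        // A failed join means the local node was decommissioned only when the remote
        // cluster manager rejected it with a NodeDecommissionedException.
        static boolean isLocalNodeDecommissioned(TransportException exp) {
            return exp instanceof RemoteTransportException && exp.getCause() instanceof NodeDecommissionedException;
        }
    }
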
+ */ + +/** + * Decommission lifecycle classes + */ +package org.opensearch.cluster.decommission; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 759891e88039b..cd1c92a8b109f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -285,6 +285,8 @@ public Iterator> settings() { public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled"; + public static final String SETTING_REMOTE_STORE_REPOSITORY = "index.remote_store.repository"; + public static final String SETTING_REMOTE_TRANSLOG_STORE_ENABLED = "index.remote_store.translog.enabled"; /** * Used to specify if the index data should be persisted in the remote store. @@ -322,6 +324,50 @@ public Iterator> settings() { Property.Final ); + /** + * Used to specify remote store repository to use for this index. + */ + public static final Setting INDEX_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString( + SETTING_REMOTE_STORE_REPOSITORY, + new Setting.Validator<>() { + + @Override + public void validate(final String value) {} + + @Override + public void validate(final String value, final Map, Object> settings) { + if (value == null || value.isEmpty()) { + throw new IllegalArgumentException( + "Setting " + INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey() + " should be provided with non-empty repository ID" + ); + } else { + validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_STORE_REPOSITORY_SETTING); + } + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, + Property.IndexScope, + Property.Final + ); + + private static void validateRemoteStoreSettingEnabled(final Map, Object> settings, Setting setting) { + final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (isRemoteSegmentStoreEnabled == false) { + throw new IllegalArgumentException( + "Settings " + + setting.getKey() + + " can ont be set/enabled when " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + " is set to true" + ); + } + } + /** * Used to specify if the index translog operations should be persisted in the remote store. 
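Editor's note: assuming the remote store feature flag is enabled, a hedged example of index settings that satisfy the new repository validator added above; the class name and repository name are hypothetical.

    import org.opensearch.common.settings.Settings;

    import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED;
    import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY;

    class RemoteStoreIndexSettingsSketch {
        // index.remote_store.repository is only accepted when index.remote_store.enabled is true.
        static Settings remoteStoreIndexSettings() {
            return Settings.builder()
                .put(SETTING_REMOTE_STORE_ENABLED, true)
                .put(SETTING_REMOTE_STORE_REPOSITORY, "my-segment-repo") // hypothetical repository name
                .build();
        }
    }
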
*/ @@ -335,16 +381,8 @@ public void validate(final Boolean value) {} @Override public void validate(final Boolean value, final Map, Object> settings) { - final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); - if (isRemoteSegmentStoreEnabled == false && value == true) { - throw new IllegalArgumentException( - "Settings " - + INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey() - + " cannot be enabled when " - + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() - + " is set to " - + settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING) - ); + if (value == true) { + validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index 7dec8f9c84a89..e3aa2a666d454 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public final class ShardRouting implements Writeable, ToXContentObject { +public class ShardRouting implements Writeable, ToXContentObject { /** * Used if shard size is not available @@ -78,7 +78,7 @@ public final class ShardRouting implements Writeable, ToXContentObject { * A constructor to internally create shard routing instances, note, the internal flag should only be set to true * by either this class or tests. Visible for testing. */ - ShardRouting( + protected ShardRouting( ShardId shardId, String currentNodeId, String relocatingNodeId, diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 971fb518ff1da..826500ddcf48d 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -533,6 +533,7 @@ public void apply(Settings value, Settings current, Settings previous) { PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING, + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING, PeerFinder.DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING, ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING, ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a3fa2c7ee3112..7be9adc786f24 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -223,7 +223,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FeatureFlags.REPLICATION_TYPE, Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - Arrays.asList(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING) + Arrays.asList( + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING + ) ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new 
IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index a601a6fbe4d82..7bf8cb208a24d 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -84,6 +84,14 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); + // the time between attempts to find all peers when node is in decommissioned state, default set to 2 minutes + public static final Setting DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING = Setting.timeSetting( + "discovery.find_peers_interval_during_decommission", + TimeValue.timeValueSeconds(30L), + TimeValue.timeValueMillis(1000), + Setting.Property.NodeScope + ); + public static final Setting DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING = Setting.timeSetting( "discovery.request_peers_timeout", TimeValue.timeValueMillis(3000), @@ -91,7 +99,8 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); - private final TimeValue findPeersInterval; + private final Settings settings; + private TimeValue findPeersInterval; private final TimeValue requestPeersTimeout; private final Object mutex = new Object(); @@ -101,6 +110,7 @@ public abstract class PeerFinder { private volatile long currentTerm; private boolean active; + private boolean localNodeDecommissioned = false; private DiscoveryNodes lastAcceptedNodes; private final Map peersByAddress = new LinkedHashMap<>(); private Optional leader = Optional.empty(); @@ -112,6 +122,7 @@ public PeerFinder( TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver ) { + this.settings = settings; findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings); this.transportService = transportService; @@ -128,6 +139,32 @@ public PeerFinder( ); } + public ActionListener nodeCommissionedListener() { + return new ActionListener() { + @Override + public void onResponse(Void unused) { + logger.info("setting findPeersInterval to [{}], due to recommissioning", findPeersInterval); + assert localNodeDecommissioned; // TODO: Do we need this? 
+ localNodeDecommissioned = false; + findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); + + } + + @Override + public void onFailure(Exception e) { + logger.info("setting findPeersInterval to [{}], due to decommissioning", + DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING.get(settings)); + assert !localNodeDecommissioned; + localNodeDecommissioned = true; + findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING.get(settings); + } + }; + } + + public boolean localNodeDecommissioned() { + return localNodeDecommissioned; + } + public void activate(final DiscoveryNodes lastAcceptedNodes) { logger.trace("activating with {}", lastAcceptedNodes); diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 953b4def9d653..c43f539243d7a 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -373,7 +373,7 @@ public NodeGatewayStartedShards(StreamInput in) throws IOException { } else { storeException = null; } - if (in.getVersion().onOrAfter(Version.V_3_0_0) && in.readBoolean()) { + if (in.getVersion().onOrAfter(Version.V_2_3_0) && in.readBoolean()) { replicationCheckpoint = new ReplicationCheckpoint(in); } else { replicationCheckpoint = null; @@ -430,7 +430,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_3_0)) { if (replicationCheckpoint != null) { out.writeBoolean(true); replicationCheckpoint.writeTo(out); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index f8604caeab414..e52a2ba39ed52 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,7 +70,6 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -487,7 +486,7 @@ public IndexService newIndexService( NamedWriteableRegistry namedWriteableRegistry, BooleanSupplier idFieldDataEnabled, ValuesSourceRegistry valuesSourceRegistry, - RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index e1427df1c34ab..92f957633db84 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -511,7 +511,7 @@ public synchronized IndexShard createShard( Store remoteStore = null; if (this.indexSettings.isRemoteStoreEnabled()) { Directory remoteDirectory = remoteDirectoryFactory.newDirectory( - clusterService.state().metadata().clusterUUID(), + 
this.indexSettings.getRemoteStoreRepository(), this.indexSettings, path ); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 657cb1ee55cb9..9c7f4804755d4 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -560,6 +560,7 @@ public final class IndexSettings { private final ReplicationType replicationType; private final boolean isRemoteStoreEnabled; private final boolean isRemoteTranslogStoreEnabled; + private final String remoteStoreRepository; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -721,6 +722,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); + remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); @@ -979,6 +981,13 @@ public boolean isRemoteTranslogStoreEnabled() { return isRemoteTranslogStoreEnabled; } + /** + * Returns if remote store is enabled for this index. + */ + public String getRemoteStoreRepository() { + return remoteStoreRepository; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 6f5b7030ed65f..12d420aa245fa 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -54,6 +54,10 @@ public class NRTReplicationEngine extends Engine { private final LocalCheckpointTracker localCheckpointTracker; private final WriteOnlyTranslogManager translogManager; + private volatile long lastReceivedGen = SequenceNumbers.NO_OPS_PERFORMED; + + private static final int SI_COUNTER_INCREMENT = 10; + public NRTReplicationEngine(EngineConfig engineConfig) { super(engineConfig); store.incRef(); @@ -118,14 +122,16 @@ public TranslogManager translogManager() { public synchronized void updateSegments(final SegmentInfos infos, long seqNo) throws IOException { // Update the current infos reference on the Engine's reader. + final long incomingGeneration = infos.getGeneration(); readerManager.updateSegments(infos); - // only update the persistedSeqNo and "lastCommitted" infos reference if the incoming segments have a higher - // generation. We can still refresh with incoming SegmentInfos that are not part of a commit point. - if (infos.getGeneration() > lastCommittedSegmentInfos.getGeneration()) { - this.lastCommittedSegmentInfos = infos; + // Commit and roll the xlog when we receive a different generation than what was last received. 
+ // lower/higher gens are possible from a new primary that was just elected. + if (incomingGeneration != lastReceivedGen) { + commitSegmentInfos(); translogManager.rollTranslogGeneration(); } + lastReceivedGen = incomingGeneration; localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); } @@ -139,13 +145,16 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th * * @throws IOException - When there is an IO error committing the SegmentInfos. */ - public void commitSegmentInfos() throws IOException { - // TODO: This method should wait for replication events to finalize. - final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); - store.commitSegmentInfos(latestSegmentInfos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); + private void commitSegmentInfos(SegmentInfos infos) throws IOException { + store.commitSegmentInfos(infos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); + this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); translogManager.syncTranslog(); } + protected void commitSegmentInfos() throws IOException { + commitSegmentInfos(getLatestSegmentInfos()); + } + @Override public String getHistoryUUID() { return loadHistoryUUID(lastCommittedSegmentInfos.userData); @@ -345,6 +354,15 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself"; try { + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + /* + This is a workaround solution which decreases the chances of conflict on replica nodes when same file is copied + from two different primaries during failover. Increasing counter helps in avoiding this conflict as counter is + used to generate new segment file names. The ideal solution is to identify the counter from previous primary. + */ + latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; + latestSegmentInfos.changed(); + commitSegmentInfos(latestSegmentInfos); IOUtils.close(readerManager, translogManager, store::decRef); } catch (Exception e) { logger.warn("failed to close engine", e); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java index 16e615672a26f..8fbb24720aedc 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java @@ -74,6 +74,9 @@ protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader re * @throws IOException - When Refresh fails with an IOException. */ public synchronized void updateSegments(SegmentInfos infos) throws IOException { + // roll over the currentInfo's generation, this ensures the on-disk gen + // is always increased. 
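// updateGeneration copies the local reader's current generation (and last generation) onto the incoming
// infos, so infos received from a freshly elected primary with a lower generation are still committed
// with a generation no lower than anything this replica has already written to disk.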
+ infos.updateGeneration(currentInfos); currentInfos = infos; maybeRefresh(); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 67a8e691fda0d..28dc0ad49d4ec 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -48,8 +48,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; @@ -625,7 +623,7 @@ public void updateShardState( if (indexSettings.isSegRepEnabled()) { // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. assert newRouting.primary() && currentRouting.primary() == false; - promoteNRTReplicaToPrimary(); + resetEngineToGlobalCheckpoint(); } replicationTracker.activatePrimaryMode(getLocalCheckpoint()); ensurePeerRecoveryRetentionLeasesExist(); @@ -3228,8 +3226,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro final List internalRefreshListener = new ArrayList<>(); internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); if (isRemoteStoreEnabled()) { - Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - internalRefreshListener.add(new RemoteStoreRefreshListener(store.directory(), remoteDirectory)); + internalRefreshListener.add(new RemoteStoreRefreshListener(this)); } if (this.checkpointPublisher != null && indexSettings.isSegRepEnabled() && shardRouting.primary()) { internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); @@ -3560,7 +3557,9 @@ private void innerAcquireReplicaOperationPermit( currentGlobalCheckpoint, maxSeqNo ); - if (currentGlobalCheckpoint < maxSeqNo) { + // With Segment Replication enabled, we never want to reset a replica's engine unless + // it is promoted to primary. + if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabled() == false) { resetEngineToGlobalCheckpoint(); } else { getEngine().translogManager().rollTranslogGeneration(); @@ -4123,26 +4122,4 @@ RetentionLeaseSyncer getRetentionLeaseSyncer() { public GatedCloseable getSegmentInfosSnapshot() { return getEngine().getSegmentInfosSnapshot(); } - - /** - * With segment replication enabled - prepare the shard's engine to be promoted as the new primary. - * - * If this shard is currently using a replication engine, this method: - * 1. Invokes {@link NRTReplicationEngine#commitSegmentInfos()} to ensure the engine can be reopened as writeable from the latest refresh point. - * InternalEngine opens its IndexWriter from an on-disk commit point, but this replica may have recently synced from a primary's refresh point, meaning it has documents searchable in its in-memory SegmentInfos - * that are not part of a commit point. This ensures that those documents are made part of a commit and do not need to be reindexed after promotion. - * 2. Invokes resetEngineToGlobalCheckpoint - This call performs the engine swap, opening up as a writeable engine and replays any operations in the xlog. 
The operations indexed from xlog here will be - * any ack'd writes that were not copied to this replica before promotion. - */ - private void promoteNRTReplicaToPrimary() { - assert shardRouting.primary() && indexSettings.isSegRepEnabled(); - getReplicationEngine().ifPresentOrElse(engine -> { - try { - engine.commitSegmentInfos(); - resetEngineToGlobalCheckpoint(); - } catch (IOException e) { - throw new EngineException(shardId, "Unable to update replica to writeable engine, failing shard", e); - } - }, () -> { throw new EngineException(shardId, "Expected replica engine to be of type NRTReplicationEngine"); }); - } } diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java new file mode 100644 index 0000000000000..d1b2bf9079289 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.shard; + +import java.io.IOException; + +import org.opensearch.common.io.stream.StreamInput; + +/** + * Exception to indicate failures are caused due to the closure of the primary + * shard. + * + * @opensearch.internal + */ +public class PrimaryShardClosedException extends IndexShardClosedException { + public PrimaryShardClosedException(ShardId shardId) { + super(shardId, "Primary closed"); + } + + public PrimaryShardClosedException(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 4b549ec485c0e..a8ca9891d9743 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -11,32 +11,61 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.index.engine.EngineException; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import java.io.IOException; -import java.nio.file.NoSuchFileException; -import java.util.Arrays; -import java.util.HashSet; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; /** * RefreshListener implementation to upload newly created segment files to the remote store + * + * @opensearch.internal */ -public class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { +public final class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { + // Visible for testing + static final Set EXCLUDE_FILES = Set.of("write.lock"); + // Visible for testing + static final int LAST_N_METADATA_FILES_TO_KEEP = 10; + 
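// EXCLUDE_FILES keeps Lucene's write.lock out of the upload set, while LAST_N_METADATA_FILES_TO_KEEP bounds
// remote cleanup: with the value of 10, if the remote store holds, say, 25 metadata files, deleteStaleCommits()
// retains only the 10 newest and deletes segment files that are referenced solely by the 15 older ones.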
private final IndexShard indexShard; private final Directory storeDirectory; - private final Directory remoteDirectory; - // ToDo: This can be a map with metadata of the uploaded file as value of the map (GitHub #3398) - private final Set filesUploadedToRemoteStore; + private final RemoteSegmentStoreDirectory remoteDirectory; + private final Map localSegmentChecksumMap; + private long primaryTerm; private static final Logger logger = LogManager.getLogger(RemoteStoreRefreshListener.class); - public RemoteStoreRefreshListener(Directory storeDirectory, Directory remoteDirectory) throws IOException { - this.storeDirectory = storeDirectory; - this.remoteDirectory = remoteDirectory; - // ToDo: Handle failures in reading list of files (GitHub #3397) - this.filesUploadedToRemoteStore = new HashSet<>(Arrays.asList(remoteDirectory.listAll())); + public RemoteStoreRefreshListener(IndexShard indexShard) { + this.indexShard = indexShard; + this.storeDirectory = indexShard.store().directory(); + this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) + .getDelegate()).getDelegate(); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + localSegmentChecksumMap = new HashMap<>(); + if (indexShard.shardRouting.primary()) { + try { + this.remoteDirectory.init(); + } catch (IOException e) { + logger.error("Exception while initialising RemoteSegmentStoreDirectory", e); + } + } } @Override @@ -46,42 +75,112 @@ public void beforeRefresh() throws IOException { /** * Upload new segment files created as part of the last refresh to the remote segment store. - * The method also deletes segment files from remote store which are not part of local filesystem. + * This method also uploads remote_segments_metadata file which contains metadata of each segment file uploaded. * @param didRefresh true if the refresh opened a new reference - * @throws IOException in case of I/O error in reading list of local files */ @Override - public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh) { - Set localFiles = Set.of(storeDirectory.listAll()); - localFiles.stream().filter(file -> !filesUploadedToRemoteStore.contains(file)).forEach(file -> { - try { - remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); - filesUploadedToRemoteStore.add(file); - } catch (NoSuchFileException e) { - logger.info( - () -> new ParameterizedMessage("The file {} does not exist anymore. 
It can happen in case of temp files", file), - e - ); - } catch (IOException e) { - // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) - logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); - } - }); + public void afterRefresh(boolean didRefresh) { + synchronized (this) { + try { + if (indexShard.shardRouting.primary()) { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + this.remoteDirectory.init(); + } + try { + String lastCommittedLocalSegmentFileName = SegmentInfos.getLastCommitSegmentsFileName(storeDirectory); + if (!remoteDirectory.containsFile( + lastCommittedLocalSegmentFileName, + getChecksumOfLocalFile(lastCommittedLocalSegmentFileName) + )) { + deleteStaleCommits(); + } + try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + Collection refreshedLocalFiles = segmentInfos.files(true); - Set remoteFilesToBeDeleted = new HashSet<>(); - // ToDo: Instead of deleting files in sync, mark them and delete in async/periodic flow (GitHub #3142) - filesUploadedToRemoteStore.stream().filter(file -> !localFiles.contains(file)).forEach(file -> { - try { - remoteDirectory.deleteFile(file); - remoteFilesToBeDeleted.add(file); - } catch (IOException e) { - // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) - logger.warn(() -> new ParameterizedMessage("Exception while deleting file {} from the remote segment store", file), e); + List segmentInfosFiles = refreshedLocalFiles.stream() + .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) + .collect(Collectors.toList()); + Optional latestSegmentInfos = segmentInfosFiles.stream() + .max(Comparator.comparingLong(IndexFileNames::parseGeneration)); + + if (latestSegmentInfos.isPresent()) { + refreshedLocalFiles.addAll(SegmentInfos.readCommit(storeDirectory, latestSegmentInfos.get()).files(true)); + segmentInfosFiles.stream() + .filter(file -> !file.equals(latestSegmentInfos.get())) + .forEach(refreshedLocalFiles::remove); + + boolean uploadStatus = uploadNewSegments(refreshedLocalFiles); + if (uploadStatus) { + remoteDirectory.uploadMetadata( + refreshedLocalFiles, + storeDirectory, + indexShard.getOperationPrimaryTerm(), + segmentInfos.getGeneration() + ); + localSegmentChecksumMap.keySet() + .stream() + .filter(file -> !refreshedLocalFiles.contains(file)) + .collect(Collectors.toSet()) + .forEach(localSegmentChecksumMap::remove); + } + } + } catch (EngineException e) { + logger.warn("Exception while reading SegmentInfosSnapshot", e); + } + } catch (IOException e) { + // We don't want to fail refresh if upload of new segments fails. The missed segments will be re-tried + // in the next refresh. This should not affect durability of the indexed data after remote trans-log integration. 
+ logger.warn("Exception while uploading new segments to the remote segment store", e); + } } - }); + } catch (Throwable t) { + logger.error("Exception in RemoteStoreRefreshListener.afterRefresh()", t); + } + } + } + + // Visible for testing + boolean uploadNewSegments(Collection localFiles) throws IOException { + AtomicBoolean uploadSuccess = new AtomicBoolean(true); + localFiles.stream().filter(file -> !EXCLUDE_FILES.contains(file)).filter(file -> { + try { + return !remoteDirectory.containsFile(file, getChecksumOfLocalFile(file)); + } catch (IOException e) { + logger.info( + "Exception while reading checksum of local segment file: {}, ignoring the exception and re-uploading the file", + file + ); + return true; + } + }).forEach(file -> { + try { + remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + } catch (IOException e) { + uploadSuccess.set(false); + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); + } + }); + return uploadSuccess.get(); + } + + private String getChecksumOfLocalFile(String file) throws IOException { + if (!localSegmentChecksumMap.containsKey(file)) { + try (IndexInput indexInput = storeDirectory.openInput(file, IOContext.DEFAULT)) { + String checksum = Long.toString(CodecUtil.retrieveChecksum(indexInput)); + localSegmentChecksumMap.put(file, checksum); + } + } + return localSegmentChecksumMap.get(file); + } - remoteFilesToBeDeleted.forEach(filesUploadedToRemoteStore::remove); + private void deleteStaleCommits() { + try { + remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP); + } catch (IOException e) { + logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e); } } } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 1190e8e6ab3d2..06916c4cc87fe 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -449,7 +449,12 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco } indexShard.preRecovery(); indexShard.prepareForIndexRecovery(); - final Directory remoteDirectory = remoteStore.directory(); + assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; + FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); + assert remoteStoreDirectory.getDelegate() instanceof FilterDirectory + : "Store.directory is not enclosing an instance of FilterDirectory"; + FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); + final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate(); final Store store = indexShard.store(); final Directory storeDirectory = store.directory(); store.incRef(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java index 8f8d5dd5418ae..2c809563ca961 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java @@ -27,27 +27,37 @@ public class RemoteIndexInput extends IndexInput { private final InputStream inputStream; private 
final long size; + private long filePointer; public RemoteIndexInput(String name, InputStream inputStream, long size) { super(name); this.inputStream = inputStream; this.size = size; + this.filePointer = 0; } @Override public byte readByte() throws IOException { byte[] buffer = new byte[1]; - inputStream.read(buffer); + int numberOfBytesRead = inputStream.read(buffer); + if (numberOfBytesRead != -1) { + filePointer += numberOfBytesRead; + } return buffer[0]; } @Override public void readBytes(byte[] b, int offset, int len) throws IOException { int bytesRead = inputStream.read(b, offset, len); - while (bytesRead > 0 && bytesRead < len) { - len -= bytesRead; - offset += bytesRead; - bytesRead = inputStream.read(b, offset, len); + if (bytesRead == len) { + filePointer += bytesRead; + } else { + while (bytesRead > 0 && bytesRead < len) { + filePointer += bytesRead; + len -= bytesRead; + offset += bytesRead; + bytesRead = inputStream.read(b, offset, len); + } } } @@ -61,11 +71,6 @@ public long length() { return size; } - @Override - public void seek(long pos) throws IOException { - inputStream.skip(pos); - } - /** * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. * This method is not implemented as it is not used for the file transfer to/from the remote store. @@ -73,10 +78,18 @@ public void seek(long pos) throws IOException { * @throws UnsupportedOperationException always */ @Override - public long getFilePointer() { + public void seek(long pos) throws IOException { throw new UnsupportedOperationException(); } + /** + * Returns the current position in this file in terms of number of bytes read so far. + */ + @Override + public long getFilePointer() { + return filePointer; + } + /** * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. * This method is not implemented as it is not used for the file transfer to/from the remote store. diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index d7d6b29d08bfc..505ad6fafd550 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -24,9 +24,13 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; /** @@ -132,8 +136,9 @@ private Map readMetadataFile(String metadataFil /** * Metadata of a segment that is uploaded to remote segment store. 
*/ - static class UploadedSegmentMetadata { - private static final String SEPARATOR = "::"; + public static class UploadedSegmentMetadata { + // Visible for testing + static final String SEPARATOR = "::"; private final String originalFilename; private final String uploadedFilename; private final String checksum; @@ -366,7 +371,69 @@ private String getLocalSegmentFilename(String remoteFilename) { } // Visible for testing - Map getSegmentsUploadedToRemoteStore() { - return this.segmentsUploadedToRemoteStore; + public Map getSegmentsUploadedToRemoteStore() { + return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore); + } + + /** + * Delete stale segment and metadata files + * One metadata file is kept per commit (refresh updates the same file). To read segments uploaded to remote store, + * we just need to read the latest metadata file. All the stale metadata files can be safely deleted. + * @param lastNMetadataFilesToKeep number of metadata files to keep + * @throws IOException in case of I/O error while reading from / writing to remote segment store + */ + public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { + Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); + List sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList()); + if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { + logger.info( + "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", + sortedMetadataFileList.size(), + lastNMetadataFilesToKeep + ); + return; + } + List latestNMetadataFiles = sortedMetadataFileList.subList( + sortedMetadataFileList.size() - lastNMetadataFilesToKeep, + sortedMetadataFileList.size() + ); + Map activeSegmentFilesMetadataMap = new HashMap<>(); + Set activeSegmentRemoteFilenames = new HashSet<>(); + for (String metadataFile : latestNMetadataFiles) { + Map segmentMetadataMap = readMetadataFile(metadataFile); + activeSegmentFilesMetadataMap.putAll(segmentMetadataMap); + activeSegmentRemoteFilenames.addAll( + segmentMetadataMap.values().stream().map(metadata -> metadata.uploadedFilename).collect(Collectors.toSet()) + ); + } + for (String metadataFile : sortedMetadataFileList.subList(0, sortedMetadataFileList.size() - lastNMetadataFilesToKeep)) { + Map staleSegmentFilesMetadataMap = readMetadataFile(metadataFile); + Set staleSegmentRemoteFilenames = staleSegmentFilesMetadataMap.values() + .stream() + .map(metadata -> metadata.uploadedFilename) + .collect(Collectors.toSet()); + AtomicBoolean deletionSuccessful = new AtomicBoolean(true); + staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { + try { + remoteDataDirectory.deleteFile(file); + if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { + segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); + } + } catch (NoSuchFileException e) { + logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile); + } catch (IOException e) { + deletionSuccessful.set(false); + logger.info( + "Exception while deleting segment file {} corresponding to metadata file {}. 
Deletion will be re-tried", + file, + metadataFile + ); + } + }); + if (deletionSuccessful.get()) { + logger.info("Deleting stale metadata file {} from remote segment store", metadataFile); + remoteMetadataDirectory.deleteFile(metadataFile); + } + } } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java similarity index 58% rename from server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java rename to server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 62f398cdad207..e77eb52bd3891 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -27,11 +27,11 @@ * * @opensearch.internal */ -public class RemoteDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { +public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { private final Supplier repositoriesService; - public RemoteDirectoryFactory(Supplier repositoriesService) { + public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService) { this.repositoriesService = repositoriesService; } @@ -39,13 +39,23 @@ public RemoteDirectoryFactory(Supplier repositoriesService) public Directory newDirectory(String repositoryName, IndexSettings indexSettings, ShardPath path) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath blobPath = new BlobPath(); - blobPath = blobPath.add(indexSettings.getIndex().getName()).add(String.valueOf(path.getShardId().getId())); - BlobContainer blobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(blobPath); - return new RemoteDirectory(blobContainer); + BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); + commonBlobPath = commonBlobPath.add(indexSettings.getIndex().getUUID()) + .add(String.valueOf(path.getShardId().getId())) + .add("segments"); + + RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); + RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); + + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } + private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extention) { + BlobPath extendedPath = commonBlobPath.add(extention); + BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); + return new RemoteDirectory(dataBlobContainer); + } } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 58598ab2d08f4..9122c950a6ab6 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -105,6 +105,7 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import 
java.util.Iterator; @@ -122,6 +123,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; +import static org.opensearch.index.store.Store.MetadataSnapshot.loadMetadata; /** * A Store provides plain access to files written by an opensearch index shard. Each shard @@ -334,6 +336,51 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio return new MetadataSnapshot(segmentInfos, directory, logger); } + /** + * Segment Replication method - Fetch a map of StoreFileMetadata for segments, ignoring Segment_N files. + * @param segmentInfos {@link SegmentInfos} from which to compute metadata. + * @return {@link Map} map file name to {@link StoreFileMetadata}. + */ + public Map getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { + assert indexSettings.isSegRepEnabled(); + return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + } + + /** + * Segment Replication method + * Returns a diff between the Maps of StoreFileMetadata that can be used for getting list of files to copy over to a replica for segment replication. The returned diff will hold a list of files that are: + *
+ * <ul>
+ * <li>identical: they exist in both maps and they can be considered the same ie. they don't need to be recovered</li>
+ * <li>different: they exist in both maps but they are not identical</li>
+ * <li>missing: files that exist in the source but not in the target</li>
+ * </ul>
+ */ + public static RecoveryDiff segmentReplicationDiff(Map source, Map target) { + final List identical = new ArrayList<>(); + final List different = new ArrayList<>(); + final List missing = new ArrayList<>(); + for (StoreFileMetadata value : source.values()) { + if (value.name().startsWith(IndexFileNames.SEGMENTS)) { + continue; + } + if (target.containsKey(value.name()) == false) { + missing.add(value); + } else { + final StoreFileMetadata fileMetadata = target.get(value.name()); + if (fileMetadata.isSame(value)) { + identical.add(value); + } else { + different.add(value); + } + } + } + return new RecoveryDiff( + Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), + Collections.unmodifiableList(missing) + ); + } + /** * Renames all the given files from the key of the map to the * value of the map. All successfully renamed files are removed from the map in-place. @@ -709,31 +756,34 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } /** - * This method deletes every file in this store that is not contained in either the remote or local metadata snapshots. + * Segment Replication method - + * This method deletes every file in this store that is not referenced by the passed in SegmentInfos or + * part of the latest on-disk commit point. * This method is used for segment replication when the in memory SegmentInfos can be ahead of the on disk segment file. * In this case files from both snapshots must be preserved. Verification has been done that all files are present on disk. * @param reason the reason for this cleanup operation logged for each deleted file - * @param localSnapshot The local snapshot from in memory SegmentInfos. + * @param infos {@link SegmentInfos} Files from this infos will be preserved on disk if present. * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. */ - public void cleanupAndPreserveLatestCommitPoint(String reason, MetadataSnapshot localSnapshot) throws IOException { + public void cleanupAndPreserveLatestCommitPoint(String reason, SegmentInfos infos) throws IOException { + assert indexSettings.isSegRepEnabled(); // fetch a snapshot from the latest on disk Segments_N file. This can be behind // the passed in local in memory snapshot, so we want to ensure files it references are not removed. 
metadataLock.writeLock().lock(); try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - cleanupFiles(reason, localSnapshot, getMetadata(readLastCommittedSegmentsInfo())); + cleanupFiles(reason, getMetadata(readLastCommittedSegmentsInfo()), infos.files(true)); } finally { metadataLock.writeLock().unlock(); } } - private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable MetadataSnapshot additionalSnapshot) + private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable Collection additionalFiles) throws IOException { assert metadataLock.isWriteLockedByCurrentThread(); for (String existingFile : directory.listAll()) { if (Store.isAutogenerated(existingFile) || localSnapshot.contains(existingFile) - || (additionalSnapshot != null && additionalSnapshot.contains(existingFile))) { + || (additionalFiles != null && additionalFiles.contains(existingFile))) { // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete // checksum) continue; @@ -825,17 +875,9 @@ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, l userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); latestSegmentInfos.setUserData(userData, true); latestSegmentInfos.commit(directory()); - - // similar to TrimUnsafeCommits, create a commit with an appending IW, this will delete old commits and ensure all files - // associated with the SegmentInfos.commit are fsynced. - final List existingCommits = DirectoryReader.listCommits(directory); - assert existingCommits.isEmpty() == false : "Expected at least one commit but none found"; - final IndexCommit lastIndexCommit = existingCommits.get(existingCommits.size() - 1); - assert latestSegmentInfos.getSegmentsFileName().equals(lastIndexCommit.getSegmentsFileName()); - try (IndexWriter writer = newAppendingIndexWriter(directory, lastIndexCommit)) { - writer.setLiveCommitData(lastIndexCommit.getUserData().entrySet()); - writer.commit(); - } + directory.sync(latestSegmentInfos.files(true)); + directory.syncMetaData(); + cleanupAndPreserveLatestCommitPoint("After commit", latestSegmentInfos); } finally { metadataLock.writeLock().unlock(); } @@ -1033,6 +1075,11 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg } static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger) throws IOException { + return loadMetadata(segmentInfos, directory, logger, false); + } + + static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger, boolean ignoreSegmentsFile) + throws IOException { long numDocs = Lucene.getNumDocs(segmentInfos); Map commitUserDataBuilder = new HashMap<>(); commitUserDataBuilder.putAll(segmentInfos.getUserData()); @@ -1067,8 +1114,10 @@ static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory director if (maxVersion == null) { maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; } - final String segmentsFile = segmentInfos.getSegmentsFileName(); - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + if (ignoreSegmentsFile == false) { + final String segmentsFile = segmentInfos.getSegmentsFileName(); + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + } return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs); } @@ -1148,7 +1197,6 @@ public Map asMap() { * Helper 
method used to group store files according to segment and commit. * * @see MetadataSnapshot#recoveryDiff(MetadataSnapshot) - * @see MetadataSnapshot#segmentReplicationDiff(MetadataSnapshot) */ private Iterable> getGroupedFilesIterable() { final Map> perSegment = new HashMap<>(); @@ -1241,51 +1289,6 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { return recoveryDiff; } - /** - * Segment Replication method - * Returns a diff between the two snapshots that can be used for getting list of files to copy over to a replica for segment replication. The given snapshot is treated as the - * target and this snapshot as the source. The returned diff will hold a list of files that are: - *
- * <ul>
- * <li>identical: they exist in both snapshots and they can be considered the same ie. they don't need to be recovered</li>
- * <li>different: they exist in both snapshots but their they are not identical</li>
- * <li>missing: files that exist in the source but not in the target</li>
- * </ul>
- */ - public RecoveryDiff segmentReplicationDiff(MetadataSnapshot recoveryTargetSnapshot) { - final List identical = new ArrayList<>(); - final List different = new ArrayList<>(); - final List missing = new ArrayList<>(); - final ArrayList identicalFiles = new ArrayList<>(); - for (List segmentFiles : getGroupedFilesIterable()) { - identicalFiles.clear(); - boolean consistent = true; - for (StoreFileMetadata meta : segmentFiles) { - StoreFileMetadata storeFileMetadata = recoveryTargetSnapshot.get(meta.name()); - if (storeFileMetadata == null) { - // Do not consider missing files as inconsistent in SegRep as replicas may lag while primary updates - // documents and generate new files specific to a segment - missing.add(meta); - } else if (storeFileMetadata.isSame(meta) == false) { - consistent = false; - different.add(meta); - } else { - identicalFiles.add(meta); - } - } - if (consistent) { - identical.addAll(identicalFiles); - } else { - different.addAll(identicalFiles); - } - } - RecoveryDiff recoveryDiff = new RecoveryDiff( - Collections.unmodifiableList(identical), - Collections.unmodifiableList(different), - Collections.unmodifiableList(missing) - ); - return recoveryDiff; - } - /** * Returns the number of files in this snapshot */ diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index fdb609ba7bbff..6808803ee0988 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -132,7 +132,6 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.ShardId; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -266,7 +265,7 @@ public class IndicesService extends AbstractLifecycleComponent private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); private final boolean nodeWriteDanglingIndicesInfo; private final ValuesSourceRegistry valuesSourceRegistry; - private final RemoteDirectoryFactory remoteDirectoryFactory; + private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; @Override protected void doStart() { @@ -295,7 +294,7 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory ) { this.settings = settings; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java index c0056aab3fb16..40bb4894c7397 100644 --- a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -559,8 +559,19 @@ static long fallbackRegionSize(JvmInfo jvmInfo) { // https://hg.openjdk.java.net/jdk/jdk/file/e7d0ec2d06e8/src/hotspot/share/gc/g1/heapRegion.cpp#l67 // based on this JDK "bug": // https://bugs.openjdk.java.net/browse/JDK-8241670 - long averageHeapSize = (jvmInfo.getMem().getHeapMax().getBytes() + 
JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()) / 2; - long regionSize = Long.highestOneBit(averageHeapSize / 2048); + // JDK-17 updates: + // https://github.com/openjdk/jdk17u/blob/master/src/hotspot/share/gc/g1/heapRegionBounds.hpp + // https://github.com/openjdk/jdk17u/blob/master/src/hotspot/share/gc/g1/heapRegion.cpp#L67 + long regionSizeUnrounded = Math.min( + Math.max(JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 2048, ByteSizeUnit.MB.toBytes(1)), + ByteSizeUnit.MB.toBytes(32) + ); + + long regionSize = Long.highestOneBit(regionSizeUnrounded); + if (regionSize != regionSizeUnrounded) { + regionSize <<= 1; /* next power of 2 */ + } + if (regionSize < ByteSizeUnit.MB.toBytes(1)) { regionSize = ByteSizeUnit.MB.toBytes(1); } else if (regionSize > ByteSizeUnit.MB.toBytes(32)) { diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 8884ef2cddd0a..15a9bf9e4c492 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -81,6 +81,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; @@ -152,6 +153,7 @@ public IndicesClusterStateService( final ThreadPool threadPool, final PeerRecoveryTargetService recoveryTargetService, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, @@ -170,6 +172,7 @@ public IndicesClusterStateService( threadPool, checkpointPublisher, segmentReplicationTargetService, + segmentReplicationSourceService, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, @@ -191,6 +194,7 @@ public IndicesClusterStateService( final ThreadPool threadPool, final SegmentReplicationCheckpointPublisher checkpointPublisher, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final PeerRecoveryTargetService recoveryTargetService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, @@ -211,6 +215,7 @@ public IndicesClusterStateService( // if segrep feature flag is not enabled, don't wire the target serivce as an IndexEventListener. 
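// Worked example for the G1 region-size fallback above (HierarchyCircuitBreakerService): the unrounded value
// is heapMax / 2048 clamped to [1 MB, 32 MB] and then rounded up to the next power of two. A 31 GB heap gives
// 31 GB / 2048 = 15.5 MB, Long.highestOneBit yields 8 MB, and the shift rounds up to a 16 MB region; heaps of
// 64 GB and above cap out at 32 MB regions.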
if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { indexEventListeners.add(segmentReplicationTargetService); + indexEventListeners.add(segmentReplicationSourceService); } this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; diff --git a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java index cb43af3b82e09..f1cc7b8dd1d89 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java @@ -28,4 +28,6 @@ void writeFileChunk( int totalTranslogOps, ActionListener listener ); + + default void cancel() {} } diff --git a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java index 3509615052707..ec3986017afac 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java @@ -156,7 +156,10 @@ private void innerWriteFileChunk(StoreFileMetadata fileMetadata, long position, + temporaryFileName + "] in " + Arrays.toString(store.directory().listAll()); - store.directory().sync(Collections.singleton(temporaryFileName)); + // With Segment Replication, we will fsync after a full commit has been received. + if (store.indexSettings().isSegRepEnabled() == false) { + store.directory().sync(Collections.singleton(temporaryFileName)); + } IndexOutput remove = removeOpenIndexOutputs(name); assert remove == null || remove == indexOutput; // remove maybe null if we got finished } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 652f3c9a55f53..7acc6b8b54fdd 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -177,51 +177,6 @@ public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOEx return false; } - /** - * cancel the recovery. calling this method will clean temporary files and release the store - * unless this object is in use (in which case it will be cleaned once all ongoing users call - * {@link #decRef()} - *

- * if {@link #cancellableThreads()} was used, the threads will be interrupted. - */ - public void cancel(String reason) { - if (finished.compareAndSet(false, true)) { - try { - logger.debug("recovery canceled (reason: [{}])", reason); - cancellableThreads.cancel(reason); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - - /** - * fail the recovery and call listener - * - * @param e exception that encapsulating the failure - * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure - */ - public void fail(RecoveryFailedException e, boolean sendShardFailure) { - super.fail(e, sendShardFailure); - } - - /** mark the current recovery as done */ - public void markAsDone() { - if (finished.compareAndSet(false, true)) { - assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; - try { - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. - indexShard.postRecovery("peer recovery done"); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - listener.onDone(state()); - } - } - @Override protected void closeInternal() { try { @@ -246,8 +201,6 @@ protected String getPrefix() { @Override protected void onDone() { assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. indexShard.postRecovery("peer recovery done"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java index a73a3b54184da..48c2dfd30f589 100644 --- a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -10,13 +10,12 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.transport.TransportResponse; import java.io.IOException; -import java.util.Set; +import java.util.Map; /** * Response returned from a {@link SegmentReplicationSource} that includes the file metadata, and SegmentInfos @@ -28,52 +27,41 @@ public class CheckpointInfoResponse extends TransportResponse { private final ReplicationCheckpoint checkpoint; - private final Store.MetadataSnapshot snapshot; + private final Map metadataMap; private final byte[] infosBytes; - // pendingDeleteFiles are segments that have been merged away in the latest in memory SegmentInfos - // but are still referenced by the latest commit point (Segments_N). 
- private final Set pendingDeleteFiles; public CheckpointInfoResponse( final ReplicationCheckpoint checkpoint, - final Store.MetadataSnapshot snapshot, - final byte[] infosBytes, - final Set additionalFiles + final Map metadataMap, + final byte[] infosBytes ) { this.checkpoint = checkpoint; - this.snapshot = snapshot; + this.metadataMap = metadataMap; this.infosBytes = infosBytes; - this.pendingDeleteFiles = additionalFiles; } public CheckpointInfoResponse(StreamInput in) throws IOException { this.checkpoint = new ReplicationCheckpoint(in); - this.snapshot = new Store.MetadataSnapshot(in); + this.metadataMap = in.readMap(StreamInput::readString, StoreFileMetadata::new); this.infosBytes = in.readByteArray(); - this.pendingDeleteFiles = in.readSet(StoreFileMetadata::new); } @Override public void writeTo(StreamOutput out) throws IOException { checkpoint.writeTo(out); - snapshot.writeTo(out); + out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); out.writeByteArray(infosBytes); - out.writeCollection(pendingDeleteFiles); } public ReplicationCheckpoint getCheckpoint() { return checkpoint; } - public Store.MetadataSnapshot getSnapshot() { - return snapshot; + public Map getMetadataMap() { + return metadataMap; } public byte[] getInfosBytes() { return infosBytes; } - - public Set getPendingDeleteFiles() { - return pendingDeleteFiles; - } } diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index dfebe5f7cabf2..1a97d334df58f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -37,7 +37,6 @@ * @opensearch.internal */ class OngoingSegmentReplications { - private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; @@ -127,7 +126,7 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener handler.getCopyState().getShard().shardId().equals(shard.shardId()), reason); } + /** + * Cancel all Replication events for the given allocation ID, intended to be called when a primary is shutting down. + * + * @param allocationId {@link String} - Allocation ID. 
+ * @param reason {@link String} - Reason for the cancel + */ + synchronized void cancel(String allocationId, String reason) { + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); + if (handler != null) { + handler.cancel(reason); + removeCopyState(handler.getCopyState()); + } + } + /** * Cancel any ongoing replications for a given {@link DiscoveryNode} * @@ -168,7 +181,6 @@ synchronized void cancel(IndexShard shard, String reason) { */ void cancelReplication(DiscoveryNode node) { cancelHandlers(handler -> handler.getTargetNode().equals(node), "Node left"); - } /** @@ -243,11 +255,7 @@ private void cancelHandlers(Predicate p .map(SegmentReplicationSourceHandler::getAllocationId) .collect(Collectors.toList()); for (String allocationId : allocationIds) { - final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); - if (handler != null) { - handler.cancel(reason); - removeCopyState(handler.getCopyState()); - } + cancel(allocationId, reason); } } } diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 08dc0b97b31d5..8107f99723eaf 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -13,11 +13,13 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; import java.util.List; @@ -78,6 +80,17 @@ public void getSegmentFiles( ) { final Writeable.Reader reader = GetSegmentFilesResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r); + // Few of the below assumptions and calculations are added for experimental release of segment replication feature in 2.3 + // version. These will be changed in next release. + + // Storing the size of files to fetch in bytes. + final long sizeOfSegmentFiles = filesToFetch.stream().mapToLong(file -> file.length()).sum(); + + // Maximum size of files to fetch (segment files) in bytes, that can be processed in 1 minute for a m5.xlarge machine. 
+ long baseSegmentFilesSize = 100000000; + + // Formula for calculating time needed to process a replication event's files to fetch process + final long timeToGetSegmentFiles = 1 + (sizeOfSegmentFiles / baseSegmentFilesSize); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( replicationId, targetAllocationId, @@ -85,6 +98,15 @@ public void getSegmentFiles( filesToFetch, checkpoint ); - transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader); + final TransportRequestOptions options = TransportRequestOptions.builder() + .withTimeout(TimeValue.timeValueMinutes(timeToGetSegmentFiles)) + .build(); + transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, options, responseListener, reader); } + + @Override + public void cancel() { + transportClient.cancel(); + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java b/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java index 05f1c9d757e5c..b3909a3c0f8df 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java @@ -122,4 +122,9 @@ public void writeFileChunk( reader ); } + + @Override + public void cancel() { + retryableTransportClient.cancel(); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 8628a266ea7d0..b2e7487fff4b2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.opensearch.action.ActionListener; +import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -47,4 +48,9 @@ void getSegmentFiles( Store store, ActionListener listener ); + + /** + * Cancel any ongoing requests, should resolve any ongoing listeners with onFailure with a {@link ExecutionCancelledException}. + */ + default void cancel() {} } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 2d21653c1924c..b63b84a5c1eab 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -58,6 +58,8 @@ class SegmentReplicationSourceHandler { private final DiscoveryNode targetNode; private final String allocationId; + private final FileChunkWriter writer; + /** * Constructor. 
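The timeout calculation above sizes the GET_SEGMENT_FILES request timeout from the total bytes to fetch, on the stated assumption that roughly 100,000,000 bytes can be processed per minute. A small stand-alone sketch of that arithmetic; the constant and method names are illustrative, not the production ones:

import java.util.List;

public class SegmentFetchTimeoutSketch {
    // Assumed throughput floor: ~100,000,000 bytes processed per minute (per the comment above).
    static final long BASE_SEGMENT_FILES_SIZE_PER_MINUTE = 100_000_000L;

    // One minute of slack plus one minute per 100 MB of segment data, using integer division as in the diff.
    static long timeoutMinutes(List<Long> fileLengths) {
        long totalBytes = fileLengths.stream().mapToLong(Long::longValue).sum();
        return 1 + (totalBytes / BASE_SEGMENT_FILES_SIZE_PER_MINUTE);
    }

    public static void main(String[] args) {
        // 450 MB of segment files -> 1 + (450_000_000 / 100_000_000) = 1 + 4 = 5 minutes.
        System.out.println(timeoutMinutes(List.of(300_000_000L, 150_000_000L)));
    }
}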
* @@ -96,6 +98,7 @@ class SegmentReplicationSourceHandler { ); this.allocationId = allocationId; this.copyState = copyState; + this.writer = writer; } /** @@ -113,6 +116,16 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final Closeable releaseResources = () -> IOUtils.close(resources); try { timer.start(); + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + final RuntimeException e = new CancellableThreads.ExecutionCancelledException( + "replication was canceled reason [" + reason + "]" + ); + if (beforeCancelEx != null) { + e.addSuppressed(beforeCancelEx); + } + IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + throw e; + }); final Consumer onFailure = e -> { assert Transports.assertNotTransportThread(SegmentReplicationSourceHandler.this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); @@ -153,6 +166,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final MultiChunkTransfer transfer = segmentFileTransferHandler .createTransfer(shard.store(), storeFileMetadata, () -> 0, sendFileStep); resources.add(transfer); + cancellableThreads.checkForCancel(); transfer.start(); sendFileStep.whenComplete(r -> { @@ -175,9 +189,10 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene } /** - * Cancels the recovery and interrupts all eligible threads. + * Cancels the replication and interrupts all eligible threads. */ public void cancel(String reason) { + writer.cancel(); cancellableThreads.cancel(reason); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 0cee731fde2cb..91b8243440ac5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -42,7 +43,25 @@ * * @opensearch.internal */ -public final class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { +public class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { + + // Empty Implementation, only required while Segment Replication is under feature flag. 
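SegmentReplicationSourceService gains a NO_OP instance (defined just below) so the injector can always bind a service while segment replication sits behind the REPLICATION_TYPE feature flag. A generic sketch of that null-object-behind-a-flag pattern, with made-up interface and class names rather than the OpenSearch ones:

public class NoOpServiceSketch {
    interface ReplicationSourceService {
        void onClusterChanged(String event);
        void onShardClosed(String shardId);
    }

    // Null object: safe to bind and call unconditionally when the feature is disabled.
    static final ReplicationSourceService NO_OP = new ReplicationSourceService() {
        @Override public void onClusterChanged(String event) { /* no-op */ }
        @Override public void onShardClosed(String shardId) { /* no-op */ }
    };

    static ReplicationSourceService realService() {
        return new ReplicationSourceService() {
            @Override public void onClusterChanged(String event) { System.out.println("handling " + event); }
            @Override public void onShardClosed(String shardId) { System.out.println("cancelling replications for " + shardId); }
        };
    }

    public static void main(String[] args) {
        boolean featureEnabled = Boolean.getBoolean("segrep.enabled"); // stand-in for FeatureFlags.isEnabled(REPLICATION_TYPE)
        ReplicationSourceService service = featureEnabled ? realService() : NO_OP;
        service.onClusterChanged("cluster state update"); // callers never need a null check, regardless of the flag
    }
}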
+ public static final SegmentReplicationSourceService NO_OP = new SegmentReplicationSourceService() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + // NoOp; + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + // NoOp; + } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // NoOp; + } + }; private static final Logger logger = LogManager.getLogger(SegmentReplicationSourceService.class); private final RecoverySettings recoverySettings; @@ -62,6 +81,14 @@ public static class Actions { private final OngoingSegmentReplications ongoingSegmentReplications; + // Used only for empty implementation. + private SegmentReplicationSourceService() { + recoverySettings = null; + ongoingSegmentReplications = null; + transportService = null; + indicesService = null; + } + public SegmentReplicationSourceService( IndicesService indicesService, TransportService transportService, @@ -106,12 +133,7 @@ public void messageReceived(CheckpointInfoRequest request, TransportChannel chan ); final CopyState copyState = ongoingSegmentReplications.prepareForReplication(request, segmentSegmentFileChunkWriter); channel.sendResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); timer.stop(); logger.trace( @@ -163,10 +185,25 @@ protected void doClose() throws IOException { } + /** + * + * Cancels any replications on this node to a replica shard that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { ongoingSegmentReplications.cancel(indexShard, "shard is closed"); } } + + /** + * Cancels any replications on this node to a replica that has been promoted as primary. + */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (indexShard != null && oldRouting.primary() == false && newRouting.primary()) { + ongoingSegmentReplications.cancel(indexShard.routingEntry().allocationId().getId(), "Relocating primary shard."); + } + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index f865ba1332186..2e2e6df007c5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -35,7 +35,8 @@ public enum Stage { GET_CHECKPOINT_INFO((byte) 3), FILE_DIFF((byte) 4), GET_FILES((byte) 5), - FINALIZE_REPLICATION((byte) 6); + FINALIZE_REPLICATION((byte) 6), + CANCELLED((byte) 7); private static final Stage[] STAGES = new Stage[Stage.values().length]; @@ -118,6 +119,10 @@ protected void validateAndSetStage(Stage expected, Stage next) { "can't move replication to stage [" + next + "]. 
current stage: [" + stage + "] (expected [" + expected + "])" ); } + stopTimersAndSetStage(next); + } + + private void stopTimersAndSetStage(Stage next) { // save the timing data for the current step stageTimer.stop(); timingData.add(new Tuple<>(stage.name(), stageTimer.time())); @@ -155,6 +160,14 @@ public void setStage(Stage stage) { overallTimer.stop(); timingData.add(new Tuple<>("OVERALL", overallTimer.time())); break; + case CANCELLED: + if (this.stage == Stage.DONE) { + throw new IllegalStateException("can't move replication to Cancelled state from Done."); + } + stopTimersAndSetStage(Stage.CANCELLED); + overallTimer.stop(); + timingData.add(new Tuple<>("OVERALL", overallTimer.time())); + break; default: throw new IllegalArgumentException("unknown SegmentReplicationState.Stage [" + stage + "]"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index a658ffc09d590..26bec2203c599 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -17,11 +17,13 @@ import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.ChecksumIndexInput; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexShard; @@ -36,12 +38,9 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; +import java.util.Collections; +import java.util.Map; /** * Represents the target of a replication event. @@ -55,6 +54,10 @@ public class SegmentReplicationTarget extends ReplicationTarget { private final SegmentReplicationState state; protected final MultiFileWriter multiFileWriter; + public ReplicationCheckpoint getCheckpoint() { + return this.checkpoint; + } + public SegmentReplicationTarget( ReplicationCheckpoint checkpoint, IndexShard indexShard, @@ -103,7 +106,15 @@ public String description() { @Override public void notifyListener(OpenSearchException e, boolean sendShardFailure) { - listener.onFailure(state(), e, sendShardFailure); + // Cancellations still are passed to our SegmentReplicationListner as failures, if we have failed because of cancellation + // update the stage. + final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); + if (cancelledException != null) { + state.setStage(SegmentReplicationState.Stage.CANCELLED); + listener.onFailure(state(), (CancellableThreads.ExecutionCancelledException) cancelledException, sendShardFailure); + } else { + listener.onFailure(state(), e, sendShardFailure); + } } @Override @@ -134,11 +145,20 @@ public void writeFileChunk( * @param listener {@link ActionListener} listener. 
*/ public void startReplication(ActionListener listener) { + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + // This method only executes when cancellation is triggered by this node and caught by a call to checkForCancel, + // SegmentReplicationSource does not share CancellableThreads. + final CancellableThreads.ExecutionCancelledException executionCancelledException = + new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); + notifyListener(executionCancelledException, false); + throw executionCancelledException; + }); state.setStage(SegmentReplicationState.Stage.REPLICATING); final StepListener checkpointInfoListener = new StepListener<>(); final StepListener getFilesListener = new StepListener<>(); final StepListener finalizeListener = new StepListener<>(); + cancellableThreads.checkForCancel(); logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint. state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); @@ -154,10 +174,9 @@ public void startReplication(ActionListener listener) { private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener getFilesListener) throws IOException { + cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); - final Store.MetadataSnapshot snapshot = checkpointInfo.getSnapshot(); - Store.MetadataSnapshot localMetadata = getMetadataSnapshot(); - final Store.RecoveryDiff diff = snapshot.segmentReplicationDiff(localMetadata); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(checkpointInfo.getMetadataMap(), getMetadataMap()); logger.trace("Replication diff {}", diff); /* * Segments are immutable. So if the replica has any segments with the same name that differ from the one in the incoming @@ -172,28 +191,20 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener filesToFetch = new ArrayList(diff.missing); - - Set storeFiles = new HashSet<>(Arrays.asList(store.directory().listAll())); - final Set pendingDeleteFiles = checkpointInfo.getPendingDeleteFiles() - .stream() - .filter(f -> storeFiles.contains(f.name()) == false) - .collect(Collectors.toSet()); - filesToFetch.addAll(pendingDeleteFiles); - logger.trace("Files to fetch {}", filesToFetch); - - for (StoreFileMetadata file : filesToFetch) { + for (StoreFileMetadata file : diff.missing) { state.getIndex().addFileDetail(file.name(), file.length(), false); } // always send a req even if not fetching files so the primary can clear the copyState for this shard. 
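getFiles above compares the primary's segment metadata map against the replica's local map and fetches only the files the replica is missing; because segments are immutable, a same-named file with a different checksum signals a stale or corrupt local copy. A self-contained sketch of that diff, using plain maps and a hypothetical checksum string in place of StoreFileMetadata:

import java.util.*;

public class SegmentDiffSketch {
    record Diff(List<String> missing, List<String> different) {}

    // Classify primary files as missing (not on the replica) or different (same name, different checksum).
    static Diff segmentReplicationDiff(Map<String, String> primary, Map<String, String> replica) {
        List<String> missing = new ArrayList<>();
        List<String> different = new ArrayList<>();
        for (Map.Entry<String, String> e : primary.entrySet()) {
            String localChecksum = replica.get(e.getKey());
            if (localChecksum == null) {
                missing.add(e.getKey());
            } else if (!localChecksum.equals(e.getValue())) {
                different.add(e.getKey()); // segments are immutable, so a mismatch is never a legitimate update
            }
        }
        return new Diff(missing, different);
    }

    public static void main(String[] args) {
        Map<String, String> primary = Map.of("_0.si", "aaa", "_1.si", "bbb", "segments_3", "ccc");
        Map<String, String> replica = Map.of("_0.si", "aaa", "segments_3", "zzz");
        Diff diff = segmentReplicationDiff(primary, replica);
        System.out.println(diff.missing());   // [_1.si]
        System.out.println(diff.different()); // [segments_3]
    }
}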
state.setStage(SegmentReplicationState.Stage.GET_FILES); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, store, getFilesListener); + cancellableThreads.checkForCancel(); + source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), diff.missing, store, getFilesListener); } private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, ActionListener listener) { - state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); ActionListener.completeWith(listener, () -> { + cancellableThreads.checkForCancel(); + state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); multiFileWriter.renameAllTempFiles(); final Store store = store(); store.incRef(); @@ -206,7 +217,7 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, responseCheckpoint.getSegmentsGen() ); indexShard.finalizeReplication(infos, responseCheckpoint.getSeqNo()); - store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", store.getMetadata(infos)); + store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", infos); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. // this means we transferred files from the remote that have not be checksummed and they are @@ -255,10 +266,18 @@ private ChecksumIndexInput toIndexInput(byte[] input) { ); } - Store.MetadataSnapshot getMetadataSnapshot() throws IOException { + Map getMetadataMap() throws IOException { if (indexShard.getSegmentInfosSnapshot() == null) { - return Store.MetadataSnapshot.EMPTY; + return Collections.emptyMap(); + } + try (final GatedCloseable snapshot = indexShard.getSegmentInfosSnapshot()) { + return store.getSegmentMetadataMap(snapshot.get()); } - return store.getMetadata(indexShard.getSegmentInfosSnapshot().get()); + } + + @Override + protected void onCancel(String reason) { + cancellableThreads.cancel(reason); + source.cancel(); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index a79ce195ad83b..8fc53ccd3bc08 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,10 +11,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -31,7 +35,6 @@ import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportService; -import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @@ -51,7 +54,7 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final SegmentReplicationSourceFactory sourceFactory; - private 
final Map latestReceivedCheckpoint = new HashMap<>(); + private final Map latestReceivedCheckpoint = ConcurrentCollections.newConcurrentMap(); // Empty Implementation, only required while Segment Replication is under feature flag. public static final SegmentReplicationTargetService NO_OP = new SegmentReplicationTargetService() { @@ -64,6 +67,11 @@ public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Setti public synchronized void onNewCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { // noOp; } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // noOp; + } }; // Used only for empty implementation. @@ -74,6 +82,10 @@ private SegmentReplicationTargetService() { sourceFactory = null; } + public ReplicationRef get(long replicationId) { + return onGoingReplications.get(replicationId); + } + /** * The internal actions * @@ -102,6 +114,9 @@ public SegmentReplicationTargetService( ); } + /** + * Cancel any replications on this node for a replica that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { @@ -109,11 +124,22 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } + /** + * Cancel any replications on this node for a replica that has just been promoted as the new primary. + */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (oldRouting != null && oldRouting.primary() == false && newRouting.primary()) { + onGoingReplications.cancelForShard(indexShard.shardId(), "shard has been promoted to primary"); + } + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. 
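latestReceivedCheckpoint above moves from a plain HashMap to a concurrent map because checkpoints can arrive on transport threads while replication events complete on others. A small sketch of keeping only the newest checkpoint per shard with a JDK ConcurrentHashMap; the field and record names are illustrative:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class LatestCheckpointSketch {
    record Checkpoint(long primaryTerm, long segmentInfosVersion) {}

    private final ConcurrentMap<String, Checkpoint> latestReceivedCheckpoint = new ConcurrentHashMap<>();

    // Atomically keep whichever checkpoint is newer for the shard, even under concurrent updates.
    void onNewCheckpoint(String shardId, Checkpoint received) {
        latestReceivedCheckpoint.merge(shardId, received, (current, incoming) ->
            incoming.segmentInfosVersion() > current.segmentInfosVersion() ? incoming : current);
    }

    public static void main(String[] args) {
        LatestCheckpointSketch sketch = new LatestCheckpointSketch();
        sketch.onNewCheckpoint("[index][0]", new Checkpoint(1, 5));
        sketch.onNewCheckpoint("[index][0]", new Checkpoint(1, 7));
        System.out.println(sketch.latestReceivedCheckpoint); // {[index][0]=Checkpoint[primaryTerm=1, segmentInfosVersion=7]}
    }
}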
- * @param receivedCheckpoint received checkpoint that is checked for processing - * @param replicaShard replica shard on which checkpoint is received + * + * @param receivedCheckpoint received checkpoint that is checked for processing + * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); @@ -125,14 +151,23 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe } else { latestReceivedCheckpoint.put(replicaShard.shardId(), receivedCheckpoint); } - if (onGoingReplications.isShardReplicating(replicaShard.shardId())) { - logger.trace( - () -> new ParameterizedMessage( - "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", - replicaShard.getLatestReplicationCheckpoint() - ) - ); - return; + SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId()); + if (ongoingReplicationTarget != null) { + if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) { + logger.trace( + "Cancelling ongoing replication from old primary with primary term {}", + ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() + ); + onGoingReplications.cancel(ongoingReplicationTarget.getId(), "Cancelling stuck target after new primary"); + } else { + logger.trace( + () -> new ParameterizedMessage( + "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", + replicaShard.getLatestReplicationCheckpoint() + ) + ); + return; + } } final Thread thread = Thread.currentThread(); if (replicaShard.shouldProcessCheckpoint(receivedCheckpoint)) { @@ -180,12 +215,19 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept } } - public void startReplication( + public SegmentReplicationTarget startReplication( final ReplicationCheckpoint checkpoint, final IndexShard indexShard, final SegmentReplicationListener listener ) { - startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + sourceFactory.get(indexShard), + listener + ); + startReplication(target); + return target; } // pkg-private for integration tests @@ -248,7 +290,17 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { - onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof CancellableThreads.ExecutionCancelledException) { + if (onGoingReplications.getTarget(replicationId) != null) { + // if the target still exists in our collection, the primary initiated the cancellation, fail the replication + // but do not fail the shard. Cancellations initiated by this node from Index events will be removed with + // onGoingReplications.cancel and not appear in the collection when this listener resolves. 
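The onFailure handler above separates primary-initiated cancellations (fail the replication but not the shard) from real errors (fail the shard) by unwrapping the exception's cause chain. A JDK-only sketch of that classification, with a hypothetical ExecutionCancelledException standing in for the CancellableThreads one:

public class ReplicationFailureSketch {
    // Stand-in for CancellableThreads.ExecutionCancelledException.
    static class ExecutionCancelledException extends RuntimeException {
        ExecutionCancelledException(String message) { super(message); }
    }

    // Walk the cause chain looking for a cancellation, similar in spirit to ExceptionsHelper.unwrapCause.
    static boolean isCancellation(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof ExecutionCancelledException) {
                return true;
            }
        }
        return false;
    }

    static void onFailure(Exception e) {
        if (isCancellation(e)) {
            System.out.println("fail replication only, sendShardFailure=false");
        } else {
            System.out.println("fail replication and shard, sendShardFailure=true");
        }
    }

    public static void main(String[] args) {
        onFailure(new RuntimeException(new ExecutionCancelledException("replication was canceled reason [shard closed]")));
        onFailure(new RuntimeException("checksum mismatch"));
    }
}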
+ onGoingReplications.fail(replicationId, (CancellableThreads.ExecutionCancelledException) cause, false); + } + } else { + onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + } } }); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java index c0e0b4dee2b3f..1dd0886fd2f36 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java @@ -15,14 +15,12 @@ import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.HashSet; -import java.util.Set; +import java.util.Map; /** * An Opensearch-specific version of Lucene's CopyState class that @@ -37,8 +35,7 @@ public class CopyState extends AbstractRefCounted { private final ReplicationCheckpoint requestedReplicationCheckpoint; /** Actual ReplicationCheckpoint returned by the shard */ private final ReplicationCheckpoint replicationCheckpoint; - private final Store.MetadataSnapshot metadataSnapshot; - private final HashSet pendingDeleteFiles; + private final Map metadataMap; private final byte[] infosBytes; private GatedCloseable commitRef; private final IndexShard shard; @@ -49,7 +46,7 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar this.shard = shard; this.segmentInfosRef = shard.getSegmentInfosSnapshot(); SegmentInfos segmentInfos = this.segmentInfosRef.get(); - this.metadataSnapshot = shard.store().getMetadata(segmentInfos); + this.metadataMap = shard.store().getSegmentMetadataMap(segmentInfos); this.replicationCheckpoint = new ReplicationCheckpoint( shard.shardId(), shard.getOperationPrimaryTerm(), @@ -57,18 +54,7 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar shard.getProcessedLocalCheckpoint(), segmentInfos.getVersion() ); - - // Send files that are merged away in the latest SegmentInfos but not in the latest on disk Segments_N. - // This ensures that the store on replicas is in sync with the store on primaries. this.commitRef = shard.acquireLastIndexCommit(false); - Store.MetadataSnapshot metadata = shard.store().getMetadata(this.commitRef.get()); - final Store.RecoveryDiff diff = metadata.recoveryDiff(this.metadataSnapshot); - this.pendingDeleteFiles = new HashSet<>(diff.missing); - if (this.pendingDeleteFiles.isEmpty()) { - // If there are no additional files we can release the last commit immediately. 
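CopyState is ref-counted: the primary holds it while any replication event references it and releases the underlying snapshot once the count reaches zero, the same decRef pattern the recovery comments earlier in this diff describe. A minimal stand-alone sketch of that pattern, not the AbstractRefCounted implementation itself:

import java.util.concurrent.atomic.AtomicInteger;

public class RefCountedSketch {
    static class CopyStateLike {
        private final AtomicInteger refCount = new AtomicInteger(1); // initial reference held by the owner

        void incRef() {
            refCount.incrementAndGet();
        }

        void decRef() {
            if (refCount.decrementAndGet() == 0) {
                // Last reference gone: release the SegmentInfos/commit resources held for replicas.
                System.out.println("releasing segment snapshot");
            }
        }
    }

    public static void main(String[] args) {
        CopyStateLike copyState = new CopyStateLike();
        copyState.incRef();  // a replication event starts using this copy state
        copyState.decRef();  // the event finishes
        copyState.decRef();  // the owner releases the initial reference, cleaning up resources
    }
}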
- this.commitRef.close(); - this.commitRef = null; - } ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); // resource description and name are not used, but resource description cannot be null @@ -95,18 +81,14 @@ public ReplicationCheckpoint getCheckpoint() { return replicationCheckpoint; } - public Store.MetadataSnapshot getMetadataSnapshot() { - return metadataSnapshot; + public Map getMetadataMap() { + return metadataMap; } public byte[] getInfosBytes() { return infosBytes; } - public Set getPendingDeleteFiles() { - return pendingDeleteFiles; - } - public IndexShard getShard() { return shard; } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java index d648ca6041ff8..20600856c9444 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -49,6 +49,7 @@ import java.util.Iterator; import java.util.List; import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; /** * This class holds a collection of all on going replication events on the current node (i.e., the node is the target node @@ -236,13 +237,18 @@ public boolean cancelForShard(ShardId shardId, String reason) { } /** - * check if a shard is currently replicating + * Get target for shard * - * @param shardId shardId for which to check if replicating - * @return true if shard is currently replicating + * @param shardId shardId + * @return ReplicationTarget for input shardId */ - public boolean isShardReplicating(ShardId shardId) { - return onGoingTargetEvents.values().stream().anyMatch(t -> t.indexShard.shardId().equals(shardId)); + public T getOngoingReplicationTarget(ShardId shardId) { + final List replicationTargetList = onGoingTargetEvents.values() + .stream() + .filter(t -> t.indexShard.shardId().equals(shardId)) + .collect(Collectors.toList()); + assert replicationTargetList.size() <= 1 : "More than one on-going replication targets"; + return replicationTargetList.size() > 0 ? replicationTargetList.get(0) : null; } /** diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 501ff46eeb2ff..42f4572fef3e4 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -155,7 +155,7 @@ public void markAsDone() { public void cancel(String reason) { if (finished.compareAndSet(false, true)) { try { - logger.debug("replication cancelled (reason: [{}])", reason); + logger.debug("replication/recovery cancelled (reason: [{}])", reason); onCancel(reason); } finally { // release the initial reference. 
replication files will be cleaned as soon as ref count goes to zero, potentially now diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index d3f0912cab638..92e9815313fa0 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -39,12 +39,12 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.index.IndexingPressureService; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -629,7 +629,9 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); - final RemoteDirectoryFactory remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceReference::get); + final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + repositoriesServiceReference::get + ); final IndicesService indicesService = new IndicesService( settings, @@ -967,6 +969,7 @@ protected Node( .toInstance(new SegmentReplicationSourceService(indicesService, transportService, recoverySettings)); } else { b.bind(SegmentReplicationTargetService.class).toInstance(SegmentReplicationTargetService.NO_OP); + b.bind(SegmentReplicationSourceService.class).toInstance(SegmentReplicationSourceService.NO_OP); } } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); @@ -1110,6 +1113,9 @@ public Node start() throws NodeValidationException { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + injector.getInstance(SegmentReplicationSourceService.class).start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); @@ -1285,6 +1291,9 @@ public synchronized void close() throws IOException { // close filter/fielddata caches after indices toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(injector.getInstance(PeerRecoverySourceService.class)); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + toClose.add(injector.getInstance(SegmentReplicationSourceService.class)); + } toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); toClose.add(() -> stopWatch.stop().start("node_connections_service")); diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index a743360e1e90c..af7d4fc2e9fe5 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -55,6 +55,9 @@ import org.opensearch.search.aggregations.Aggregator; import 
org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.PipelineAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationParsingFunction; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.search.aggregations.pipeline.MovAvgModel; @@ -172,6 +175,15 @@ default List> getAggregationExtentions() return emptyList(); } + /** + * Allows plugins to register new Aggregation in the {@link CompositeAggregation}. + * + * @return A {@link List} of {@link CompositeAggregationSpec} + */ + default List getCompositeAggregations() { + return emptyList(); + } + /** * The new {@link PipelineAggregator}s added by this plugin. */ @@ -532,6 +544,76 @@ public AggregationSpec setAggregatorRegistrar(Consumer aggregatorRegistrar; + private final Class valueSourceBuilderClass; + @Deprecated + /** This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of + * byte code + */ + private Byte byteCode; + private final CompositeAggregationParsingFunction parsingFunction; + private final String aggregationType; + private final Writeable.Reader> reader; + + /** + * Specification for registering an aggregation in Composite Aggregation + * + * @param aggregatorRegistrar function to register the + * {@link org.opensearch.search.aggregations.support.ValuesSource} to aggregator mappings for Composite + * aggregation + * @param valueSourceBuilderClass ValueSourceBuilder class name which is building the aggregation + * @param byteCode byte code which is used in serialisation and de-serialisation to indentify which + * aggregation builder to use + * @param reader Typically, a reference to a constructor that takes a {@link StreamInput}, which is + * registered with the aggregation + * @param parsingFunction a reference function which will be used to parse the Aggregation input. + * @param aggregationType a {@link String} defined in the AggregationBuilder as type. + */ + public CompositeAggregationSpec( + final Consumer aggregatorRegistrar, + final Class> valueSourceBuilderClass, + final Byte byteCode, + final Writeable.Reader> reader, + final CompositeAggregationParsingFunction parsingFunction, + final String aggregationType + ) { + this.aggregatorRegistrar = aggregatorRegistrar; + this.valueSourceBuilderClass = valueSourceBuilderClass; + this.byteCode = byteCode; + this.parsingFunction = parsingFunction; + this.aggregationType = aggregationType; + this.reader = reader; + } + + public Consumer getAggregatorRegistrar() { + return aggregatorRegistrar; + } + + public Class getValueSourceBuilderClass() { + return valueSourceBuilderClass; + } + + public Byte getByteCode() { + return byteCode; + } + + public CompositeAggregationParsingFunction getParsingFunction() { + return parsingFunction; + } + + public String getAggregationType() { + return aggregationType; + } + + public Writeable.Reader> getReader() { + return reader; + } + } + /** * Specification for a {@link PipelineAggregator}. 
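The CompositeAggregationSpec above lets plugins register their own composite value sources, keyed by an aggregation type string with a deprecated byte code retained only for wire backward compatibility. A generic, JDK-only sketch of that spec-plus-registry shape; the names are illustrative and this is not the SearchPlugin API:

import java.util.*;
import java.util.function.Supplier;

public class CompositeSpecRegistrySketch {
    // A registrable source: its type name, an optional legacy byte code, and a factory for its builder.
    record Spec(String aggregationType, Byte legacyByteCode, Supplier<Object> builderFactory) {}

    private final Map<String, Spec> byType = new HashMap<>();
    private final Map<Byte, Spec> byLegacyCode = new HashMap<>();

    void register(Spec spec) {
        byType.put(spec.aggregationType(), spec);
        if (spec.legacyByteCode() != null) {
            byLegacyCode.put(spec.legacyByteCode(), spec); // older nodes still identify the source by byte code
        }
    }

    Object createBuilder(String type) {
        return Objects.requireNonNull(byType.get(type), "unknown composite source: " + type).builderFactory().get();
    }

    public static void main(String[] args) {
        CompositeSpecRegistrySketch registry = new CompositeSpecRegistrySketch();
        registry.register(new Spec("terms", (byte) 0, () -> "TermsValuesSourceBuilder"));
        registry.register(new Spec("geotile_grid", (byte) 3, () -> "GeoTileGridValuesSourceBuilder"));
        System.out.println(registry.createBuilder("geotile_grid"));
    }
}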
*/ diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java new file mode 100644 index 0000000000000..f48b3a38f77e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.PUT; + +/** + * Registers decommission action + * + * @opensearch.api + */ +public class RestDecommissionAction extends BaseRestHandler { + + private static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(300L); + + @Override + public List routes() { + return singletonList(new Route(PUT, "/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}")); + } + + @Override + public String getName() { + return "decommission_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DecommissionRequest decommissionRequest = createRequest(request); + return channel -> client.admin().cluster().decommission(decommissionRequest, new RestToXContentListener<>(channel)); + } + + DecommissionRequest createRequest(RestRequest request) throws IOException { + String attributeName = null; + String attributeValue = null; + DecommissionRequest decommissionRequest = Requests.decommissionRequest(); + if (request.hasParam("awareness_attribute_name")) { + attributeName = request.param("awareness_attribute_name"); + } + + if (request.hasParam("awareness_attribute_value")) { + attributeValue = request.param("awareness_attribute_value"); + } + return decommissionRequest.setDecommissionAttribute(new DecommissionAttribute(attributeName, attributeValue)) + .setTimeout(TimeValue.parseTimeValue(request.param("timeout"), DEFAULT_TIMEOUT, getClass().getSimpleName() + ".timeout")); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionAction.java new file mode 100644 index 0000000000000..bad6ddf7b6b4c --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionAction.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +public class RestDeleteDecommissionAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(DELETE, "/_cluster/decommission/awareness")); + } + + @Override + public String getName() { + return "delete_decommission_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteDecommissionRequest deleteDecommissionRequest = Requests.deleteDecommissionRequest(); + return channel -> client.admin().cluster().deleteDecommission(deleteDecommissionRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java new file mode 100644 index 0000000000000..2caca50501914 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
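The decommission endpoints registered above can be exercised with any HTTP client; a hedged java.net.http sketch against a local node, where the host, port, and the zone/zone-1 attribute are placeholders:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DecommissionRestSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:9200";

        // PUT /_cluster/decommission/awareness/{attribute_name}/{attribute_value}?timeout=300s
        HttpRequest decommission = HttpRequest.newBuilder(
                URI.create(base + "/_cluster/decommission/awareness/zone/zone-1?timeout=300s"))
            .PUT(HttpRequest.BodyPublishers.noBody())
            .build();
        System.out.println(client.send(decommission, HttpResponse.BodyHandlers.ofString()).body());

        // GET /_cluster/decommission/awareness/_status to inspect the decommissioned attribute
        HttpRequest status = HttpRequest.newBuilder(URI.create(base + "/_cluster/decommission/awareness/_status")).GET().build();
        System.out.println(client.send(status, HttpResponse.BodyHandlers.ofString()).body());

        // DELETE /_cluster/decommission/awareness to clear the decommission
        HttpRequest clear = HttpRequest.newBuilder(URI.create(base + "/_cluster/decommission/awareness")).DELETE().build();
        System.out.println(client.send(clear, HttpResponse.BodyHandlers.ofString()).body());
    }
}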
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Returns decommissioned attribute information + * + * @opensearch.api + */ +public class RestGetDecommissionStateAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(GET, "/_cluster/decommission/awareness/_status")); + } + + @Override + public String getName() { + return "get_decommission_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + GetDecommissionStateRequest getDecommissionStateRequest = Requests.getDecommissionStateRequest(); + getDecommissionStateRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", getDecommissionStateRequest.clusterManagerNodeTimeout()) + ); + return channel -> client.admin().cluster().getDecommission(getDecommissionStateRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index a8cdff5775478..f04d0ab712b39 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -597,6 +597,24 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell("pri.search.scroll_total", "default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "sibling:pri;alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell("pri.search.point_in_time_current", "default:false;text-align:right;desc:open point in time contexts"); + + table.addCell( + "search.point_in_time_time", + "sibling:pri;alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell("pri.search.point_in_time_time", "default:false;text-align:right;desc:time point in time contexts held open"); + + table.addCell( + "search.point_in_time_total", + "sibling:pri;alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + table.addCell("pri.search.point_in_time_total", "default:false;text-align:right;desc:completed point in time contexts"); + table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments"); @@ -878,6 +896,15 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCount()); + table.addCell(totalStats.getSearch() == null ? 
null : totalStats.getSearch().getTotal().getPitCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCurrent()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitTime()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCount()); + table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount()); table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 8d3081bec48e9..6346e5d23cd34 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -310,6 +310,19 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); table.addCell( @@ -519,6 +532,9 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCount()); SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments(); table.addCell(segmentsStats == null ? 
null : segmentsStats.getCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 6bf24951fe6c9..5cb5a7876669e 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -225,6 +225,18 @@ protected Table getTableWithHeader(final RestRequest request) { "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open" ); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "alias:spc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:spti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -390,6 +402,9 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java new file mode 100644 index 0000000000000..9439670880015 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest action for creating PIT context + */ +public class RestCreatePitAction extends BaseRestHandler { + public static String ALLOW_PARTIAL_PIT_CREATION = "allow_partial_pit_creation"; + public static String KEEP_ALIVE = "keep_alive"; + + @Override + public String getName() { + return "create_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + boolean allowPartialPitCreation = request.paramAsBoolean(ALLOW_PARTIAL_PIT_CREATION, true); + String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + TimeValue keepAlive = request.paramAsTime(KEEP_ALIVE, null); + CreatePitRequest createPitRequest = new CreatePitRequest(keepAlive, allowPartialPitCreation, indices); + createPitRequest.setIndicesOptions(IndicesOptions.fromRequest(request, createPitRequest.indicesOptions())); + createPitRequest.setPreference(request.param("preference")); + createPitRequest.setRouting(request.param("routing")); + + return channel -> client.createPit(createPitRequest, new RestStatusToXContentListener<>(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/_search/point_in_time"))); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java new file mode 100644 index 0000000000000..452e66f8f5018 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
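RestCreatePitAction above registers POST /{index}/_search/point_in_time, and RestDeletePitAction (continuing below) registers the matching DELETE routes. A hedged java.net.http sketch of creating a point-in-time context and then deleting all open contexts; the index name and host are placeholders and response parsing is omitted:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PointInTimeRestSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:9200";

        // POST /{index}/_search/point_in_time?keep_alive=1m&allow_partial_pit_creation=true
        HttpRequest create = HttpRequest.newBuilder(
                URI.create(base + "/my-index/_search/point_in_time?keep_alive=1m&allow_partial_pit_creation=true"))
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        System.out.println(client.send(create, HttpResponse.BodyHandlers.ofString()).body()); // response carries the PIT id

        // DELETE /_search/point_in_time/_all closes every open PIT context on the cluster
        HttpRequest deleteAll = HttpRequest.newBuilder(URI.create(base + "/_search/point_in_time/_all"))
            .DELETE()
            .build();
        System.out.println(client.send(deleteAll, HttpResponse.BodyHandlers.ofString()).body());
    }
}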
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action for deleting PIT contexts + */ +public class RestDeletePitAction extends BaseRestHandler { + + @Override + public String getName() { + return "delete_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String allPitIdsQualifier = "_all"; + final DeletePitRequest deletePITRequest; + if (request.path().contains(allPitIdsQualifier)) { + deletePITRequest = new DeletePitRequest(asList(allPitIdsQualifier)); + } else { + deletePITRequest = new DeletePitRequest(); + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + try { + deletePITRequest.fromXContent(xContentParser); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + })); + } + return channel -> client.deletePits(deletePITRequest, new RestStatusToXContentListener(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(DELETE, "/_search/point_in_time"), new Route(DELETE, "/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 7e7e4f83334f5..84c46e400543a 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -47,7 +47,7 @@ import org.opensearch.common.time.DateUtils; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.mapper.DateFieldMapper; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.io.IOException; import java.math.BigInteger; diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 80e025a3651a8..0149f9a025bcd 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -126,10 +126,6 @@ import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.bucket.filter.InternalFilters; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.global.InternalGlobal; import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -628,22 +624,6 @@ private 
ValuesSourceRegistry registerAggregations(List plugins) { ).addResultReader(InternalGeoDistance::new).setAggregatorRegistrar(GeoDistanceAggregationBuilder::registerAggregators), builder ); - registerAggregation( - new AggregationSpec( - GeoHashGridAggregationBuilder.NAME, - GeoHashGridAggregationBuilder::new, - GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators), - builder - ); - registerAggregation( - new AggregationSpec( - GeoTileGridAggregationBuilder.NAME, - GeoTileGridAggregationBuilder::new, - GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators), - builder - ); registerAggregation( new AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new, NestedAggregationBuilder::parse) .addResultReader(InternalNested::new), @@ -681,7 +661,7 @@ private ValuesSourceRegistry registerAggregations(List plugins) { registerAggregation( new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, CompositeAggregationBuilder.PARSER) .addResultReader(InternalComposite::new) - .setAggregatorRegistrar(CompositeAggregationBuilder::registerAggregators), + .setAggregatorRegistrar(reg -> CompositeAggregationBuilder.registerAggregators(reg, plugins)), builder ); registerAggregation( diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 4bd95da193668..04fab85c163a9 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -881,6 +881,7 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL shard.awaitShardSearchActive(ignored -> { Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; + Releasable decreasePitContexts = openPitContexts::decrementAndGet; try { if (openPitContexts.incrementAndGet() > maxOpenPitContext) { throw new OpenSearchRejectedExecutionException( @@ -902,15 +903,16 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL searchOperationListener.onNewPitContext(finalReaderContext); readerContext.addOnClose(() -> { - openPitContexts.decrementAndGet(); searchOperationListener.onFreeReaderContext(finalReaderContext); searchOperationListener.onFreePitContext(finalReaderContext); }); + readerContext.addOnClose(decreasePitContexts); // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); } catch (Exception exc) { + Releasables.closeWhileHandlingException(decreasePitContexts); Releasables.closeWhileHandlingException(searcherSupplier, readerContext); listener.onFailure(exc); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java index 382455093309d..9886e423bbc76 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java @@ -43,10 +43,6 @@ import org.opensearch.search.aggregations.bucket.filter.Filters; import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import 
org.opensearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -261,20 +257,6 @@ public static HistogramAggregationBuilder histogram(String name) { return new HistogramAggregationBuilder(name); } - /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. - */ - public static GeoHashGridAggregationBuilder geohashGrid(String name) { - return new GeoHashGridAggregationBuilder(name); - } - - /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. - */ - public static GeoTileGridAggregationBuilder geotileGrid(String name) { - return new GeoTileGridAggregationBuilder(name); - } - /** * Create a new {@link SignificantTerms} aggregation with the given name. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtils.java rename to server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java index 5498b2b1a7109..6cd1823622f01 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtils.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.search.aggregations.bucket; import org.apache.lucene.geo.GeoEncodingUtils; import org.apache.lucene.util.SloppyMath; @@ -104,7 +104,7 @@ private GeoTileUtils() {} * @param parser {@link XContentParser} to parse the value from * @return int representing precision */ - static int parsePrecision(XContentParser parser) throws IOException, OpenSearchParseException { + public static int parsePrecision(XContentParser parser) throws IOException, OpenSearchParseException { final Object node = parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER) ? 
Integer.valueOf(parser.intValue()) : parser.text(); @@ -252,7 +252,7 @@ public static String stringEncode(long hash) { /** * Decode long hash as a GeoPoint (center of the tile) */ - static GeoPoint hashToGeoPoint(long hash) { + public static GeoPoint hashToGeoPoint(long hash) { int[] res = parseHash(hash); return zxyToGeoPoint(res[0], res[1], res[2]); } @@ -260,7 +260,7 @@ static GeoPoint hashToGeoPoint(long hash) { /** * Decode a string bucket key in "zoom/x/y" format to a GeoPoint (center of the tile) */ - static GeoPoint keyToGeoPoint(String hashAsString) { + public static GeoPoint keyToGeoPoint(String hashAsString) { int[] hashAsInts = parseHash(hashAsString); return zxyToGeoPoint(hashAsInts[0], hashAsInts[1], hashAsInts[2]); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 8b07df3f689bf..093c2ad42722e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -35,9 +35,11 @@ import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.xcontent.ConstructingObjectParser; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.AbstractAggregationBuilder; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; @@ -47,11 +49,14 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -82,14 +87,55 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder p.map(), AFTER_FIELD_NAME); } - public static void registerAggregators(ValuesSourceRegistry.Builder builder) { + static final Map, Byte> BUILDER_CLASS_TO_BYTE_CODE = new HashMap<>(); + static final Map BUILDER_TYPE_TO_PARSER = new HashMap<>(); + static final Map>> BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER = + new HashMap<>(); + static final Map< + String, + Writeable.Reader>> AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER = new HashMap<>(); + static final Map, String> BUILDER_CLASS_TO_AGGREGATION_TYPE = new HashMap<>(); + + public static void registerAggregators(ValuesSourceRegistry.Builder builder, final List plugins) { DateHistogramValuesSourceBuilder.register(builder); HistogramValuesSourceBuilder.register(builder); - GeoTileGridValuesSourceBuilder.register(builder); TermsValuesSourceBuilder.register(builder); + // Register All other aggregations that wants to be part of Composite Aggregation which are provided in + // Plugins along with their parsers and serialisation codes + registerCompositeAggregatorsPlugins(plugins, SearchPlugin::getCompositeAggregations, (compositeAggregationSpec) -> { + compositeAggregationSpec.getAggregatorRegistrar().accept(builder); + 
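With parsePrecision, hashToGeoPoint and keyToGeoPoint now public (and GeoTileUtils moved to the bucket package), tile keys can be produced and decoded outside the geogrid aggregations. A small sketch; it assumes the existing public longEncode(longitude, latitude, precision) encoder in the same class, and the coordinates are arbitrary:

import org.opensearch.common.geo.GeoPoint;
import org.opensearch.search.aggregations.bucket.GeoTileUtils;

public final class GeoTileExample {
    public static void main(String[] args) {
        long hash = GeoTileUtils.longEncode(13.4050, 52.5200, 7); // encode lon/lat into a zoom-7 tile
        String key = GeoTileUtils.stringEncode(hash);             // "zoom/x/y" form of the same tile
        GeoPoint center = GeoTileUtils.keyToGeoPoint(key);        // centre point of that tile
        System.out.println(key + " -> " + center);
    }
}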
BUILDER_TYPE_TO_PARSER.put(compositeAggregationSpec.getAggregationType(), compositeAggregationSpec.getParsingFunction()); + // This is added for backward compatibility, so that we can move away from byte code in the serialisation + if (compositeAggregationSpec.getByteCode() != null) { + BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER.put( + (int) compositeAggregationSpec.getByteCode(), + compositeAggregationSpec.getReader() + ); + BUILDER_CLASS_TO_BYTE_CODE.put( + compositeAggregationSpec.getValueSourceBuilderClass(), + compositeAggregationSpec.getByteCode() + ); + } + AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER.put( + compositeAggregationSpec.getAggregationType(), + compositeAggregationSpec.getReader() + ); + BUILDER_CLASS_TO_AGGREGATION_TYPE.put( + compositeAggregationSpec.getValueSourceBuilderClass(), + compositeAggregationSpec.getAggregationType() + ); + }); builder.registerUsage(NAME); } + private static void registerCompositeAggregatorsPlugins( + final List plugins, + final Function> producer, + final Consumer consumer + ) { + plugins.forEach(searchPlugin -> producer.apply(searchPlugin).forEach(consumer)); + } + private List> sources; private Map after; private int size = 10; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java new file mode 100644 index 0000000000000..344563ad20309 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.composite; + +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * A functional interface which encapsulates the parsing function to be called for the aggregation which is + * also registered as CompositeAggregation. + */ +@FunctionalInterface +public interface CompositeAggregationParsingFunction { + CompositeValuesSourceBuilder parse(final String name, final XContentParser parser) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 7764d367a0cec..26015ae04cf76 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -69,11 +69,11 @@ public abstract class CompositeValuesSourceBuilder createValuesSource( * @param missingBucket If true an explicit null bucket will represent documents with missing values. * @param hasScript true if the source contains a script that can change the value. */ - CompositeValuesSourceConfig( + public CompositeValuesSourceConfig( String name, @Nullable MappedFieldType fieldType, ValuesSource vs, @@ -113,21 +113,21 @@ SingleDimensionValuesSource createValuesSource( /** * Returns the name associated with this configuration. */ - String name() { + protected String name() { return name; } /** * Returns the {@link MappedFieldType} for this config. 
*/ - MappedFieldType fieldType() { + public MappedFieldType fieldType() { return fieldType; } /** * Returns the {@link ValuesSource} for this configuration. */ - ValuesSource valuesSource() { + public ValuesSource valuesSource() { return vs; } @@ -135,35 +135,35 @@ ValuesSource valuesSource() { * The {@link DocValueFormat} to use for formatting the keys. * {@link DocValueFormat#RAW} means no formatting. */ - DocValueFormat format() { + public DocValueFormat format() { return format; } /** * If true, an explicit `null bucket represents documents with missing values. */ - boolean missingBucket() { + public boolean missingBucket() { return missingBucket; } /** * Return the {@link MissingOrder} for the config. */ - MissingOrder missingOrder() { + public MissingOrder missingOrder() { return missingOrder; } /** * Returns true if the source contains a script that can change the value. */ - boolean hasScript() { + protected boolean hasScript() { return hasScript; } /** * The sort order for the values source (e.g. -1 for descending and 1 for ascending). */ - int reverseMul() { + public int reverseMul() { assert reverseMul == -1 || reverseMul == 1; return reverseMul; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index 60d7f277f7650..d8526e684f391 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -49,6 +49,11 @@ import java.io.IOException; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_CLASS_TO_AGGREGATION_TYPE; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_CLASS_TO_BYTE_CODE; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_TYPE_TO_PARSER; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER; /** * Helper class for obtaining values source parsers for different aggs @@ -57,7 +62,11 @@ */ public class CompositeValuesSourceParserHelper { - static , T> void declareValuesSourceFields(AbstractObjectParser objectParser) { + private static final int AGGREGATION_TYPE_REFERENCE = Byte.MAX_VALUE; + + public static , T> void declareValuesSourceFields( + AbstractObjectParser objectParser + ) { objectParser.declareField(VB::field, XContentParser::text, new ParseField("field"), ObjectParser.ValueType.STRING); objectParser.declareBoolean(VB::missingBucket, new ParseField("missing_bucket")); objectParser.declareString(VB::missingOrder, new ParseField(MissingOrder.NAME)); @@ -78,28 +87,45 @@ static , T> void declareValuesSource } public static void writeTo(CompositeValuesSourceBuilder builder, StreamOutput out) throws IOException { - final byte code; + int code = Byte.MIN_VALUE; + String aggregationType = null; if (builder.getClass() == TermsValuesSourceBuilder.class) { code = 0; } else if (builder.getClass() == DateHistogramValuesSourceBuilder.class) { code 
= 1; } else if (builder.getClass() == HistogramValuesSourceBuilder.class) { code = 2; - } else if (builder.getClass() == GeoTileGridValuesSourceBuilder.class) { - if (out.getVersion().before(LegacyESVersion.V_7_5_0)) { - throw new IOException( - "Attempting to serialize [" - + builder.getClass().getSimpleName() - + "] to a node with unsupported version [" - + out.getVersion() - + "]" - ); - } - code = 3; } else { - throw new IOException("invalid builder type: " + builder.getClass().getSimpleName()); + if (!BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass()) + && !BUILDER_CLASS_TO_AGGREGATION_TYPE.containsKey(builder.getClass())) { + throw new IOException("invalid builder type: " + builder.getClass().getSimpleName()); + } + aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass()); + if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { + code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass()); + if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) { + throw new IOException( + "Attempting to serialize [" + + builder.getClass().getSimpleName() + + "] to a node with unsupported version [" + + out.getVersion() + + "]" + ); + } + } + } + + if (code != Byte.MIN_VALUE) { + out.writeByte((byte) code); + } else if (!BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { + /* + * This is added for backward compatibility when 1 data node is using the new code which is using the + * aggregation type and another is using the only byte code in the serialisation. + */ + out.writeByte((byte) AGGREGATION_TYPE_REFERENCE); + assert aggregationType != null; + out.writeString(aggregationType); } - out.writeByte(code); builder.writeTo(out); } @@ -112,10 +138,17 @@ public static CompositeValuesSourceBuilder readFrom(StreamInput in) throws IO return new DateHistogramValuesSourceBuilder(in); case 2: return new HistogramValuesSourceBuilder(in); - case 3: - return new GeoTileGridValuesSourceBuilder(in); + case AGGREGATION_TYPE_REFERENCE: + final String aggregationType = in.readString(); + if (!AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER.containsKey(aggregationType)) { + throw new IOException("Invalid aggregation type " + aggregationType); + } + return (CompositeValuesSourceBuilder) AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER.get(aggregationType).read(in); default: - throw new IOException("Invalid code " + code); + if (!BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER.containsKey(code)) { + throw new IOException("Invalid code " + code); + } + return (CompositeValuesSourceBuilder) BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER.get(code).read(in); } } @@ -143,11 +176,11 @@ public static CompositeValuesSourceBuilder fromXContent(XContentParser parser case HistogramValuesSourceBuilder.TYPE: builder = HistogramValuesSourceBuilder.parse(name, parser); break; - case GeoTileGridValuesSourceBuilder.TYPE: - builder = GeoTileGridValuesSourceBuilder.parse(name, parser); - break; default: - throw new ParsingException(parser.getTokenLocation(), "invalid source type: " + type); + if (!BUILDER_TYPE_TO_PARSER.containsKey(type)) { + throw new ParsingException(parser.getTokenLocation(), "invalid source type: " + type); + } + builder = BUILDER_TYPE_TO_PARSER.get(type).parse(name, parser); } parser.nextToken(); parser.nextToken(); @@ -163,4 +196,5 @@ public static XContentBuilder toXContent(CompositeValuesSourceBuilder source, builder.endObject(); return builder; } + } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java 
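The serialisation scheme above keeps the legacy single-byte codes for the built-in sources (terms, date_histogram, histogram) and falls back to Byte.MAX_VALUE followed by the aggregation-type string for plugin-provided sources that registered no byte code. A minimal round-trip sketch for a built-in source (field names are placeholders); it exercises only the legacy byte-code path:

import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceParserHelper;
import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;

public final class CompositeSourceWireExample {
    public static void main(String[] args) throws Exception {
        TermsValuesSourceBuilder source = new TermsValuesSourceBuilder("category").field("category.keyword");
        BytesStreamOutput out = new BytesStreamOutput();
        CompositeValuesSourceParserHelper.writeTo(source, out);   // terms still writes legacy code 0
        StreamInput in = out.bytes().streamInput();
        CompositeValuesSourceBuilder<?> roundTripped = CompositeValuesSourceParserHelper.readFrom(in);
        System.out.println("read back a " + roundTripped.getClass().getSimpleName());
    }
}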
b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java index a7ed50507288d..ec6410c2a9377 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -66,7 +66,7 @@ * * @opensearch.internal */ -class LongValuesSource extends SingleDimensionValuesSource { +public class LongValuesSource extends SingleDimensionValuesSource { private final BigArrays bigArrays; private final CheckedFunction docValuesFunc; private final LongUnaryOperator rounding; @@ -76,7 +76,7 @@ class LongValuesSource extends SingleDimensionValuesSource { private long currentValue; private boolean missingCurrentValue; - LongValuesSource( + public LongValuesSource( BigArrays bigArrays, MappedFieldType fieldType, CheckedFunction docValuesFunc, @@ -165,7 +165,7 @@ private int compareValues(long v1, long v2) { } @Override - void setAfter(Comparable value) { + protected void setAfter(Comparable value) { if (missingBucket && value == null) { afterValue = null; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index 747a7017ec872..fe0801d6d230e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -53,7 +53,7 @@ * * @opensearch.internal */ -abstract class SingleDimensionValuesSource> implements Releasable { +public abstract class SingleDimensionValuesSource> implements Releasable { protected final BigArrays bigArrays; protected final DocValueFormat format; @Nullable diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index f36c4620d5b33..b4da1d10b4b68 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -35,7 +35,6 @@ import org.opensearch.search.aggregations.bucket.composite.InternalComposite; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.bucket.filter.InternalFilters; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoGrid; import org.opensearch.search.aggregations.bucket.global.InternalGlobal; import org.opensearch.search.aggregations.bucket.histogram.InternalVariableWidthHistogram; import org.opensearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; @@ -119,10 +118,6 @@ public static boolean hasValue(InternalFilter agg) { return agg.getDocCount() > 0; } - public static boolean hasValue(InternalGeoGrid agg) { - return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); - } - public static boolean hasValue(InternalGlobal agg) { return agg.getDocCount() > 0; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java index e16e8c91b3fd0..dd2c16f1daa0e 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java @@ -43,7 +43,7 @@ * output). A class hierarchy defines the type of values returned by the source. The top level sub-classes define type-specific behavior, * such as {@link org.opensearch.search.aggregations.support.ValuesSource.Numeric#isFloatingPoint()}. Second level subclasses are * then specialized based on where they read values from, e.g. script or field cases. There are also adapter classes like - * {@link org.opensearch.search.aggregations.bucket.geogrid.CellIdSource} which do run-time conversion from one type to another, often + * org.opensearch.search.aggregations.bucket.geogrid.CellIdSource which do run-time conversion from one type to another, often * dependent on a user specified parameter (precision in that case). *

* diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java index 98e84136a8847..b24a8a4172e29 100644 --- a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java @@ -9,12 +9,17 @@ package org.opensearch.search.internal; import org.apache.lucene.util.SetOnce; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.Segment; import org.opensearch.index.shard.IndexShard; +import java.util.Collections; +import java.util.List; + /** * PIT reader context containing PIT specific information such as pit id, create time etc. */ @@ -24,6 +29,15 @@ public class PitReaderContext extends ReaderContext { private final SetOnce pitId = new SetOnce<>(); // Creation time of PIT contexts which helps users to differentiate between multiple PIT reader contexts private final SetOnce creationTime = new SetOnce<>(); + /** + * Shard routing at the time of creation of PIT Reader Context + */ + private final ShardRouting shardRouting; + + /** + * Encapsulates segments constituting the shard at the time of creation of PIT Reader Context. + */ + private final List segments; public PitReaderContext( ShardSearchContextId id, @@ -34,6 +48,8 @@ public PitReaderContext( boolean singleSession ) { super(id, indexService, indexShard, searcherSupplier, keepAliveInMillis, singleSession); + shardRouting = indexShard.routingEntry(); + segments = indexShard.segments(true); } public String getPitId() { @@ -67,4 +83,12 @@ public long getCreationTime() { public void setCreationTime(final long creationTime) { this.creationTime.set(creationTime); } + + public ShardRouting getShardRouting() { + return shardRouting; + } + + public List getSegments() { + return Collections.unmodifiableList(segments); + } } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 5a93d7c0bd86e..ff2bb77531486 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -49,6 +49,8 @@ import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.CoordinationStateRejectedException; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; +import org.opensearch.cluster.decommission.DecommissioningFailedException; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.IllegalShardRoutingStateException; import org.opensearch.cluster.routing.ShardRouting; @@ -80,6 +82,7 @@ import org.opensearch.index.seqno.RetentionLeaseNotFoundException; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndexTemplateMissingException; @@ -858,6 +861,9 @@ public void testIds() { ids.put(159, NodeHealthCheckFailureException.class); ids.put(160, 
NoSeedNodeLeftException.class); ids.put(161, ReplicationFailedException.class); + ids.put(162, PrimaryShardClosedException.class); + ids.put(163, DecommissioningFailedException.class); + ids.put(164, NodeDecommissionedException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java new file mode 100644 index 0000000000000..32f4fcf99f565 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class GetDecommissionStateResponseTests extends AbstractXContentTestCase { + @Override + protected GetDecommissionStateResponse createTestInstance() { + DecommissionStatus status = DecommissionStatus.fromValue((byte) randomIntBetween(0, 5)); + String attributeName = randomAlphaOfLength(10); + String attributeValue = randomAlphaOfLength(10); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + return new GetDecommissionStateResponse(decommissionAttribute, status); + } + + @Override + protected GetDecommissionStateResponse doParseInstance(XContentParser parser) throws IOException { + return GetDecommissionStateResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java new file mode 100644 index 0000000000000..fe03fc42d4ba4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DecommissionRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + String attributeName = "zone"; + String attributeValue = "zone-1"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + TimeValue timeout = TimeValue.timeValueMillis(between(0, 30000)); + final DecommissionRequest originalRequest = new DecommissionRequest(decommissionAttribute, timeout); + + final DecommissionRequest deserialized = copyWriteable(originalRequest, writableRegistry(), DecommissionRequest::new); + + assertEquals(deserialized.getDecommissionAttribute(), originalRequest.getDecommissionAttribute()); + assertEquals(deserialized.getTimeout(), originalRequest.getTimeout()); + } + + public void testValidation() { + { + String attributeName = null; + String attributeValue = "test"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + TimeValue timeout = TimeValue.timeValueMillis(between(0, 30000)); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute, timeout); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute name is missing")); + } + { + String attributeName = "zone"; + String attributeValue = ""; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + TimeValue timeout = TimeValue.timeValueMillis(between(0, 30000)); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute, timeout); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute value is missing")); + } + { + String attributeName = "zone"; + String attributeValue = "test"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + TimeValue timeout = TimeValue.timeValueMillis(between(0, 30000)); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute, timeout); + ActionRequestValidationException e = request.validate(); + assertNull(e); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java new file mode 100644 index 0000000000000..5ee5a5f3cf016 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DecommissionResponseTests extends OpenSearchTestCase { + public void testSerialization() throws IOException { + final DecommissionResponse originalRequest = new DecommissionResponse(true); + copyWriteable(originalRequest, writableRegistry(), DecommissionResponse::new); + // there are no fields so we're just checking that this doesn't throw anything + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java new file mode 100644 index 0000000000000..f0d3db71c27b7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.get; + +import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.is; + +public class GetIndexRequestTests extends OpenSearchTestCase { + public void testGetIndexRequestExtendsClusterInfoRequestOfDeprecatedClassPath() { + GetIndexRequest getIndexRequest = new GetIndexRequest().indices("test"); + assertThat(getIndexRequest instanceof ClusterInfoRequest, is(true)); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index a5c6e1c12b79c..c03c27f7d7e4d 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -14,6 +14,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.StepListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -70,6 +71,8 @@ public class CreatePitControllerTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + Settings settings = Settings.builder().put("node.name", CreatePitControllerTests.class.getSimpleName()).build(); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -219,7 +222,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -308,7 +311,7 @@ public void sendFreePITContexts( CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService 
pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -406,7 +409,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -494,7 +497,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java index 433cd9dfa3e89..60a31c62dc32d 100644 --- a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java +++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java @@ -14,6 +14,9 @@ import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; import org.opensearch.client.Client; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.util.concurrent.AtomicArray; @@ -33,6 +36,8 @@ import java.util.Map; import java.util.concurrent.ExecutionException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.opensearch.test.OpenSearchTestCase.between; import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; import static org.opensearch.test.OpenSearchTestCase.randomBoolean; @@ -107,7 +112,7 @@ public static void assertUsingGetAllPits(Client client, String id, long creation GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); GetAllPitNodesResponse getPitResponse = execute1.get(); - Assert.assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id)); + assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id)); Assert.assertEquals(getPitResponse.getPitInfos().get(0).getCreationTime(), creationTime); } @@ -128,4 +133,20 @@ public static void assertGetAllPitsEmpty(Client client) throws ExecutionExceptio GetAllPitNodesResponse getPitResponse = execute1.get(); Assert.assertEquals(0, getPitResponse.getPitInfos().size()); } + + public static void assertSegments(boolean isEmpty, 
String index, long expectedShardSize, Client client) { + PitSegmentsRequest pitSegmentsRequest = new PitSegmentsRequest("_all"); + IndicesSegmentResponse indicesSegmentResponse = client.execute(PitSegmentsAction.INSTANCE, pitSegmentsRequest).actionGet(); + assertTrue(indicesSegmentResponse.getShardFailures() == null || indicesSegmentResponse.getShardFailures().length == 0); + assertEquals(indicesSegmentResponse.getIndices().isEmpty(), isEmpty); + if (!isEmpty) { + assertTrue(indicesSegmentResponse.getIndices().get(index) != null); + assertTrue(indicesSegmentResponse.getIndices().get(index).getIndex().equalsIgnoreCase(index)); + assertEquals(expectedShardSize, indicesSegmentResponse.getIndices().get(index).getShards().size()); + } + } + + public static void assertSegments(boolean isEmpty, Client client) { + assertSegments(isEmpty, "index", 2, client); + } } diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 7a1d9a6fe963c..bdc0440a89f69 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -13,6 +13,7 @@ import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; @@ -62,6 +63,7 @@ public class TransportDeletePitActionTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); private ThreadPool threadPool = new ThreadPool(settings); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -165,7 +167,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -229,7 +231,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -312,7 +314,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -371,7 +373,7 @@ public 
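Outside of tests, the same PitSegmentsAction can be used to inspect which segments are held by open PIT contexts. A short sketch, assuming a Client is in scope; "_all" addresses every open PIT, exactly as in the helper above:

import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.opensearch.action.admin.indices.segments.PitSegmentsAction;
import org.opensearch.action.admin.indices.segments.PitSegmentsRequest;
import org.opensearch.client.Client;

public final class PitSegmentsExample {
    // Prints, per index, how many shards currently report segments for any open PIT.
    static void printPitSegmentCounts(Client client) {
        PitSegmentsRequest request = new PitSegmentsRequest("_all");
        IndicesSegmentResponse response = client.execute(PitSegmentsAction.INSTANCE, request).actionGet();
        response.getIndices().forEach((index, segments) ->
            System.out.println(index + ": " + segments.getShards().size() + " shard(s)")
        );
    }
}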
Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -439,7 +441,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -505,7 +507,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -581,7 +583,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -661,7 +663,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); diff --git a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java index ec0cefed842cd..66d3b843529ab 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java @@ -38,6 +38,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -102,7 +103,7 @@ public void testAllocationIdActionWillBeCancelledOnClose() { pendingReplication.addPendingAction(allocationId, action); action.run(); pendingReplication.close(); - expectThrows(IndexShardClosedException.class, future::actionGet); + 
expectThrows(PrimaryShardClosedException.class, future::actionGet); } private class TestAction extends RetryableAction { diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 4da32a890fd0e..137aca4966936 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.support.replication; +import org.hamcrest.MatcherAssert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; @@ -57,6 +58,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; @@ -91,6 +93,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.emptyArray; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -395,6 +398,48 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { } } + public void testPrimaryClosedDoesNotFailShard() { + final CapturingTransport transport = new CapturingTransport(); + final TransportService transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + final ShardStateAction shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); + final TestAction action = new TestAction( + Settings.EMPTY, + "internal:testAction", + transportService, + clusterService, + shardStateAction, + threadPool + ); + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary(index, true, 1, 0); + ClusterServiceUtils.setState(clusterService, state); + final long primaryTerm = state.metadata().index(index).primaryTerm(0); + final ShardRouting shardRouting = state.routingTable().shardRoutingTable(shardId).replicaShards().get(0); + + // Assert that failShardIfNeeded is a no-op for the PrimaryShardClosedException failure + final AtomicInteger callbackCount = new AtomicInteger(0); + action.newReplicasProxy() + .failShardIfNeeded( + shardRouting, + primaryTerm, + "test", + new PrimaryShardClosedException(shardId), + ActionListener.wrap(callbackCount::incrementAndGet) + ); + MatcherAssert.assertThat(transport.getCapturedRequestsAndClear(), emptyArray()); + MatcherAssert.assertThat(callbackCount.get(), equalTo(0)); + } + private class TestAction extends TransportWriteAction { private final boolean withDocumentFailureOnPrimary; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index 
a3c945cdbac3a..50e18f25aad5b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -33,6 +33,7 @@ import org.apache.logging.log4j.Level; import org.opensearch.Version; +import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterName; @@ -55,6 +56,7 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicReference; +import static org.mockito.Mockito.mock; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; import static org.opensearch.node.Node.NODE_NAME_SETTING; @@ -90,7 +92,8 @@ public void testJoinDeduplication() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> new StatusInfo(HEALTHY, "info") + () -> new StatusInfo(HEALTHY, "info"), + mock(ActionListener.class) ); transportService.start(); @@ -230,7 +233,8 @@ private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - null + null, + mock(ActionListener.class) ); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); @@ -284,7 +288,8 @@ public void testJoinFailureOnUnhealthyNodes() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> nodeHealthServiceStatus.get() + () -> nodeHealthServiceStatus.get(), + mock(ActionListener.class) ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 02e502e762561..a0c979f972a70 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -36,9 +36,14 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -48,7 +53,9 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; +import java.util.Collections; import java.util.HashSet; +import java.util.Map; import static org.hamcrest.Matchers.is; import static org.opensearch.test.VersionUtils.allVersions; @@ -216,4 +223,71 @@ public void testIsBecomeClusterManagerTask() { JoinTaskExecutor.Task joinTaskOfClusterManager = JoinTaskExecutor.newBecomeClusterManagerTask(); assertThat(joinTaskOfClusterManager.isBecomeClusterManagerTask(), is(true)); } + + public void 
testJoinClusterWithNoDecommission() { + Settings.builder().build(); + Metadata.Builder metaBuilder = Metadata.builder(); + Metadata metadata = metaBuilder.build(); + DiscoveryNode discoveryNode = newDiscoveryNode(Collections.singletonMap("zone", "zone-2")); + JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); + } + + public void testPreventJoinClusterWithDecommission() { + Settings.builder().build(); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.IN_PROGRESS, DecommissionStatus.SUCCESSFUL); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + decommissionStatus + ); + Metadata.Builder metaBuilder = Metadata.builder(); + metaBuilder.putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata); + Metadata metadata = metaBuilder.build(); + + DiscoveryNode discoveryNode = newDiscoveryNode(Collections.singletonMap("zone", "zone-1")); + expectThrows(NodeDecommissionedException.class, () -> JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata)); + } + + public void testJoinClusterWithDifferentDecommission() { + Settings.builder().build(); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.values()); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + decommissionStatus + ); + Metadata.Builder metaBuilder = Metadata.builder(); + metaBuilder.putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata); + Metadata metadata = metaBuilder.build(); + + DiscoveryNode discoveryNode = newDiscoveryNode(Collections.singletonMap("zone", "zone-2")); + JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); + } + + public void testJoinClusterWithDecommissionFailedOrInitOrRecommission() { + Settings.builder().build(); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.INIT, DecommissionStatus.FAILED); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + decommissionStatus + ); + Metadata.Builder metaBuilder = Metadata.builder(); + metaBuilder.putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata); + Metadata metadata = metaBuilder.build(); + + DiscoveryNode discoveryNode = newDiscoveryNode(Collections.singletonMap("zone", "zone-1")); + JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); + } + + private DiscoveryNode newDiscoveryNode(Map attributes) { + return new DiscoveryNode( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + buildNewFakeTransportAddress(), + attributes, + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + Version.CURRENT + ); + } } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java new file mode 100644 index 0000000000000..4b85fa39a91e1 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -0,0 +1,365 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
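Taken together, these tests pin down when ensureNodeCommissioned rejects a join: only a node carrying the decommissioned attribute value while the decommission is IN_PROGRESS or SUCCESSFUL is turned away; INIT, FAILED, a different attribute value, or no decommission metadata all admit the node. A purely illustrative helper stating that decision rule (the real check lives in JoinTaskExecutor, not here):

import org.opensearch.cluster.decommission.DecommissionStatus;

public final class DecommissionJoinRule {
    // True only when the joining node's attribute value matches the decommissioned value
    // and the decommission has actually taken effect.
    static boolean joinRejected(String decommissionedValue, DecommissionStatus status, String nodeValue) {
        boolean attributeMatches = decommissionedValue != null && decommissionedValue.equals(nodeValue);
        boolean decommissionEffective = status == DecommissionStatus.IN_PROGRESS || status == DecommissionStatus.SUCCESSFUL;
        return attributeMatches && decommissionEffective;
    }
}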
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.decommission; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; +import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateObserver; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; +import static org.opensearch.cluster.ClusterState.builder; +import static org.opensearch.cluster.OpenSearchAllocationTestCase.createAllocationService; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; + +public class DecommissionControllerTests extends OpenSearchTestCase { + + private static ThreadPool threadPool; + private static ClusterService clusterService; + private TransportService transportService; + private AllocationService allocationService; + private DecommissionController decommissionController; + private ClusterSettings clusterSettings; + + @Before + public void setTransportServiceAndDefaultClusterState() { + threadPool = new TestThreadPool("test", Settings.EMPTY); + allocationService = createAllocationService(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build(); + logger.info("--> adding five nodes on same zone_1"); + clusterState = addNodes(clusterState, "zone_1", "node1", "node2", "node3", "node4", "node5"); + logger.info("--> adding five nodes on same zone_2"); + clusterState = addNodes(clusterState, "zone_2", "node6", "node7", "node8", "node9", "node10"); + logger.info("--> adding five nodes on 
same zone_3"); + clusterState = addNodes(clusterState, "zone_3", "node11", "node12", "node13", "node14", "node15"); + clusterState = setLocalNodeAsClusterManagerNode(clusterState, "node1"); + clusterState = setThreeNodesInVotingConfig(clusterState); + final ClusterState.Builder builder = builder(clusterState); + clusterService = createClusterService(threadPool, clusterState.nodes().get("node1")); + setState(clusterService, builder); + final MockTransport transport = new MockTransport(); + transportService = transport.createTransportService( + Settings.EMPTY, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundTransportAddress -> clusterService.state().nodes().get("node1"), + null, + emptySet() + ); + + final Settings.Builder nodeSettingsBuilder = Settings.builder(); + final Settings nodeSettings = nodeSettingsBuilder.build(); + clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + new TransportAddVotingConfigExclusionsAction( + nodeSettings, + clusterSettings, + transportService, + clusterService, + threadPool, + new ActionFilters(emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)) + ); // registers action + + new TransportClearVotingConfigExclusionsAction( + transportService, + clusterService, + threadPool, + new ActionFilters(emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)) + ); // registers action + + transportService.start(); + transportService.acceptIncomingRequests(); + decommissionController = new DecommissionController(clusterService, transportService, allocationService, threadPool); + } + + @After + public void shutdownThreadPoolAndClusterService() { + clusterService.stop(); + threadPool.shutdown(); + } + + public void testAddNodesToVotingConfigExclusion() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(2); + + ClusterStateObserver clusterStateObserver = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); + clusterStateObserver.waitForNextChange(new AdjustConfigurationForExclusions(countDownLatch)); + Set nodesToRemoveFromVotingConfig = Collections.singleton(randomFrom("node1", "node6", "node11")); + decommissionController.excludeDecommissionedNodesFromVotingConfig(nodesToRemoveFromVotingConfig, new ActionListener() { + @Override + public void onResponse(Void unused) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("unexpected failure occurred while removing node from voting config " + e); + } + }); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + clusterService.getClusterApplierService().state().getVotingConfigExclusions().forEach(vce -> { + assertTrue(nodesToRemoveFromVotingConfig.contains(vce.getNodeId())); + assertEquals(nodesToRemoveFromVotingConfig.size(), 1); + }); + } + + public void testClearVotingConfigExclusions() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + decommissionController.clearVotingConfigExclusion(new ActionListener() { + @Override + public void onResponse(Void unused) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("unexpected failure occurred while clearing voting config exclusion" + e); + } + }); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + assertThat(clusterService.getClusterApplierService().state().getVotingConfigExclusions(), empty()); + } + + public void 
testNodesRemovedForDecommissionRequestSuccessfulResponse() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + Set nodesToBeRemoved = new HashSet<>(); + nodesToBeRemoved.add(clusterService.state().nodes().get("node11")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node12")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node13")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node14")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node15")); + + decommissionController.removeDecommissionedNodes( + nodesToBeRemoved, + "unit-test", + TimeValue.timeValueSeconds(30L), + new ActionListener() { + @Override + public void onResponse(Void unused) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("there shouldn't have been any failure"); + } + } + ); + + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + // test all 5 nodes removed and cluster has 10 nodes + Set nodes = StreamSupport.stream(clusterService.getClusterApplierService().state().nodes().spliterator(), false) + .collect(Collectors.toSet()); + assertEquals(nodes.size(), 10); + // test no remaining nodes are part of zone_3 + for (DiscoveryNode node : nodes) { + assertNotEquals(node.getAttributes().get("zone"), "zone_3"); + } + } + + public void testTimesOut() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + Set nodesToBeRemoved = new HashSet<>(); + nodesToBeRemoved.add(clusterService.state().nodes().get("node11")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node12")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node13")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node14")); + nodesToBeRemoved.add(clusterService.state().nodes().get("node15")); + decommissionController.removeDecommissionedNodes( + nodesToBeRemoved, + "unit-test-timeout", + TimeValue.timeValueMillis(2), + new ActionListener() { + @Override + public void onResponse(Void unused) { + fail("response shouldn't have been called"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, instanceOf(OpenSearchTimeoutException.class)); + assertThat(e.getMessage(), containsString("waiting for removal of decommissioned nodes")); + countDownLatch.countDown(); + } + } + ); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + public void testSuccessfulDecommissionStatusMetadataUpdate() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttributeMetadata oldMetadata = new DecommissionAttributeMetadata( + new DecommissionAttribute("zone", "zone-1"), + DecommissionStatus.IN_PROGRESS + ); + ClusterState state = clusterService.state(); + Metadata metadata = state.metadata(); + Metadata.Builder mdBuilder = Metadata.builder(metadata); + mdBuilder.putCustom(DecommissionAttributeMetadata.TYPE, oldMetadata); + state = ClusterState.builder(state).metadata(mdBuilder).build(); + setState(clusterService, state); + + decommissionController.updateMetadataWithDecommissionStatus( + DecommissionStatus.SUCCESSFUL, + new ActionListener() { + @Override + public void onResponse(DecommissionStatus status) { + assertEquals(DecommissionStatus.SUCCESSFUL, status); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("decommission status update failed"); + } + } + ); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + ClusterState newState = 
clusterService.getClusterApplierService().state(); + DecommissionAttributeMetadata decommissionAttributeMetadata = newState.metadata().custom(DecommissionAttributeMetadata.TYPE); + assertEquals(decommissionAttributeMetadata.status(), DecommissionStatus.SUCCESSFUL); + } + + private static class AdjustConfigurationForExclusions implements ClusterStateObserver.Listener { + + final CountDownLatch doneLatch; + + AdjustConfigurationForExclusions(CountDownLatch latch) { + this.doneLatch = latch; + } + + @Override + public void onNewClusterState(ClusterState state) { + clusterService.getClusterManagerService().submitStateUpdateTask("reconfiguration", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + assertThat(currentState, sameInstance(state)); + final Set votingNodeIds = new HashSet<>(); + currentState.nodes().forEach(n -> votingNodeIds.add(n.getId())); + currentState.getVotingConfigExclusions().forEach(t -> votingNodeIds.remove(t.getNodeId())); + final CoordinationMetadata.VotingConfiguration votingConfiguration = new CoordinationMetadata.VotingConfiguration( + votingNodeIds + ); + return builder(currentState).metadata( + Metadata.builder(currentState.metadata()) + .coordinationMetadata( + CoordinationMetadata.builder(currentState.coordinationMetadata()) + .lastAcceptedConfiguration(votingConfiguration) + .lastCommittedConfiguration(votingConfiguration) + .build() + ) + ).build(); + } + + @Override + public void onFailure(String source, Exception e) { + throw new AssertionError("unexpected failure", e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + doneLatch.countDown(); + } + }); + } + + @Override + public void onClusterServiceClose() { + throw new AssertionError("unexpected close"); + } + + @Override + public void onTimeout(TimeValue timeout) { + throw new AssertionError("unexpected timeout"); + } + } + + private ClusterState addNodes(ClusterState clusterState, String zone, String... 
nodeIds) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newNode(nodeId, singletonMap("zone", zone)))); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState setLocalNodeAsClusterManagerNode(ClusterState clusterState, String nodeId) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + nodeBuilder.localNodeId(nodeId); + nodeBuilder.clusterManagerNodeId(nodeId); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState setThreeNodesInVotingConfig(ClusterState clusterState) { + final CoordinationMetadata.VotingConfiguration votingConfiguration = CoordinationMetadata.VotingConfiguration.of( + clusterState.nodes().get("node1"), + clusterState.nodes().get("node6"), + clusterState.nodes().get("node11") + ); + + Metadata.Builder builder = Metadata.builder() + .coordinationMetadata( + CoordinationMetadata.builder() + .lastAcceptedConfiguration(votingConfiguration) + .lastCommittedConfiguration(votingConfiguration) + .build() + ); + clusterState = ClusterState.builder(clusterState).metadata(builder).build(); + return clusterState; + } + + private static DiscoveryNode newNode(String nodeId, Map attributes) { + return new DiscoveryNode(nodeId, nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLE, Version.CURRENT); + } + + final private static Set CLUSTER_MANAGER_DATA_ROLE = Collections.unmodifiableSet( + new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)) + ); +} diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java new file mode 100644 index 0000000000000..197bf57cdaa67 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -0,0 +1,245 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.decommission; + +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonMap; +import static org.mockito.Mockito.mock; +import static org.opensearch.cluster.ClusterState.builder; +import static org.opensearch.cluster.OpenSearchAllocationTestCase.createAllocationService; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; + +public class DecommissionServiceTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private ClusterService clusterService; + private TransportService transportService; + private AllocationService allocationService; + private DecommissionService decommissionService; + private ClusterSettings clusterSettings; + + @Before + public void setUpService() { + threadPool = new TestThreadPool("test", Settings.EMPTY); + clusterService = createClusterService(threadPool); + allocationService = createAllocationService(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build(); + logger.info("--> adding cluster manager node on zone_1"); + clusterState = addClusterManagerNodes(clusterState, "zone_1", "node1"); + logger.info("--> adding cluster manager node on zone_2"); + clusterState = addClusterManagerNodes(clusterState, "zone_2", "node6"); + logger.info("--> adding cluster manager node on zone_3"); + clusterState = addClusterManagerNodes(clusterState, "zone_3", "node11"); + logger.info("--> adding four data nodes on zone_1"); + clusterState = addDataNodes(clusterState, "zone_1", "node2", "node3", "node4", "node5"); + logger.info("--> adding four data nodes on zone_2"); + clusterState = addDataNodes(clusterState, "zone_2", "node7", "node8", "node9", "node10"); + logger.info("--> adding four data nodes on zone_3"); + clusterState = addDataNodes(clusterState, "zone_3", "node12", "node13", "node14", "node15"); + clusterState = setLocalNodeAsClusterManagerNode(clusterState, "node1"); + clusterState = setNodesInVotingConfig( + clusterState, + 
clusterState.nodes().get("node1"), + clusterState.nodes().get("node6"), + clusterState.nodes().get("node11") + ); + final ClusterState.Builder builder = builder(clusterState); + setState(clusterService, builder); + final MockTransport transport = new MockTransport(); + transportService = transport.createTransportService( + Settings.EMPTY, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundTransportAddress -> clusterService.state().nodes().get("node1"), + null, + emptySet() + ); + + final Settings.Builder nodeSettingsBuilder = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "zone_1,zone_2,zone_3"); + + clusterSettings = new ClusterSettings(nodeSettingsBuilder.build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + transportService.start(); + transportService.acceptIncomingRequests(); + + this.decommissionService = new DecommissionService( + nodeSettingsBuilder.build(), + clusterSettings, + clusterService, + transportService, + threadPool, + allocationService + ); + } + + @After + public void shutdownThreadPoolAndClusterService() { + clusterService.stop(); + threadPool.shutdown(); + } + + @SuppressWarnings("unchecked") + public void testDecommissioningNotStartedForInvalidAttributeName() { + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("rack", "rack-a"); + ActionListener listener = mock(ActionListener.class); + DecommissioningFailedException e = expectThrows( + DecommissioningFailedException.class, + () -> decommissionService.startDecommissionAction(decommissionAttribute, listener) + ); + assertThat(e.getMessage(), Matchers.endsWith("invalid awareness attribute requested for decommissioning")); + } + + @SuppressWarnings("unchecked") + public void testDecommissioningNotStartedForInvalidAttributeValue() { + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "random"); + ActionListener listener = mock(ActionListener.class); + DecommissioningFailedException e = expectThrows( + DecommissioningFailedException.class, + () -> { decommissionService.startDecommissionAction(decommissionAttribute, listener); } + ); + assertThat( + e.getMessage(), + Matchers.endsWith( + "invalid awareness attribute value requested for decommissioning. 
Set forced awareness values before to decommission" + ) + ); + } + + @SuppressWarnings("unchecked") + public void testDecommissioningFailedWhenAnotherAttributeDecommissioningSuccessful() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionStatus oldStatus = randomFrom(DecommissionStatus.SUCCESSFUL, DecommissionStatus.IN_PROGRESS, DecommissionStatus.INIT); + DecommissionAttributeMetadata oldMetadata = new DecommissionAttributeMetadata( + new DecommissionAttribute("zone", "zone_1"), + oldStatus + ); + final ClusterState.Builder builder = builder(clusterService.state()); + setState( + clusterService, + builder.metadata( + Metadata.builder(clusterService.state().metadata()).putCustom(DecommissionAttributeMetadata.TYPE, oldMetadata).build() + ) + ); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DecommissionResponse clusterStateUpdateResponse) { + fail("on response shouldn't have been called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof DecommissioningFailedException); + if (oldStatus.equals(DecommissionStatus.SUCCESSFUL)) { + assertThat( + e.getMessage(), + Matchers.endsWith("already successfully decommissioned, recommission before triggering another decommission") + ); + } else { + assertThat(e.getMessage(), Matchers.endsWith("is in progress, cannot process this request")); + } + countDownLatch.countDown(); + } + }; + decommissionService.startDecommissionAction(new DecommissionAttribute("zone", "zone_2"), listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + private ClusterState addDataNodes(ClusterState clusterState, String zone, String... nodeIds) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState addClusterManagerNodes(ClusterState clusterState, String zone, String... nodeIds) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + org.opensearch.common.collect.List.of(nodeIds) + .forEach(nodeId -> nodeBuilder.add(newClusterManagerNode(nodeId, singletonMap("zone", zone)))); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState setLocalNodeAsClusterManagerNode(ClusterState clusterState, String nodeId) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + nodeBuilder.localNodeId(nodeId); + nodeBuilder.clusterManagerNodeId(nodeId); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState setNodesInVotingConfig(ClusterState clusterState, DiscoveryNode... 
nodes) { + final CoordinationMetadata.VotingConfiguration votingConfiguration = CoordinationMetadata.VotingConfiguration.of(nodes); + + Metadata.Builder builder = Metadata.builder() + .coordinationMetadata( + CoordinationMetadata.builder() + .lastAcceptedConfiguration(votingConfiguration) + .lastCommittedConfiguration(votingConfiguration) + .build() + ); + clusterState = ClusterState.builder(clusterState).metadata(builder).build(); + return clusterState; + } + + private static DiscoveryNode newDataNode(String nodeId, Map attributes) { + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, DATA_ROLE, Version.CURRENT); + } + + private static DiscoveryNode newClusterManagerNode(String nodeId, Map attributes) { + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_ROLE, Version.CURRENT); + } + + final private static Set CLUSTER_MANAGER_ROLE = Collections.unmodifiableSet( + new HashSet<>(Collections.singletonList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + ); + + final private static Set DATA_ROLE = Collections.unmodifiableSet( + new HashSet<>(Collections.singletonList(DiscoveryNodeRole.DATA_ROLE)) + ); + + private ClusterState removeNodes(ClusterState clusterState, String... nodeIds) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); + org.opensearch.common.collect.List.of(nodeIds).forEach(nodeBuilder::remove); + return allocationService.disassociateDeadNodes(ClusterState.builder(clusterState).nodes(nodeBuilder).build(), false, "test"); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataSerializationTests.java b/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataSerializationTests.java new file mode 100644 index 0000000000000..60b3a03848830 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataSerializationTests.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.test.AbstractDiffableSerializationTestCase; + +import java.io.IOException; + +public class DecommissionAttributeMetadataSerializationTests extends AbstractDiffableSerializationTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return DecommissionAttributeMetadata::new; + } + + @Override + protected Metadata.Custom createTestInstance() { + String attributeName = randomAlphaOfLength(6); + String attributeValue = randomAlphaOfLength(6); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.values()); + return new DecommissionAttributeMetadata(decommissionAttribute, decommissionStatus); + } + + @Override + protected Metadata.Custom mutateInstance(Metadata.Custom instance) { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + DecommissionAttributeMetadata decommissionAttributeMetadata = (DecommissionAttributeMetadata) testInstance; + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + String attributeName = decommissionAttribute.attributeName(); + String attributeValue = decommissionAttribute.attributeValue(); + DecommissionStatus decommissionStatus = decommissionAttributeMetadata.status(); + if (randomBoolean()) { + decommissionStatus = randomFrom(DecommissionStatus.values()); + } + if (randomBoolean()) { + attributeName = randomAlphaOfLength(6); + } + if (randomBoolean()) { + attributeValue = randomAlphaOfLength(6); + } + return new DecommissionAttributeMetadata(new DecommissionAttribute(attributeName, attributeValue), decommissionStatus); + } + + @Override + protected Writeable.Reader> diffReader() { + return DecommissionAttributeMetadata::readDiffFrom; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + } + + @Override + protected Metadata.Custom doParseInstance(XContentParser parser) throws IOException { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + DecommissionAttributeMetadata decommissionAttributeMetadata = DecommissionAttributeMetadata.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + return new DecommissionAttributeMetadata( + decommissionAttributeMetadata.decommissionAttribute(), + decommissionAttributeMetadata.status() + ); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataTests.java new file mode 100644 index 0000000000000..746d4565b0db3 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch 
Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.test.AbstractNamedWriteableTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class DecommissionAttributeMetadataTests extends AbstractNamedWriteableTestCase { + @Override + protected DecommissionAttributeMetadata createTestInstance() { + String attributeName = randomAlphaOfLength(6); + String attributeValue = randomAlphaOfLength(6); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.values()); + return new DecommissionAttributeMetadata(decommissionAttribute, decommissionStatus); + } + + @Override + protected DecommissionAttributeMetadata mutateInstance(DecommissionAttributeMetadata instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Collections.singletonList( + new NamedWriteableRegistry.Entry( + DecommissionAttributeMetadata.class, + DecommissionAttributeMetadata.TYPE, + DecommissionAttributeMetadata::new + ) + ) + ); + } + + @Override + protected Class categoryClass() { + return DecommissionAttributeMetadata.class; + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataXContentTests.java b/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataXContentTests.java new file mode 100644 index 0000000000000..030946f4510a1 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadataXContentTests.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DecommissionAttributeMetadataXContentTests extends AbstractXContentTestCase { + @Override + protected DecommissionAttributeMetadata createTestInstance() { + String attributeName = randomAlphaOfLength(6); + String attributeValue = randomAlphaOfLength(6); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.values()); + return new DecommissionAttributeMetadata(decommissionAttribute, decommissionStatus); + } + + @Override + protected DecommissionAttributeMetadata doParseInstance(XContentParser parser) throws IOException { + return DecommissionAttributeMetadata.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java index f64b45e80dbca..2c7251818e2bc 100644 --- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java @@ -33,7 +33,6 @@ package org.opensearch.common.settings; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.indices.IndexingMemoryController; @@ -83,9 +82,13 @@ public void testIndicesRequestCacheSetting() { } public void testCircuitBreakerSettings() { - // default is chosen based on actual heap size + final Settings settings = Settings.builder() + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), randomBoolean()) + .build(); + + // default is chosen based on USE_REAL_MEMORY_USAGE_SETTING setting double defaultTotalPercentage; - if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < new ByteSizeValue(1, ByteSizeUnit.GB).getBytes()) { + if (HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.get(settings)) { defaultTotalPercentage = 0.95d; } else { defaultTotalPercentage = 0.7d; @@ -93,22 +96,26 @@ public void testCircuitBreakerSettings() { assertMemorySizeSetting( HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.total.limit", - new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * defaultTotalPercentage)) + new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * defaultTotalPercentage)), + settings ); assertMemorySizeSetting( HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.fielddata.limit", - new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)) + new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), + settings ); assertMemorySizeSetting( HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.request.limit", - new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 
0.6)) + new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)), + settings ); assertMemorySizeSetting( HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING, "network.breaker.inflight_requests.limit", - new ByteSizeValue((JvmInfo.jvmInfo().getMem().getHeapMax().getBytes())) + new ByteSizeValue((JvmInfo.jvmInfo().getMem().getHeapMax().getBytes())), + settings ); } @@ -121,10 +128,14 @@ public void testIndicesFieldDataCacheSetting() { } private void assertMemorySizeSetting(Setting setting, String settingKey, ByteSizeValue defaultValue) { + assertMemorySizeSetting(setting, settingKey, defaultValue, Settings.EMPTY); + } + + private void assertMemorySizeSetting(Setting setting, String settingKey, ByteSizeValue defaultValue, Settings settings) { assertThat(setting, notNullValue()); assertThat(setting.getKey(), equalTo(settingKey)); assertThat(setting.getProperties(), hasItem(Property.NodeScope)); - assertThat(setting.getDefault(Settings.EMPTY), equalTo(defaultValue)); + assertThat(setting.getDefault(settings), equalTo(defaultValue)); Settings settingWithPercentage = Settings.builder().put(settingKey, "25%").build(); assertThat( setting.get(settingWithPercentage), diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 45d93a5a12847..6bfdd9ae16773 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -89,7 +89,7 @@ import org.opensearch.index.similarity.NonNegativeScoresSimilarity; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; @@ -234,7 +234,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) ); } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index e02eac85beafb..de5ef8851ae80 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -851,7 +851,7 @@ public void testEnablingRemoteTranslogStoreFailsWhenRemoteSegmentDisabled() { () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings) ); assertEquals( - "Settings index.remote_store.translog.enabled cannot be enabled when index.remote_store.enabled is set to false", + "Settings index.remote_store.translog.enabled can ont be set/enabled when index.remote_store.enabled is set to true", iae.getMessage() ); } @@ -876,4 +876,71 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() { ); assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage()); } + + public void testRemoteRepositoryDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new 
IndexSettings(metadata, Settings.EMPTY); + assertNull(settings.getRemoteStoreRepository()); + } + + public void testRemoteRepositoryExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, "repo1") + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertEquals("repo1", settings.getRemoteStoreRepository()); + } + + public void testUpdateRemoteRepositoryFails() { + Set> remoteStoreSettingSet = new HashSet<>(); + remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING); + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> settings.updateSettings( + Settings.builder().put("index.remote_store.repository", randomUnicodeOfLength(10)).build(), + Settings.builder(), + Settings.builder(), + "index" + ) + ); + assertEquals(error.getMessage(), "final index setting [index.remote_store.repository], not updateable"); + } + + public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.SEGMENT) + .put("index.remote_store.enabled", false) + .put("index.remote_store.repository", "repo1") + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + ); + assertEquals( + "Settings index.remote_store.repository can ont be set/enabled when index.remote_store.enabled is set to true", + iae.getMessage() + ); + } + + public void testSetRemoteRepositoryFailsWhenEmptyString() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.SEGMENT) + .put("index.remote_store.enabled", false) + .put("index.remote_store.repository", "") + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + ); + assertEquals("Setting index.remote_store.repository should be provided with non-empty repository ID", iae.getMessage()); + } } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 1fe1a37dedae0..96d5573621683 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -11,14 +11,11 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; -import org.hamcrest.MatcherAssert; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; @@ -36,17 +33,21 @@ import java.util.stream.Collectors; import static 
org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; public class NRTReplicationEngineTests extends EngineTestCase { + private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( + "index", + Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() + ); + public void testCreateEngine() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos(); @@ -70,7 +71,7 @@ public void testEngineWritesOpsToTranslog() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { List operations = generateHistoryOnReplica( @@ -93,6 +94,9 @@ public void testEngineWritesOpsToTranslog() throws Exception { // we don't index into nrtEngine, so get the doc ids from the regular engine. final List docs = getDocIds(engine, true); + // close the NRTEngine, it will commit on close and we'll reuse its store for an IE. + nrtEngine.close(); + // recover a new engine from the nrtEngine's xlog. nrtEngine.translogManager().syncTranslog(); try (InternalEngine engine = new InternalEngine(nrtEngine.config())) { @@ -104,84 +108,77 @@ public void testEngineWritesOpsToTranslog() throws Exception { } } - public void testUpdateSegments() throws Exception { + public void testUpdateSegments_replicaReceivesSISWithHigherGen() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { - // add docs to the primary engine. - List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()) - .stream() - .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX)) - .collect(Collectors.toList()); - for (Engine.Operation op : operations) { - applyOperation(engine, op); - applyOperation(nrtEngine, op); - } - - engine.refresh("test"); - - final SegmentInfos latestPrimaryInfos = engine.getLatestSegmentInfos(); - nrtEngine.updateSegments(latestPrimaryInfos, engine.getProcessedLocalCheckpoint()); - assertMatchingSegmentsAndCheckpoints(nrtEngine, latestPrimaryInfos); - - // assert a doc from the operations exists. 
- final ParsedDocument parsedDoc = createParsedDoc(operations.stream().findFirst().get().id(), null); - try (Engine.GetResult getResult = engine.get(newGet(true, parsedDoc), engine::acquireSearcher)) { - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - } - - try (Engine.GetResult getResult = nrtEngine.get(newGet(true, parsedDoc), nrtEngine::acquireSearcher)) { - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - } - - // Flush the primary and update the NRTEngine with the latest committed infos. - engine.flush(); - nrtEngine.translogManager().syncTranslog(); // to advance persisted checkpoint + // assume we start at the same gen. + assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); + assertEquals(nrtEngine.getLatestSegmentInfos().getGeneration(), nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(engine.getLatestSegmentInfos().getGeneration(), nrtEngine.getLatestSegmentInfos().getGeneration()); + + // flush the primary engine - we don't need any segments, just force a new commit point. + engine.flush(true, true); + assertEquals(3, engine.getLatestSegmentInfos().getGeneration()); + nrtEngine.updateSegments(engine.getLatestSegmentInfos(), engine.getProcessedLocalCheckpoint()); + assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration()); + } + } - Set seqNos = operations.stream().map(Engine.Operation::seqNo).collect(Collectors.toSet()); + public void testUpdateSegments_replicaReceivesSISWithLowerGen() throws IOException { + // if the replica is already at segments_N that is received, it will commit segments_N+1. + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - nrtEngine.ensureOpen(); - try ( - Translog.Snapshot snapshot = assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getTranslog().newSnapshot() - ) { - assertThat(snapshot.totalOperations(), equalTo(operations.size())); - assertThat( - TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()), - equalTo(seqNos) - ); - } + try ( + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + nrtEngine.getLatestSegmentInfos().changed(); + nrtEngine.getLatestSegmentInfos().changed(); + // commit the infos to push us to segments_3. + nrtEngine.commitSegmentInfos(); + assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration()); - final SegmentInfos primaryInfos = engine.getLastCommittedSegmentInfos(); + // update the replica with segments_2 from the primary. 
+ final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); + assertEquals(2, primaryInfos.getGeneration()); nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); - assertMatchingSegmentsAndCheckpoints(nrtEngine, primaryInfos); + assertEquals(4, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(4, nrtEngine.getLatestSegmentInfos().getGeneration()); + assertEquals(primaryInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); + assertEquals(primaryInfos.getVersion(), nrtEngine.getLastCommittedSegmentInfos().getVersion()); - assertEquals( - assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getTranslog().getGeneration().translogFileGeneration, - assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().getGeneration().translogFileGeneration - ); + nrtEngine.close(); + assertEquals(5, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + } + } - try ( - Translog.Snapshot snapshot = assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getTranslog().newSnapshot() - ) { - assertThat(snapshot.totalOperations(), equalTo(operations.size())); - assertThat( - TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()), - equalTo(seqNos) - ); - } + public void testUpdateSegments_replicaCommitsFirstReceivedInfos() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - // Ensure the same hit count between engines. - int expectedDocCount; - try (final Engine.Searcher test = engine.acquireSearcher("test")) { - expectedDocCount = test.count(Queries.newMatchAllQuery()); - assertSearcherHits(nrtEngine, expectedDocCount); - } - assertEngineCleanedUp(nrtEngine, assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getDeletionPolicy()); + try ( + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + assertEquals(2, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); + // bump the latest infos version a couple of times so that we can assert the correct version after commit. + engine.getLatestSegmentInfos().changed(); + engine.getLatestSegmentInfos().changed(); + assertNotEquals(nrtEngine.getLatestSegmentInfos().getVersion(), engine.getLatestSegmentInfos().getVersion()); + + // update replica with the latest primary infos, it will be the same gen, segments_2, ensure it is also committed. 
+ final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); + assertEquals(2, primaryInfos.getGeneration()); + nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); + final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos(); + assertEquals(primaryInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); + assertEquals(primaryInfos.getVersion(), lastCommittedSegmentInfos.getVersion()); } } @@ -189,7 +186,7 @@ public void testTrimTranslogOps() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore); ) { List operations = generateHistoryOnReplica( @@ -223,12 +220,9 @@ public void testCommitSegmentInfos() throws Exception { // This test asserts that NRTReplication#commitSegmentInfos creates a new commit point with the latest checkpoints // stored in user data. final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - "index", - Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() - ); + try ( - final Store nrtEngineStore = createStore(indexSettings, newDirectory()); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()) @@ -248,6 +242,8 @@ public void testCommitSegmentInfos() throws Exception { // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata. 
final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos(); assertEquals(previousInfos.getGeneration(), latestSegmentInfos.getLastGeneration()); + assertEquals(previousInfos.getVersion(), latestSegmentInfos.getVersion()); + assertEquals(previousInfos.counter, latestSegmentInfos.counter); Map userData = latestSegmentInfos.getUserData(); assertEquals(processedCheckpoint, localCheckpointTracker.getProcessedCheckpoint()); assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO))); @@ -262,22 +258,6 @@ public void testCommitSegmentInfos() throws Exception { } } - private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine, SegmentInfos expectedSegmentInfos) - throws IOException { - assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint()); - assertEquals(engine.getProcessedLocalCheckpoint(), nrtEngine.getProcessedLocalCheckpoint()); - assertEquals(engine.getLocalCheckpointTracker().getMaxSeqNo(), nrtEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(expectedSegmentInfos.files(true), nrtEngine.getLatestSegmentInfos().files(true)); - assertEquals(expectedSegmentInfos.getUserData(), nrtEngine.getLatestSegmentInfos().getUserData()); - assertEquals(expectedSegmentInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); - } - - private void assertSearcherHits(Engine engine, int hits) { - try (final Engine.Searcher test = engine.acquireSearcher("test")) { - MatcherAssert.assertThat(test, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(hits)); - } - } - private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, Store store) throws IOException { Lucene.cleanLuceneIndex(store.directory()); final Path translogDir = createTempDir(); diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java index 763ee59a385a2..76496491b3ed4 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -52,6 +52,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; @@ -340,7 +341,13 @@ public void testSortMissing(boolean first, boolean reverse) throws IOException { randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField) ); - assertEquals(numDocs, topDocs.totalHits.value); + // As of Lucene 9.0.0, totalHits may be a lower bound + if (topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO) { + assertEquals(numDocs, topDocs.totalHits.value); + } else { + assertTrue(1000 <= topDocs.totalHits.value); + assertTrue(numDocs >= topDocs.totalHits.value); + } BytesRef previousValue = first ? null : reverse ? 
UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value"); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 8c00ab97a46ea..662afa80f65fc 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2689,8 +2689,9 @@ public void testRestoreShardFromRemoteStore() throws IOException { storeDirectory.deleteFile(file); } + assertEquals(0, storeDirectory.listAll().length); + Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) target.remoteStore().directory()).getDelegate()).getDelegate(); - ((BaseDirectoryWrapper) remoteDirectory).setCheckIndexOnClose(false); // extra0 file is added as a part of https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html // Safe to remove without impacting the test diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index af92d821a9043..6b05d67836272 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -8,132 +8,209 @@ package org.opensearch.index.shard; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.opensearch.test.OpenSearchTestCase; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; +import org.junit.After; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.Store; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.NoSuchFileException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CountDownLatch; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.doThrow; +public class RemoteStoreRefreshListenerTests extends IndexShardTestCase { + private IndexShard indexShard; + private RemoteStoreRefreshListener remoteStoreRefreshListener; -public class RemoteStoreRefreshListenerTests extends OpenSearchTestCase { - private Directory storeDirectory; - private Directory remoteDirectory; + public void setup(boolean primary, int numberOfDocs) throws IOException { + indexShard = newStartedShard( + primary, + Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(), + new InternalEngineFactory() + ); - private RemoteStoreRefreshListener remoteStoreRefreshListener; + indexDocs(1, numberOfDocs); + indexShard.refresh("test"); - public 
void setup(String[] remoteFiles) throws IOException { - storeDirectory = mock(Directory.class); - remoteDirectory = mock(Directory.class); - when(remoteDirectory.listAll()).thenReturn(remoteFiles); - remoteStoreRefreshListener = new RemoteStoreRefreshListener(storeDirectory, remoteDirectory); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard); } - public void testAfterRefreshFalse() throws IOException { - setup(new String[0]); - remoteStoreRefreshListener.afterRefresh(false); - verify(storeDirectory, times(0)).listAll(); + private void indexDocs(int startDocId, int numberOfDocs) throws IOException { + for (int i = startDocId; i < startDocId + numberOfDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } } - public void testAfterRefreshTrueNoLocalFiles() throws IOException { - setup(new String[0]); + @After + public void tearDown() throws Exception { + Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate(); + ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + closeShards(indexShard); + super.tearDown(); + } - when(storeDirectory.listAll()).thenReturn(new String[0]); + public void testAfterRefresh() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); - verify(remoteDirectory, times(0)).deleteFile(any()); - } + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - public void testAfterRefreshOnlyUploadFiles() throws IOException { - setup(new String[0]); + verifyUploadedSegments(remoteSegmentStoreDirectory); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + // This is to check if reading data from remote segment store works as well. 
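+ // Assumption: init() re-reads the latest metadata file uploaded to the remote store, so the second verifyUploadedSegments call below exercises the remote read path rather than cached in-memory state.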
+ remoteSegmentStoreDirectory.init(); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshOnlyUploadAndDelete() throws IOException { - setup(new String[] { "0.si", "0.cfs" }); + public void testAfterCommit() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); + flushShard(indexShard); - String[] localFiles = new String[] { "segments_1", "1.si", "1.cfs", "1.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.si", "1.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); - verify(remoteDirectory).deleteFile("0.si"); - verify(remoteDirectory).deleteFile("0.cfs"); + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshOnlyDelete() throws IOException { - setup(new String[] { "0.si", "0.cfs" }); + public void testRefreshAfterCommit() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); + flushShard(indexShard); - String[] localFiles = new String[] { "0.si" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + indexDocs(4, 4); + indexShard.refresh("test"); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); - verify(remoteDirectory).deleteFile("0.cfs"); - } + indexDocs(8, 4); + indexShard.refresh("test"); - public void testAfterRefreshTempLocalFile() throws IOException { - setup(new String[0]); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs.tmp" }; - when(storeDirectory.listAll()).thenReturn(localFiles); - doThrow(new NoSuchFileException("0.cfs.tmp")).when(remoteDirectory) - .copyFrom(storeDirectory, "0.cfs.tmp", "0.cfs.tmp", IOContext.DEFAULT); + verifyUploadedSegments(remoteSegmentStoreDirectory); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); + // This is to check if reading data from remote segment store works as well. + remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshConsecutive() throws IOException { - setup(new String[0]); + public void testAfterMultipleCommits() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); - doThrow(new IOException("0.cfs")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfe", IOContext.DEFAULT); - doThrow(new IOException("0.cfe")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + for (int i = 0; i < RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP + 3; i++) { + indexDocs(4 * (i + 1), 4); + flushShard(indexShard); + } + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } + } + + public void testReplica() throws IOException { + setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); - String[] localFilesSecondRefresh = new String[] { "segments_1", "0.cfs", "1.cfs", "1.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFilesSecondRefresh); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + + assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size()); + } + } + public void testReplicaPromotion() throws IOException, InterruptedException { + setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); - verify(remoteDirectory).deleteFile("0.si"); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) + .getDelegate(); + + assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size()); + + final ShardRouting replicaRouting = indexShard.routingEntry(); + promoteReplica( + indexShard, + Collections.singleton(replicaRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(replicaRouting.shardId()).addShard(replicaRouting).build() + ); + + // The following logic is referenced from IndexShardTests.testPrimaryFillsSeqNoGapsOnPromotion + // ToDo: Add wait logic as part of promoteReplica() + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + + indexDocs(4, 4); + indexShard.refresh("test"); + remoteStoreRefreshListener.afterRefresh(true); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } + + private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentStoreDirectory) throws IOException { + Map uploadedSegments = remoteSegmentStoreDirectory + .getSegmentsUploadedToRemoteStore(); + try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + for (String file : segmentInfos.files(true)) { + if (!RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file)) { + assertTrue(uploadedSegments.containsKey(file)); + } + } + } } } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 23371a39871c7..007317f6e71cd 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,11 +8,21 @@ package org.opensearch.index.shard; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; +import org.junit.Assert; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; @@ -21,12 +31,29 @@ import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static java.util.Arrays.asList; import static org.hamcrest.Matchers.equalTo; @@ -34,6 +61,7 @@ import static 
org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -42,7 +70,7 @@ public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelRepli .build(); /** - * Test that latestReplicationCheckpoint returns null only for docrep enabled indices + * Test that latestReplicationCheckpoint returns null only for docrep enabled indices */ public void testReplicationCheckpointNullForDocRep() throws IOException { Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "DOCUMENT").put(Settings.EMPTY).build(); @@ -52,11 +80,10 @@ public void testReplicationCheckpointNullForDocRep() throws IOException { } /** - * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices + * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices */ - public void testReplicationCheckpointNotNullForSegReb() throws IOException { - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT").put(Settings.EMPTY).build(); - final IndexShard indexShard = newStartedShard(indexSettings); + public void testReplicationCheckpointNotNullForSegRep() throws IOException { + final IndexShard indexShard = newStartedShard(randomBoolean(), settings, new NRTReplicationEngineFactory()); final ReplicationCheckpoint replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); assertNotNull(replicationCheckpoint); closeShards(indexShard); @@ -181,6 +208,132 @@ public void testPublishCheckpointAfterRelocationHandOff() throws IOException { closeShards(shard); } + public void testReplicaReceivesGenIncrease() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final int numDocs = randomIntBetween(10, 100); + shards.indexDocs(numDocs); + flushShard(primary, true); + replicateSegments(primary, shards.getReplicas()); + + final int totalDocs = numDocs + shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); + flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + + assertEqualCommittedSegments(primary, replica); + assertDocCount(primary, totalDocs); + assertDocCount(replica, totalDocs); + } + } + + public void testReplicaReceivesLowerGeneration() throws Exception { + // when a replica gets incoming segments that are lower than what it currently has on disk. + + // start 3 nodes Gens: P [2], R [2], R[2] + // index some docs and flush twice, push to only 1 replica. + // State Gens: P [4], R-1 [3], R-2 [2] + // Promote R-2 as the new primary and demote the old primary. + // State Gens: R[4], R-1 [3], P [4] - *commit on close of NRTEngine, xlog replayed and commit made. + // index docs on new primary and flush + // replicate to all. 
+ // Expected result: State Gens: P[4], R-1 [4], R-2 [4] + try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica_1 = shards.getReplicas().get(0); + final IndexShard replica_2 = shards.getReplicas().get(1); + int numDocs = randomIntBetween(10, 100); + shards.indexDocs(numDocs); + flushShard(primary, false); + replicateSegments(primary, List.of(replica_1)); + numDocs = randomIntBetween(numDocs + 1, numDocs + 10); + shards.indexDocs(numDocs); + flushShard(primary, false); + assertLatestCommitGen(4, primary); + replicateSegments(primary, List.of(replica_1)); + + assertEqualCommittedSegments(primary, replica_1); + assertLatestCommitGen(4, primary, replica_1); + assertLatestCommitGen(2, replica_2); + + shards.promoteReplicaToPrimary(replica_2).get(); + primary.close("demoted", false); + primary.store().close(); + IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); + shards.recoverReplica(oldPrimary); + assertLatestCommitGen(4, oldPrimary); + assertEqualCommittedSegments(oldPrimary, replica_1); + + assertLatestCommitGen(4, replica_2); + + numDocs = randomIntBetween(numDocs + 1, numDocs + 10); + shards.indexDocs(numDocs); + flushShard(replica_2, false); + replicateSegments(replica_2, shards.getReplicas()); + assertEqualCommittedSegments(replica_2, oldPrimary, replica_1); + } + } + + public void testReplicaRestarts() throws Exception { + try (ReplicationGroup shards = createGroup(3, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. + final int numDocs = shards.indexDocs(randomInt(10)); + + // refresh and copy the segments over. + if (randomBoolean()) { + flushShard(primary); + } + primary.refresh("Test"); + replicateSegments(primary, shards.getReplicas()); + + // at this point both shards should have numDocs persisted and searchable. + assertDocCounts(primary, numDocs, numDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, numDocs, numDocs); + } + + final int i1 = randomInt(5); + for (int i = 0; i < i1; i++) { + shards.indexDocs(randomInt(10)); + + // randomly restart a replica + final IndexShard replicaToRestart = getRandomReplica(shards); + replicaToRestart.close("restart", false); + replicaToRestart.store().close(); + shards.removeReplica(replicaToRestart); + final IndexShard newReplica = shards.addReplicaWithExistingPath( + replicaToRestart.shardPath(), + replicaToRestart.routingEntry().currentNodeId() + ); + shards.recoverReplica(newReplica); + + // refresh and push segments to our other replicas. + if (randomBoolean()) { + failAndPromoteRandomReplica(shards); + } + flushShard(shards.getPrimary()); + replicateSegments(shards.getPrimary(), shards.getReplicas()); + } + primary = shards.getPrimary(); + + // refresh and push segments to our other replica.
+ flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List docsAfterReplication = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterReplication)); + } + } + } + public void testNRTReplicaPromotedAsPrimary() throws Exception { try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { shards.startAll(); @@ -241,6 +394,213 @@ public void testNRTReplicaPromotedAsPrimary() throws Exception { } } + public void testReplicaPromotedWhileReplicating() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + oldPrimary.refresh("Test"); + shards.syncGlobalCheckpoint(); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, oldPrimary); + ShardRouting oldRouting = nextPrimary.shardRouting; + try { + shards.promoteReplicaToPrimary(nextPrimary); + } catch (IOException e) { + Assert.fail("Promotion should not fail"); + } + targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(nextPrimary, targetService); + // wait for replica to finish being promoted, and assert doc counts. 
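+ // Assumption: acquiring a primary operation permit blocks until the shard has fully switched to primary mode, mirroring the approach in IndexShardTests.testPrimaryFillsSeqNoGapsOnPromotion.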
+ final CountDownLatch latch = new CountDownLatch(1); + nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); + nextPrimary.refresh("test"); + + oldPrimary.close("demoted", false); + oldPrimary.store().close(); + IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + + assertDocCount(nextPrimary, numDocs); + assertDocCount(newReplica, numDocs); + + nextPrimary.refresh("test"); + replicateSegments(nextPrimary, shards.getReplicas()); + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + // trigger a cancellation by closing the replica. 
+ targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be reached"); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + // randomly resolve the listener, indicating the source has resolved. + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testPrimaryCancelsExecution() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) {} + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + private SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + 
sourceFactory + ); + } + /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. @@ -253,4 +613,78 @@ private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCoun // processed cp should be 1 less than our searchable doc count. assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint()); } + + private void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primary.shardId), primary); + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } + } + + private void startReplicationAndAssertCancellation(IndexShard replica, SegmentReplicationTargetService targetService) + throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final SegmentReplicationTarget target = targetService.startReplication( + ReplicationCheckpoint.empty(replica.shardId), + replica, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail("Replication should not complete"); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertTrue(e instanceof CancellableThreads.ExecutionCancelledException); + assertFalse(sendShardFailure); + assertEquals(SegmentReplicationState.Stage.CANCELLED, state.getStage()); + latch.countDown(); + } + } + ); + + latch.await(2, TimeUnit.SECONDS); + assertEquals("Should have resolved listener with failure", 0, latch.getCount()); + assertNull(targetService.get(target.getId())); + } + + private IndexShard getRandomReplica(ReplicationGroup shards) { + return shards.getReplicas().get(randomInt(shards.getReplicas().size() - 1)); + } + + private IndexShard failAndPromoteRandomReplica(ReplicationGroup shards) throws IOException { + IndexShard primary = shards.getPrimary(); + final IndexShard newPrimary = getRandomReplica(shards); + shards.promoteReplicaToPrimary(newPrimary); + primary.close("demoted", true); + primary.store().close(); + primary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); + shards.recoverReplica(primary); + return newPrimary; + } + + private void assertLatestCommitGen(long expected, IndexShard... shards) throws IOException { + for (IndexShard indexShard : shards) { + try (final GatedCloseable commit = indexShard.acquireLastIndexCommit(false)) { + assertEquals(expected, commit.get().getGeneration()); + } + } + } + + private void assertEqualCommittedSegments(IndexShard primary, IndexShard... 
replicas) throws IOException { + for (IndexShard replica : replicas) { + final SegmentInfos replicaInfos = replica.store().readLastCommittedSegmentsInfo(); + final SegmentInfos primaryInfos = primary.store().readLastCommittedSegmentsInfo(); + final Map latestReplicaMetadata = replica.store().getSegmentMetadataMap(replicaInfos); + final Map latestPrimaryMetadata = primary.store().getSegmentMetadataMap(primaryInfos); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(latestPrimaryMetadata, latestReplicaMetadata); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.missing.isEmpty()); + } + } } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java index 273d3c7e37c56..cd35349e33b59 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -44,6 +44,7 @@ public void testReadByte() throws IOException { when(inputStream.read()).thenReturn(10); assertEquals(10, remoteIndexInput.readByte()); + assertEquals(1, remoteIndexInput.getFilePointer()); verify(inputStream).read(any()); } @@ -52,13 +53,19 @@ public void testReadByteIOException() throws IOException { when(inputStream.read(any())).thenThrow(new IOException("Error reading")); assertThrows(IOException.class, () -> remoteIndexInput.readByte()); + assertEquals(0, remoteIndexInput.getFilePointer()); } public void testReadBytes() throws IOException { - byte[] buffer = new byte[10]; - remoteIndexInput.readBytes(buffer, 10, 20); + byte[] buffer = new byte[20]; + when(inputStream.read(eq(buffer), anyInt(), anyInt())).thenReturn(10).thenReturn(3).thenReturn(6).thenReturn(-1); + remoteIndexInput.readBytes(buffer, 0, 20); - verify(inputStream).read(buffer, 10, 20); + verify(inputStream).read(buffer, 0, 20); + verify(inputStream).read(buffer, 10, 10); + verify(inputStream).read(buffer, 13, 7); + verify(inputStream).read(buffer, 19, 1); + assertEquals(19, remoteIndexInput.getFilePointer()); } public void testReadBytesMultipleIterations() throws IOException { @@ -95,20 +102,14 @@ public void testLength() { assertEquals(FILESIZE, remoteIndexInput.length()); } - public void testSeek() throws IOException { - remoteIndexInput.seek(10); - - verify(inputStream).skip(10); - } - - public void testSeekIOException() throws IOException { - when(inputStream.skip(10)).thenThrow(new IOException("Error reading")); - - assertThrows(IOException.class, () -> remoteIndexInput.seek(10)); + public void testSeek() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.seek(100L)); } - public void testGetFilePointer() { - assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.getFilePointer()); + public void testGetFilePointer() throws IOException { + when(inputStream.read(any(), eq(0), eq(8))).thenReturn(8); + remoteIndexInput.readBytes(new byte[8], 0, 8); + assertEquals(8, remoteIndexInput.getFilePointer()); } public void testSlice() { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java similarity index 70% rename from server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java rename to server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index e8357d2c184bf..0105d0dc309c2 100644 --- 
a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.Directory; import org.junit.Before; import org.mockito.ArgumentCaptor; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; @@ -27,29 +28,31 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Collections; +import java.util.List; import java.util.function.Supplier; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; -public class RemoteDirectoryFactoryTests extends OpenSearchTestCase { +public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase { private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; - private RemoteDirectoryFactory remoteDirectoryFactory; + private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory; @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); repositoriesService = mock(RepositoriesService.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceSupplier); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier); } public void testNewDirectory() throws IOException { - Settings settings = Settings.builder().build(); + Settings settings = Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "uuid_1").build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0"); ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0)); @@ -57,20 +60,21 @@ public void testNewDirectory() throws IOException { BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); when(repository.blobStore()).thenReturn(blobStore); + when(repository.basePath()).thenReturn(new BlobPath().add("base_path")); when(blobStore.blobContainer(any())).thenReturn(blobContainer); when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); - try (Directory directory = remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { - assertTrue(directory instanceof RemoteDirectory); + try (Directory directory = remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { + assertTrue(directory instanceof RemoteSegmentStoreDirectory); ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); - verify(blobStore).blobContainer(blobPathCaptor.capture()); - BlobPath blobPath = blobPathCaptor.getValue(); - assertEquals("foo/0/", blobPath.buildAsString()); + verify(blobStore, times(2)).blobContainer(blobPathCaptor.capture()); + List blobPaths = blobPathCaptor.getAllValues(); + assertEquals("base_path/uuid_1/0/segments/data/", blobPaths.get(0).buildAsString()); + assertEquals("base_path/uuid_1/0/segments/metadata/", 
blobPaths.get(1).buildAsString()); - directory.listAll(); - verify(blobContainer).listBlobs(); + verify(blobContainer).listBlobsByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX); verify(repositoriesService).repository("remote_store_repository"); } } @@ -85,7 +89,7 @@ public void testNewDirectoryRepositoryDoesNotExist() { assertThrows( IllegalArgumentException.class, - () -> remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath) + () -> remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath) ); } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 4eabfa74625f2..96f14616fb54b 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -15,6 +15,7 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.tests.util.LuceneTestCase; import org.junit.Before; +import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Set; import org.opensearch.test.OpenSearchTestCase; @@ -129,26 +130,52 @@ public void testInitNoMetadataFile() throws IOException { private Map getDummyMetadata(String prefix, int commitGeneration) { Map metadata = new HashMap<>(); - metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__qrt::" + randomIntBetween(1000, 5000)); - metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__zxd::" + randomIntBetween(1000, 5000)); - metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__yui::" + randomIntBetween(1000, 5000)); + + metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); + metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); + metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); metadata.put( "segments_" + commitGeneration, - "segments_" + commitGeneration + "::segments_" + commitGeneration + "__exv::" + randomIntBetween(1000, 5000) + "segments_" + + commitGeneration + + "::segments_" + + commitGeneration + + "__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) ); return metadata; } - private void populateMetadata() throws IOException { + private Map> populateMetadata() throws IOException { List metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv"); when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( metadataFiles ); - IndexInput indexInput = mock(IndexInput.class); - Map dummyMetadata = getDummyMetadata("_0", 1); - when(indexInput.readMapOfStrings()).thenReturn(dummyMetadata); - when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput); + Map> metadataFilenameContentMapping = Map.of( + "metadata__1__5__abc", + getDummyMetadata("_0", 1), + "metadata__1__6__pqr", + getDummyMetadata("_0", 1), + "metadata__2__1__zxv", + getDummyMetadata("_0", 1) + ); + + IndexInput indexInput1 = mock(IndexInput.class); + when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc")); + 
when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1); + + IndexInput indexInput2 = mock(IndexInput.class); + when(indexInput2.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__6__pqr")); + when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn(indexInput2); + + IndexInput indexInput3 = mock(IndexInput.class); + when(indexInput3.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__2__1__zxv")); + when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput3); + + return metadataFilenameContentMapping; } public void testInit() throws IOException { @@ -291,20 +318,39 @@ public void testCopyFromException() throws IOException { } public void testContainsFile() throws IOException { - populateMetadata(); + List metadataFiles = List.of("metadata__1__5__abc"); + when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( + metadataFiles + ); + + Map metadata = new HashMap<>(); + metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); + metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345"); + + Map> metadataFilenameContentMapping = Map.of("metadata__1__5__abc", metadata); + + IndexInput indexInput1 = mock(IndexInput.class); + when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc")); + when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1); + remoteSegmentStoreDirectory.init(); - // This is not the correct way to add files but the other way is to open up access to fields in UploadedSegmentMetadata Map uploadedSegmentMetadataMap = remoteSegmentStoreDirectory .getSegmentsUploadedToRemoteStore(); - uploadedSegmentMetadataMap.put( - "_100.si", - new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234") + + assertThrows( + UnsupportedOperationException.class, + () -> uploadedSegmentMetadataMap.put( + "_100.si", + new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234") + ) ); - assertTrue(remoteSegmentStoreDirectory.containsFile("_100.si", "1234")); - assertFalse(remoteSegmentStoreDirectory.containsFile("_100.si", "2345")); - assertFalse(remoteSegmentStoreDirectory.containsFile("_200.si", "1234")); + assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234")); + assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234000")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345000")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_0.si", "23")); } public void testUploadMetadataEmpty() throws IOException { @@ -336,4 +382,84 @@ public void testUploadMetadataNonEmpty() throws IOException { String metadataString = remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get("_0.si").toString(); verify(indexOutput).writeMapOfStrings(Map.of("_0.si", metadataString)); } + + public void testDeleteStaleCommitsException() throws IOException { + when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow( + new IOException("Error reading") + ); + + assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteStaleSegments(5)); + } + + public void 
testDeleteStaleCommitsWithinThreshold() throws IOException { + populateMetadata(); + + // populateMetadata() adds a stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=5 here so that none of the metadata files will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(5); + + verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT)); + } + + public void testDeleteStaleCommitsActualDelete() throws IOException { + Map> metadataFilenameContentMapping = populateMetadata(); + remoteSegmentStoreDirectory.init(); + + // populateMetadata() adds a stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(2); + + for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { + String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + verify(remoteDataDirectory).deleteFile(uploadedFilename); + } + verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc"); + } + + public void testDeleteStaleCommitsActualDeleteIOException() throws IOException { + Map> metadataFilenameContentMapping = populateMetadata(); + remoteSegmentStoreDirectory.init(); + + String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc") + .values() + .stream() + .findAny() + .get() + .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(segmentFileWithException); + // populateMetadata() adds a stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(2); + + for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { + String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + verify(remoteDataDirectory).deleteFile(uploadedFilename); + } + verify(remoteMetadataDirectory, times(0)).deleteFile("metadata__1__5__abc"); + } + + public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOException { + Map> metadataFilenameContentMapping = populateMetadata(); + remoteSegmentStoreDirectory.init(); + + String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc") + .values() + .stream() + .findAny() + .get() + .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + doThrow(new NoSuchFileException(segmentFileWithException)).when(remoteDataDirectory).deleteFile(segmentFileWithException); + // populateMetadata() adds a stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(2); + + for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { + String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + verify(remoteDataDirectory).deleteFile(uploadedFilename); + } + verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc"); + } } diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index b6bced9f038c0..89b11d604d7a1 100644 ---
a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -39,6 +39,7 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexNotFoundException; @@ -80,6 +81,7 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; @@ -93,6 +95,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Iterator; @@ -121,6 +124,12 @@ public class StoreTests extends OpenSearchTestCase { "index", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build() ); + + IndexSettings SEGMENT_REPLICATION_INDEX_SETTINGS = new IndexSettings( + INDEX_SETTINGS.getIndexMetadata(), + Settings.builder().put(INDEX_SETTINGS.getSettings()).put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() + ); + private static final Version MIN_SUPPORTED_LUCENE_VERSION = org.opensearch.Version.CURRENT .minimumIndexCompatibilityVersion().luceneVersion; @@ -1150,12 +1159,113 @@ public void testGetMetadataWithSegmentInfos() throws IOException { store.close(); } - public void testcleanupAndPreserveLatestCommitPoint() throws IOException { + public void testCleanupAndPreserveLatestCommitPoint() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); + Store store = new Store( + shardId, + SEGMENT_REPLICATION_INDEX_SETTINGS, + StoreTests.newDirectory(random()), + new DummyShardLock(shardId) + ); + commitRandomDocs(store); + + Store.MetadataSnapshot commitMetadata = store.getMetadata(); + + // index more docs but only IW.flush, this will create additional files we'll clean up. + final IndexWriter writer = indexRandomDocs(store); + writer.flush(); + writer.close(); + + final List additionalSegments = new ArrayList<>(); + for (String file : store.directory().listAll()) { + if (commitMetadata.contains(file) == false) { + additionalSegments.add(file); + } + } + assertFalse(additionalSegments.isEmpty()); + + // clean up everything not in the latest commit point. 
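+ // Assumption: passing the last committed SegmentInfos tells cleanup which files to preserve, so files from the uncommitted flush above should be removed; the assertions below verify exactly that.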
+ store.cleanupAndPreserveLatestCommitPoint("test", store.readLastCommittedSegmentsInfo()); + + // we want to ensure commitMetadata files are preserved after calling cleanup + for (String existingFile : store.directory().listAll()) { + assertTrue(commitMetadata.contains(existingFile)); + assertFalse(additionalSegments.contains(existingFile)); + } + deleteContent(store.directory()); + IOUtils.close(store); + } + + public void testGetSegmentMetadataMap() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store( + shardId, + SEGMENT_REPLICATION_INDEX_SETTINGS, + new NIOFSDirectory(createTempDir()), + new DummyShardLock(shardId) + ); + store.createEmpty(Version.LATEST); + final Map metadataSnapshot = store.getSegmentMetadataMap(store.readLastCommittedSegmentsInfo()); + // no docs indexed only _N file exists. + assertTrue(metadataSnapshot.isEmpty()); + + // commit some docs to create a commit point. + commitRandomDocs(store); + + final Map snapshotAfterCommit = store.getSegmentMetadataMap(store.readLastCommittedSegmentsInfo()); + assertFalse(snapshotAfterCommit.isEmpty()); + assertFalse(snapshotAfterCommit.keySet().stream().anyMatch((name) -> name.startsWith(IndexFileNames.SEGMENTS))); + store.close(); + } + + public void testSegmentReplicationDiff() { + final String segmentName = "_0.si"; + final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata(segmentName, 1L, "0", Version.LATEST); + // source has file target is missing. + Store.RecoveryDiff diff = Store.segmentReplicationDiff(Map.of(segmentName, SEGMENT_FILE), Collections.emptyMap()); + assertEquals(List.of(SEGMENT_FILE), diff.missing); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + + // target has file not on source. + diff = Store.segmentReplicationDiff(Collections.emptyMap(), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + + // source and target have identical file. + diff = Store.segmentReplicationDiff(Map.of(segmentName, SEGMENT_FILE), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertEquals(List.of(SEGMENT_FILE), diff.identical); + + // source has diff copy of same file as target. + StoreFileMetadata SOURCE_DIFF_FILE = new StoreFileMetadata(segmentName, 1L, "abc", Version.LATEST); + diff = Store.segmentReplicationDiff(Map.of(segmentName, SOURCE_DIFF_FILE), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertEquals(List.of(SOURCE_DIFF_FILE), diff.different); + assertTrue(diff.identical.isEmpty()); + + // ignore _N files if included in source map. 
+ final String segmentsFile = IndexFileNames.SEGMENTS.concat("_2"); + StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(segmentsFile, 1L, "abc", Version.LATEST); + diff = Store.segmentReplicationDiff(Map.of(segmentsFile, SEGMENTS_FILE), Collections.emptyMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + } + + private void commitRandomDocs(Store store) throws IOException { + IndexWriter writer = indexRandomDocs(store); + writer.commit(); + writer.close(); + } + + private IndexWriter indexRandomDocs(Store store) throws IOException { IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec( TestUtil.getDefaultCodec() ); + indexWriterConfig.setCommitOnClose(false); indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig); int docs = 1 + random().nextInt(100); @@ -1171,21 +1281,6 @@ public void testcleanupAndPreserveLatestCommitPoint() throws IOException { ); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); writer.addDocument(doc); - writer.commit(); - writer.close(); - - Store.MetadataSnapshot commitMetadata = store.getMetadata(); - - Store.MetadataSnapshot refreshMetadata = Store.MetadataSnapshot.EMPTY; - - store.cleanupAndPreserveLatestCommitPoint("test", refreshMetadata); - - // we want to ensure commitMetadata files are preserved after calling cleanup - for (String existingFile : store.directory().listAll()) { - assert (commitMetadata.contains(existingFile) == true); - } - - deleteContent(store.directory()); - IOUtils.close(store); + return writer; } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 1f2360abde2ad..22481b5a7b99f 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; @@ -572,6 +573,7 @@ private IndicesClusterStateService createIndicesClusterStateService( threadPool, SegmentReplicationCheckpointPublisher.EMPTY, SegmentReplicationTargetService.NO_OP, + SegmentReplicationSourceService.NO_OP, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 3ea74dbf38919..cc5100fba9010 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -61,6 +61,7 @@ import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.InternalEngineTests; +import 
org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.replication.RecoveryDuringReplicationTests; @@ -106,7 +107,7 @@ public void testTranslogHistoryTransferred() throws Exception { public void testWithSegmentReplication_ReplicaUsesPrimaryTranslogUUID() throws Exception { Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); - try (ReplicationGroup shards = createGroup(2, settings)) { + try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { shards.startAll(); final String expectedUUID = getTranslog(shards.getPrimary()).getTranslogUUID(); assertTrue( diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 38c55620e1223..bd3106454f49b 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -11,26 +11,32 @@ import org.junit.Assert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.shard.ShardId; -import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -51,15 +57,18 @@ public class OngoingSegmentReplicationsTests extends IndexShardTestCase { private GetSegmentFilesRequest getSegmentFilesRequest; - final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final Settings settings = Settings.builder() + .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); @Override public void setUp() throws 
Exception { super.setUp(); - primary = newStartedShard(true); - replica = newShard(primary.shardId(), false); + primary = newStartedShard(true, settings); + replica = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replica, primary, true); replicaDiscoveryNode = replica.recoveryState().getTargetNode(); primaryDiscoveryNode = replica.recoveryState().getSourceNode(); @@ -89,6 +98,8 @@ public void tearDown() throws Exception { } public void testPrepareAndSendSegments() throws IOException { + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); OngoingSegmentReplications replications = spy(new OngoingSegmentReplications(mockIndicesService, recoverySettings)); final CheckpointInfoRequest request = new CheckpointInfoRequest( 1L, @@ -108,17 +119,14 @@ public void testPrepareAndSendSegments() throws IOException { 1L, replica.routingEntry().allocationId().getId(), replicaDiscoveryNode, - new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + new ArrayList<>(copyState.getMetadataMap().values()), testCheckpoint ); - final Collection expectedFiles = List.copyOf(primary.store().getMetadata().asMap().values()); replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { @Override public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { - assertEquals(1, getSegmentFilesResponse.files.size()); - assertEquals(1, expectedFiles.size()); - assertTrue(expectedFiles.stream().findFirst().get().isSame(getSegmentFilesResponse.files.get(0))); + assertEquals(copyState.getMetadataMap().size(), getSegmentFilesResponse.files.size()); assertEquals(0, copyState.refCount()); assertFalse(replications.isInCopyStateMap(request.getCheckpoint())); assertEquals(0, replications.size()); @@ -154,6 +162,51 @@ public void testCancelReplication() throws IOException { assertEquals(0, replications.cachedCopyStateSize()); } + public void testCancelReplication_AfterSendFilesStarts() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + // add a doc and refresh so primary has more than one segment. + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + primaryDiscoveryNode, + testCheckpoint + ); + final FileChunkWriter segmentSegmentFileChunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> { + // cancel the replication as soon as the writer starts sending files. 
+ replications.cancel(replica.routingEntry().allocationId().getId(), "Test"); + }; + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + assertEquals(1, replications.size()); + assertEquals(1, replications.cachedCopyStateSize()); + getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + new ArrayList<>(copyState.getMetadataMap().values()), + testCheckpoint + ); + replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected onFailure to be invoked."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + assertEquals(0, copyState.refCount()); + assertEquals(0, replications.size()); + assertEquals(0, replications.cachedCopyStateSize()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } + public void testMultipleReplicasUseSameCheckpoint() throws IOException { IndexShard secondReplica = newShard(primary.shardId(), false); recoverReplica(secondReplica, primary, true); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 6bce74be569c3..323445bee1274 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -9,12 +9,14 @@ package org.opensearch.indices.replication; import org.apache.lucene.util.Version; +import org.junit.Assert; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -28,6 +30,8 @@ import java.util.Arrays; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -126,6 +130,39 @@ public void testGetSegmentFiles() { assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); } + public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("onFailure response expected."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(e.getClass(), 
CancellableThreads.ExecutionCancelledException.class); + latch.countDown(); + } + } + ); + replicationSource.cancel(); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved in a failure", 0, latch.getCount()); + } + private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 2c52772649acc..cde5cd980a91d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -18,6 +18,8 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -28,8 +30,12 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; public class SegmentReplicationSourceHandlerTests extends IndexShardTestCase { @@ -71,7 +77,7 @@ public void testSendFiles() throws IOException { 1 ); - final List expectedFiles = List.copyOf(copyState.getMetadataSnapshot().asMap().values()); + final List expectedFiles = List.copyOf(copyState.getMetadataMap().values()); final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( 1L, @@ -132,6 +138,9 @@ public void onFailure(Exception e) { } public void testSendFileFails() throws IOException { + // index some docs on the primary so a segment is created. 
+ indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); chunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> listener.onFailure( new OpenSearchException("Test") ); @@ -148,7 +157,7 @@ public void testSendFileFails() throws IOException { 1 ); - final List expectedFiles = List.copyOf(copyState.getMetadataSnapshot().asMap().values()); + final List expectedFiles = List.copyOf(copyState.getMetadataMap().values()); final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( 1L, @@ -197,4 +206,48 @@ public void testReplicationAlreadyRunning() throws IOException { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); Assert.assertThrows(OpenSearchException.class, () -> { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); }); } + + public void testCancelReplication() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + chunkWriter = mock(FileChunkWriter.class); + + final ReplicationCheckpoint latestReplicationCheckpoint = primary.getLatestReplicationCheckpoint(); + final CopyState copyState = new CopyState(latestReplicationCheckpoint, primary); + SegmentReplicationSourceHandler handler = new SegmentReplicationSourceHandler( + localNode, + chunkWriter, + threadPool, + copyState, + primary.routingEntry().allocationId().getId(), + 5000, + 1 + ); + + final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + Collections.emptyList(), + latestReplicationCheckpoint + ); + + // cancel before xfer starts. Cancels during copy will be tested in SegmentFileTransferHandlerTests, that uses the same + // cancellableThreads. 
+ handler.cancel("test"); + handler.sendFiles(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected failure."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + verify(chunkWriter, times(1)).cancel(); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index 4bfdd81d50a1e..6183f1e5d9dfb 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -121,9 +121,7 @@ public void testCheckpointInfo() { public void onResponse(CheckpointInfoResponse response) { assertEquals(testCheckpoint, response.getCheckpoint()); assertNotNull(response.getInfosBytes()); - // CopyStateTests sets up one pending delete file and one committed segments file - assertEquals(1, response.getPendingDeleteFiles().size()); - assertEquals(1, response.getSnapshot().size()); + assertEquals(1, response.getMetadataMap().size()); } @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index d3a6d1a97dacc..7437cb22e44d1 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,25 +9,28 @@ package org.opensearch.indices.replication; import org.junit.Assert; -import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; -import org.opensearch.transport.TransportService; +import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.doAnswer; @@ -35,12 +38,13 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; import static 
org.mockito.Mockito.eq; +import static org.opensearch.indices.replication.SegmentReplicationState.Stage.CANCELLED; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { - private IndexShard indexShard; + private IndexShard replicaShard; + private IndexShard primaryShard; private ReplicationCheckpoint checkpoint; private SegmentReplicationSource replicationSource; private SegmentReplicationTargetService sut; @@ -48,24 +52,26 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private ReplicationCheckpoint initialCheckpoint; private ReplicationCheckpoint aheadCheckpoint; + private ReplicationCheckpoint newPrimaryCheckpoint; + @Override public void setUp() throws Exception { super.setUp(); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); - final TransportService transportService = mock(TransportService.class); - indexShard = newStartedShard(false, settings); - checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 0L, 0L, 0L, 0L); + primaryShard = newStartedShard(true, settings); + replicaShard = newShard(false, settings, new NRTReplicationEngineFactory()); + recoverReplica(replicaShard, primaryShard, true); + checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); replicationSource = mock(SegmentReplicationSource.class); - when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); + when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); - sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); - initialCheckpoint = indexShard.getLatestReplicationCheckpoint(); + sut = prepareForReplication(primaryShard); + initialCheckpoint = replicaShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), @@ -73,48 +79,69 @@ public void setUp() throws Exception { initialCheckpoint.getSeqNo(), initialCheckpoint.getSegmentInfosVersion() + 1 ); + newPrimaryCheckpoint = new ReplicationCheckpoint( + initialCheckpoint.getShardId(), + initialCheckpoint.getPrimaryTerm() + 1, + initialCheckpoint.getSegmentsGen(), + initialCheckpoint.getSeqNo(), + initialCheckpoint.getSegmentInfosVersion() + 1 + ); } @Override public void tearDown() throws Exception { - closeShards(indexShard); + closeShards(primaryShard, replicaShard); super.tearDown(); } - public void testTargetReturnsSuccess_listenerCompletes() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); - } + public void testsSuccessfulReplication_listenerCompletes() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + sut.startReplication(checkpoint, replicaShard, new 
SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + latch.countDown(); + } - @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - Assert.fail(); - } + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.error("Unexpected error", e); + Assert.fail("Test should succeed"); } - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - // set up stage correctly so the transition in markAsDone succeeds on listener completion - moveTargetToFinalStage(target); - final ActionListener listener = invocation.getArgument(0); - listener.onResponse(null); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } - public void testTargetThrowsException() { + public void testReplicationFails() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); final OpenSearchException expectedError = new OpenSearchException("Fail"); + SegmentReplicationSource source = new SegmentReplicationSource() { + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(expectedError); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be called"); + } + }; final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - indexShard, - replicationSource, + replicaShard, + source, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { @@ -123,24 +150,21 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + // failures leave state object in last entered stage. + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); assertEquals(expectedError, e.getCause()); - assertTrue(sendShardFailure); + latch.countDown(); } } ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - final ActionListener listener = invocation.getArgument(0); - listener.onFailure(expectedError); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + sut.startReplication(target); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(indexShard.getLatestReplicationCheckpoint(), indexShard); + spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -148,8 +172,8 @@ public void testShardAlreadyReplicating() throws InterruptedException { // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it. 
SegmentReplicationTargetService serviceSpy = spy(sut); final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, + initialCheckpoint, + replicaShard, replicationSource, mock(SegmentReplicationTargetService.SegmentReplicationListener.class) ); @@ -161,7 +185,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { doAnswer(invocation -> { final ActionListener listener = invocation.getArgument(0); // a new checkpoint arrives before we've completed. - serviceSpy.onNewCheckpoint(aheadCheckpoint, indexShard); + serviceSpy.onNewCheckpoint(aheadCheckpoint, replicaShard); listener.onResponse(null); latch.countDown(); return null; @@ -173,12 +197,51 @@ public void testShardAlreadyReplicating() throws InterruptedException { // wait for the new checkpoint to arrive, before the listener completes. latch.await(30, TimeUnit.SECONDS); - verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(indexShard), any()); + verify(targetSpy, times(0)).cancel(any()); + verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(replicaShard), any()); + } + + public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws IOException, InterruptedException { + // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it. + SegmentReplicationTargetService serviceSpy = spy(sut); + // Create a Mockito spy of target to stub response of few method calls. + final SegmentReplicationTarget targetSpy = spy( + new SegmentReplicationTarget( + initialCheckpoint, + replicaShard, + replicationSource, + mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + ) + ); + + CountDownLatch latch = new CountDownLatch(1); + // Mocking response when startReplication is called on targetSpy we send a new checkpoint to serviceSpy and later reduce countdown + // of latch. + doAnswer(invocation -> { + // short circuit loop on new checkpoint request + doReturn(null).when(serviceSpy).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any()); + // a new checkpoint arrives before we've completed. + serviceSpy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); + try { + invocation.callRealMethod(); + } catch (CancellableThreads.ExecutionCancelledException e) { + latch.countDown(); + } + return null; + }).when(targetSpy).startReplication(any()); + + // start replication. This adds the target to on-ongoing replication collection + serviceSpy.startReplication(targetSpy); + latch.await(); + // wait for the new checkpoint to arrive, before the listener completes. 
+ assertEquals(CANCELLED, targetSpy.state().getStage()); + verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary"); + verify(serviceSpy, times(1)).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any()); } public void testNewCheckpointBehindCurrentCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(checkpoint, indexShard); + spy.onNewCheckpoint(checkpoint, replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -190,22 +253,6 @@ public void testShardNotStarted() throws IOException { closeShards(shard); } - public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOException { - allowShardFailures(); - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(aheadCheckpoint, spyShard); - verify(spy, times(1)).startReplication(any(), any(), captor.capture()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onFailure(new SegmentReplicationState(new ReplicationLuceneIndex()), new OpenSearchException("testing"), true); - verify(spyShard).failShard(any(), any()); - closeShard(indexShard, false); - } - /** * here we are starting a new shard in PrimaryMode and testing that we don't process a checkpoint on shard when it is in PrimaryMode. */ @@ -215,71 +262,10 @@ public void testRejectCheckpointOnShardPrimaryMode() throws IOException { // Starting a new shard in PrimaryMode. IndexShard primaryShard = newStartedShard(true); IndexShard spyShard = spy(primaryShard); - doNothing().when(spy).startReplication(any(), any(), any()); spy.onNewCheckpoint(aheadCheckpoint, spyShard); // Verify that checkpoint is not processed as shard is in PrimaryMode. 
verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } - - public void testReplicationOnDone() throws IOException { - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ReplicationCheckpoint cp = indexShard.getLatestReplicationCheckpoint(); - ReplicationCheckpoint newCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 1 - ); - ReplicationCheckpoint anotherNewCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 2 - ); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(newCheckpoint, spyShard); - spy.onNewCheckpoint(anotherNewCheckpoint, spyShard); - verify(spy, times(1)).startReplication(eq(newCheckpoint), any(), captor.capture()); - verify(spy, times(1)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex())); - doNothing().when(spy).onNewCheckpoint(any(), any()); - verify(spy, timeout(0).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - closeShard(indexShard, false); - - } - - public void testBeforeIndexShardClosed_CancelsOngoingReplications() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - mock(SegmentReplicationTargetService.SegmentReplicationListener.class) - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - sut.startReplication(spy); - sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY); - verify(spy, times(1)).cancel(any()); - } - - /** - * Move the {@link SegmentReplicationTarget} object through its {@link SegmentReplicationState.Stage} values in order - * until the final, non-terminal stage. 
- */ - private void moveTargetToFinalStage(SegmentReplicationTarget target) { - SegmentReplicationState.Stage[] stageValues = SegmentReplicationState.Stage.values(); - assertEquals(target.state().getStage(), SegmentReplicationState.Stage.INIT); - // Skip the first two stages (DONE and INIT) and iterate until the last value - for (int i = 2; i < stageValues.length; i++) { - target.state().setStage(stageValues[i]); - } - } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 11217a46b3c69..f8341573770a6 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -18,7 +18,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.Directory; @@ -51,7 +50,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Random; import java.util.Arrays; @@ -71,26 +69,13 @@ public class SegmentReplicationTargetTests extends IndexShardTestCase { private ReplicationCheckpoint repCheckpoint; private ByteBuffersDataOutput buffer; - private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); - private static final StoreFileMetadata SEGMENTS_FILE_DIFF = new StoreFileMetadata( - IndexFileNames.SEGMENTS, - 5L, - "different", - Version.LATEST - ); - private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); + private static final String SEGMENT_NAME = "_0.si"; + private static final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata(SEGMENT_NAME, 1L, "0", Version.LATEST); + private static final StoreFileMetadata SEGMENT_FILE_DIFF = new StoreFileMetadata(SEGMENT_NAME, 5L, "different", Version.LATEST); - private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), - null, - 0 - ); + private static final Map SI_SNAPSHOT = Map.of(SEGMENT_FILE.name(), SEGMENT_FILE); - private static final Store.MetadataSnapshot SI_SNAPSHOT_DIFFERENT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE_DIFF.name(), SEGMENTS_FILE_DIFF), - null, - 0 - ); + private static final Map SI_SNAPSHOT_DIFFERENT = Map.of(SEGMENT_FILE_DIFF.name(), SEGMENT_FILE_DIFF); private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", @@ -135,7 +120,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -146,9 +131,8 @@ public void getSegmentFiles( Store store, ActionListener listener ) { - assertEquals(filesToFetch.size(), 2); - assert (filesToFetch.contains(SEGMENTS_FILE)); - assert (filesToFetch.contains(PENDING_DELETE_FILE)); + assertEquals(1, filesToFetch.size()); + assert 
(filesToFetch.contains(SEGMENT_FILE)); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -230,7 +214,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -273,7 +257,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -318,7 +302,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -362,7 +346,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -380,7 +364,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener.class ); segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); - when(segrepTarget.getMetadataSnapshot()).thenReturn(SI_SNAPSHOT_DIFFERENT); + when(segrepTarget.getMetadataMap()).thenReturn(SI_SNAPSHOT_DIFFERENT); segrepTarget.startReplication(new ActionListener() { @Override public void onResponse(Void replicationResponse) { @@ -413,9 +397,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse( - new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1), buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE)) - ); + listener.onResponse(new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1).asMap(), buffer.toArrayCopy())); } @Override @@ -434,7 +416,7 @@ public void getSegmentFiles( ); segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); - when(segrepTarget.getMetadataSnapshot()).thenReturn(storeMetadataSnapshots.get(0)); + when(segrepTarget.getMetadataMap()).thenReturn(storeMetadataSnapshots.get(0).asMap()); segrepTarget.startReplication(new ActionListener() { @Override public void onResponse(Void replicationResponse) { diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index a6f0cf7e98411..77a4a6d22039e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -22,7 +22,6 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; -import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -32,6 +31,7 @@ public class CopyStateTests extends 
IndexShardTestCase { private static final long EXPECTED_LONG_VALUE = 1L; private static final ShardId TEST_SHARD_ID = new ShardId("testIndex", "testUUID", 0); private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); + private static final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata("_0.si", 1L, "0", Version.LATEST); private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); private static final Store.MetadataSnapshot COMMIT_SNAPSHOT = new Store.MetadataSnapshot( @@ -41,7 +41,7 @@ public class CopyStateTests extends IndexShardTestCase { ); private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), + Map.of(SEGMENT_FILE.name(), SEGMENT_FILE), null, 0 ); @@ -61,10 +61,6 @@ public void testCopyStateCreation() throws IOException { // version was never set so this should be zero assertEquals(0, checkpoint.getSegmentInfosVersion()); assertEquals(EXPECTED_LONG_VALUE, checkpoint.getPrimaryTerm()); - - Set pendingDeleteFiles = copyState.getPendingDeleteFiles(); - assertEquals(1, pendingDeleteFiles.size()); - assertTrue(pendingDeleteFiles.contains(PENDING_DELETE_FILE)); } public static IndexShard createMockIndexShard() throws IOException { @@ -78,7 +74,7 @@ public static IndexShard createMockIndexShard() throws IOException { SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major); when(mockShard.getSegmentInfosSnapshot()).thenReturn(new GatedCloseable<>(testSegmentInfos, () -> {})); - when(mockStore.getMetadata(testSegmentInfos)).thenReturn(SI_SNAPSHOT); + when(mockStore.getSegmentMetadataMap(testSegmentInfos)).thenReturn(SI_SNAPSHOT.asMap()); IndexCommit mockIndexCommit = mock(IndexCommit.class); when(mockShard.acquireLastIndexCommit(false)).thenReturn(new GatedCloseable<>(mockIndexCommit, () -> {})); diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 7587f48503625..1789dd3b2a288 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -105,7 +105,25 @@ public void onFailure(ReplicationState state, OpenSearchException e, boolean sen collection.cancel(recoveryId, "meh"); } } + } + public void testMultiReplicationsForSingleShard() throws Exception { + try (ReplicationGroup shards = createGroup(0)) { + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); + final IndexShard shard1 = shards.addReplica(); + final IndexShard shard2 = shards.addReplica(); + final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard1); + final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shard2); + try { + collection.getOngoingReplicationTarget(shard1.shardId()); + } catch (AssertionError e) { + assertEquals(e.getMessage(), "More than one on-going replication targets"); + } finally { + collection.cancel(recoveryId, "meh"); + collection.cancel(recoveryId2, "meh"); + } + closeShards(shard1, shard2); + } } public void testRecoveryCancellation() throws Exception { diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java new 
file mode 100644 index 0000000000000..25b0ba0f273be --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.junit.Before; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.test.rest.RestActionTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class RestDecommissionActionTests extends RestActionTestCase { + + private RestDecommissionAction action; + + @Before + public void setupAction() { + action = new RestDecommissionAction(); + controller().registerHandler(action); + } + + public void testCreateRequest() throws IOException { + Map params = new HashMap<>(); + params.put("awareness_attribute_name", "zone"); + params.put("awareness_attribute_value", "zone-1"); + params.put("timeout", "10s"); + + RestRequest deprecatedRequest = buildRestRequest(params); + + DecommissionRequest request = action.createRequest(deprecatedRequest); + assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); + assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(request.getTimeout(), TimeValue.timeValueSeconds(10L)); + assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); + } + + public void testCreateRequestWithDefaultTimeout() throws IOException { + Map params = new HashMap<>(); + params.put("awareness_attribute_name", "zone"); + params.put("awareness_attribute_value", "zone-1"); + + RestRequest deprecatedRequest = buildRestRequest(params); + + DecommissionRequest request = action.createRequest(deprecatedRequest); + assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); + assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(request.getTimeout(), TimeValue.timeValueSeconds(300L)); + assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); + } + + private FakeRestRequest buildRestRequest(Map params) { + return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}") + .withParams(params) + .build(); + } +} diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index ed3aa19afa146..a8679a087216d 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -134,8 +134,8 @@ public void testBuildTable() { assertThat(row.get(3).value, equalTo(shardRouting.state())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(69).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(70).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); + 
assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); } } } diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index b730dc01c4871..9a28f1800847e 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -14,6 +14,10 @@ import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -33,6 +37,9 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; +import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; @@ -68,6 +75,7 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client()); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index") .setSize(2) @@ -80,6 +88,7 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti validatePitStats("index", 1, 0, 0); validatePitStats("index", 1, 0, 1); service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test + assertSegments(true, client()); validatePitStats("index", 0, 1, 0); validatePitStats("index", 0, 1, 1); } @@ -96,12 +105,14 @@ public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse response = execute.get(); PitTestsUtil.assertUsingGetAllPits(client(), response.getId(), response.getCreationTime()); + assertSegments(false, client()); assertEquals(4, response.getSuccessfulShards()); assertEquals(4, service.getActiveContexts()); validatePitStats("index", 1, 0, 0); validatePitStats("index1", 1, 0, 0); service.doClose(); + assertSegments(true, client()); validatePitStats("index", 0, 1, 0); validatePitStats("index1", 0, 1, 0); } @@ -115,6 +126,7 @@ public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, I ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client()); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse 
searchResponse = client().prepareSearch("index") .setSize(2) @@ -127,6 +139,7 @@ public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, I validatePitStats("index", 1, 0, 0); validatePitStats("index", 1, 0, 1); service.doClose(); + assertSegments(true, client()); validatePitStats("index", 0, 1, 0); validatePitStats("index", 0, 1, 1); } @@ -144,6 +157,7 @@ public void testCreatePITWithNonExistentIndex() { assertTrue(ex.getMessage().contains("no such index [index1]")); assertEquals(0, service.getActiveContexts()); + assertSegments(true, client()); service.doClose(); } @@ -164,6 +178,7 @@ public void testCreatePITOnCloseIndex() throws ExecutionException, InterruptedEx SearchService service = getInstanceFromNode(SearchService.class); assertEquals(0, service.getActiveContexts()); PitTestsUtil.assertGetAllPitsEmpty(client()); + assertSegments(true, client()); service.doClose(); } @@ -187,6 +202,7 @@ public void testPitSearchOnDeletedIndex() throws ExecutionException, Interrupted SearchService service = getInstanceFromNode(SearchService.class); PitTestsUtil.assertGetAllPitsEmpty(client()); assertEquals(0, service.getActiveContexts()); + assertSegments(true, client()); service.doClose(); } @@ -212,6 +228,7 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client()); SearchService service = getInstanceFromNode(SearchService.class); assertEquals(2, service.getActiveContexts()); validatePitStats("index", 1, 0, 0); @@ -227,7 +244,7 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); assertEquals(0, service.getActiveContexts()); PitTestsUtil.assertGetAllPitsEmpty(client()); - + assertSegments(true, client()); // PIT reader contexts are lost after close, verifying it with open index api client().admin().indices().prepareOpen("index").get(); ex = expectThrows(SearchPhaseExecutionException.class, () -> { @@ -271,6 +288,52 @@ public void testMaxOpenPitContexts() throws Exception { validatePitStats("index", 0, maxPitContexts, 0); } + public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + try { + for (int i = 0; i < 1000; i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + } catch (Exception ex) { + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." + ) + ); + } + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); + // deleteall + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, maxPitContexts, 0); + client().execute(CreatePitAction.INSTANCE, request).get(); + validatePitStats("index", 1, maxPitContexts, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts + 1, 0); + } + public void testOpenPitContextsConcurrently() throws Exception { createIndex("index"); final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); @@ -491,6 +554,7 @@ public void testPitAfterUpdateIndex() throws Exception { assertEquals(0, service.getActiveContexts()); validatePitStats("test", 0, 1, 0); PitTestsUtil.assertGetAllPitsEmpty(client()); + assertSegments(true, client()); } } @@ -503,6 +567,7 @@ public void testConcurrentSearches() throws Exception { ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client()); Thread[] threads = new Thread[5]; CountDownLatch latch = new CountDownLatch(threads.length); @@ -538,6 +603,7 @@ public void testConcurrentSearches() throws Exception { validatePitStats("index", 0, 1, 0); validatePitStats("index", 0, 1, 1); PitTestsUtil.assertGetAllPitsEmpty(client()); + assertSegments(true, client()); } public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException, diff --git a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java index 36a6eb3ae87b0..bd0fbfe69960c 100644 --- a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java @@ -48,7 +48,7 @@ import java.util.ArrayList; import java.util.List; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.longEncode; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.longEncode; public class DocValueFormatTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java index 29126d786770e..d29ccf5b97138 100644 --- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -51,6 +51,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; +import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -85,6 +86,7 @@ public void testPit() throws Exception { assertEquals(2, searchResponse.getTotalShards()); validatePitStats("index", 2, 2); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client()); } public void 
testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception { @@ -112,6 +114,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, "index", 1, client()); assertEquals(1, pitResponse.getSuccessfulShards()); assertEquals(2, pitResponse.getTotalShards()); SearchResponse searchResponse = client().prepareSearch("index") diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index 111ce23f8a0cb..94fb6cded637d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -48,8 +48,6 @@ import org.opensearch.search.aggregations.bucket.composite.InternalCompositeTests; import org.opensearch.search.aggregations.bucket.filter.InternalFilterTests; import org.opensearch.search.aggregations.bucket.filter.InternalFiltersTests; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridTests; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridTests; import org.opensearch.search.aggregations.bucket.global.InternalGlobalTests; import org.opensearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; @@ -157,8 +155,6 @@ private static List> getAggsTests() { aggsTests.add(new InternalGlobalTests()); aggsTests.add(new InternalFilterTests()); aggsTests.add(new InternalSamplerTests()); - aggsTests.add(new GeoHashGridTests()); - aggsTests.add(new GeoTileGridTests()); aggsTests.add(new InternalRangeTests()); aggsTests.add(new InternalDateRangeTests()); aggsTests.add(new InternalGeoDistanceTests()); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index c4a87f3993bb4..9290183ec7312 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -32,10 +32,8 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.common.geo.GeoBoundingBoxTests; import org.opensearch.script.Script; import org.opensearch.search.aggregations.BaseAggregationTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.sort.SortOrder; @@ -74,17 +72,6 @@ private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { return histo; } - private GeoTileGridValuesSourceBuilder randomGeoTileGridValuesSourceBuilder() { - GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); - if (randomBoolean()) { - geoTile.precision(randomIntBetween(0, GeoTileUtils.MAX_ZOOM)); - } - if (randomBoolean()) { - geoTile.geoBoundingBox(GeoBoundingBoxTests.randomBBox()); - 
} - return geoTile; - } - private TermsValuesSourceBuilder randomTermsSourceBuilder() { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); if (randomBoolean()) { @@ -118,11 +105,9 @@ private HistogramValuesSourceBuilder randomHistogramSourceBuilder() { @Override protected CompositeAggregationBuilder createTestAggregatorBuilder() { int numSources = randomIntBetween(1, 10); - numSources = 1; List> sources = new ArrayList<>(); for (int i = 0; i < numSources; i++) { - int type = randomIntBetween(0, 3); - type = 3; + int type = randomIntBetween(0, 2); switch (type) { case 0: sources.add(randomTermsSourceBuilder()); @@ -133,9 +118,6 @@ protected CompositeAggregationBuilder createTestAggregatorBuilder() { case 2: sources.add(randomHistogramSourceBuilder()); break; - case 3: - sources.add(randomGeoTileGridValuesSourceBuilder()); - break; default: throw new AssertionError("wrong branch"); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 88b2323b8adfc..25003e0b84567 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -32,68 +32,24 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.apache.lucene.tests.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.DoublePoint; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.document.IntPoint; -import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.SortedSetDocValuesField; -import org.apache.lucene.document.StringField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.tests.util.TestUtil; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.text.Text; -import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.time.DateFormatters; -import org.opensearch.index.Index; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.DateFieldMapper; -import org.opensearch.index.mapper.DocumentMapper; -import org.opensearch.index.mapper.GeoPointFieldMapper; -import org.opensearch.index.mapper.IpFieldMapper; -import org.opensearch.index.mapper.KeywordFieldMapper; -import 
org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.aggregations.Aggregator; -import org.opensearch.search.aggregations.AggregatorTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.composite.BaseCompositeAggregatorTestCase; import org.opensearch.search.aggregations.metrics.InternalMax; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; import org.opensearch.search.aggregations.metrics.TopHits; import org.opensearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.IndexSettingsModule; -import org.junit.After; -import org.junit.Before; import java.io.IOException; import java.net.InetAddress; @@ -109,51 +65,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class CompositeAggregatorTests extends AggregatorTestCase { - private static MappedFieldType[] FIELD_TYPES; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - FIELD_TYPES = new MappedFieldType[8]; - FIELD_TYPES[0] = new KeywordFieldMapper.KeywordFieldType("keyword"); - FIELD_TYPES[1] = new NumberFieldMapper.NumberFieldType("long", NumberFieldMapper.NumberType.LONG); - FIELD_TYPES[2] = new NumberFieldMapper.NumberFieldType("double", NumberFieldMapper.NumberType.DOUBLE); - FIELD_TYPES[3] = new DateFieldMapper.DateFieldType("date", DateFormatter.forPattern("yyyy-MM-dd||epoch_millis")); - FIELD_TYPES[4] = new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.INTEGER); - FIELD_TYPES[5] = new KeywordFieldMapper.KeywordFieldType("terms"); - FIELD_TYPES[6] = new IpFieldMapper.IpFieldType("ip"); - FIELD_TYPES[7] = new GeoPointFieldMapper.GeoPointFieldType("geo_point"); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - FIELD_TYPES = null; - } - @Override - protected MapperService mapperServiceMock() { - MapperService mapperService = mock(MapperService.class); - DocumentMapper mapper = mock(DocumentMapper.class); - when(mapper.typeText()).thenReturn(new Text("_doc")); - when(mapper.type()).thenReturn("_doc"); - when(mapperService.documentMapper()).thenReturn(mapper); - return mapperService; - } +public class CompositeAggregatorTests extends BaseCompositeAggregatorTestCase { public void testUnmappedFieldWithTerms() throws Exception { final List>> dataset = new ArrayList<>(); @@ -234,80 +153,6 @@ public void testUnmappedFieldWithTerms() throws Exception 
{ ); } - public void testUnmappedFieldWithGeopoint() throws Exception { - final List>> dataset = new ArrayList<>(); - final String mappedFieldName = "geo_point"; - dataset.addAll( - Arrays.asList( - createDocument(mappedFieldName, new GeoPoint(48.934059, 41.610741)), - createDocument(mappedFieldName, new GeoPoint(-23.065941, 113.610741)), - createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)), - createDocument(mappedFieldName, new GeoPoint(37.2343, -115.8067)), - createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)) - ) - ); - - // just unmapped = no results - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder("name", Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped"))), - (result) -> assertEquals(0, result.getBuckets().size()) - ); - - // unmapped missing bucket = one result - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder( - "name", - Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true)) - ), - (result) -> { - assertEquals(1, result.getBuckets().size()); - assertEquals("{unmapped=null}", result.afterKey().toString()); - assertEquals("{unmapped=null}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(5L, result.getBuckets().get(0).getDocCount()); - } - ); - - // field + unmapped, no missing bucket = no results - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder( - "name", - Arrays.asList( - new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), - new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped") - ) - ), - (result) -> assertEquals(0, result.getBuckets().size()) - ); - - // field + unmapped with missing bucket = multiple results - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder( - "name", - Arrays.asList( - new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), - new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true) - ) - ), - (result) -> { - assertEquals(2, result.getBuckets().size()); - assertEquals("{geo_point=7/64/56, unmapped=null}", result.afterKey().toString()); - assertEquals("{geo_point=7/32/56, unmapped=null}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("{geo_point=7/64/56, unmapped=null}", result.getBuckets().get(1).getKeyAsString()); - assertEquals(3L, result.getBuckets().get(1).getDocCount()); - } - ); - - } - public void testUnmappedFieldWithHistogram() throws Exception { final List>> dataset = new ArrayList<>(); final String mappedFieldName = "price"; @@ -2483,42 +2328,6 @@ public void testWithIP() throws Exception { }); } - public void testWithGeoPoint() throws Exception { - final List>> dataset = new ArrayList<>(); - dataset.addAll( - Arrays.asList( - createDocument("geo_point", new GeoPoint(48.934059, 41.610741)), - createDocument("geo_point", new GeoPoint(-23.065941, 113.610741)), - createDocument("geo_point", new GeoPoint(90.0, 0.0)), - createDocument("geo_point", new GeoPoint(37.2343, -115.8067)), - createDocument("geo_point", new GeoPoint(90.0, 0.0)) - ) - ); - 
testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { - GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); - return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)); - }, (result) -> { - assertEquals(2, result.getBuckets().size()); - assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); - assertEquals("{geo_point=7/32/56}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("{geo_point=7/64/56}", result.getBuckets().get(1).getKeyAsString()); - assertEquals(3L, result.getBuckets().get(1).getDocCount()); - }); - - testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { - GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); - return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)).aggregateAfter( - Collections.singletonMap("geo_point", "7/32/56") - ); - }, (result) -> { - assertEquals(1, result.getBuckets().size()); - assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); - assertEquals("{geo_point=7/64/56}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(3L, result.getBuckets().get(0).getDocCount()); - }); - } - public void testEarlyTermination() throws Exception { final List>> dataset = new ArrayList<>(); dataset.addAll( @@ -2648,193 +2457,4 @@ public void testIndexSortWithDuplicate() throws Exception { ); } } - - private void testSearchCase( - List queries, - List>> dataset, - Supplier create, - Consumer verify - ) throws IOException { - for (Query query : queries) { - executeTestCase(false, false, query, dataset, create, verify); - executeTestCase(false, true, query, dataset, create, verify); - } - } - - private void executeTestCase( - boolean forceMerge, - boolean useIndexSort, - Query query, - List>> dataset, - Supplier create, - Consumer verify - ) throws IOException { - Map types = Arrays.stream(FIELD_TYPES) - .collect(Collectors.toMap(MappedFieldType::name, Function.identity())); - CompositeAggregationBuilder aggregationBuilder = create.get(); - Sort indexSort = useIndexSort ? buildIndexSort(aggregationBuilder.sources(), types) : null; - IndexSettings indexSettings = createIndexSettings(indexSort); - try (Directory directory = newDirectory()) { - IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); - if (indexSort != null) { - config.setIndexSort(indexSort); - config.setCodec(TestUtil.getDefaultCodec()); - } - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) { - Document document = new Document(); - int id = 0; - for (Map> fields : dataset) { - document.clear(); - addToDocument(id, document, fields); - indexWriter.addDocument(document); - id++; - } - if (forceMerge || rarely()) { - // forceMerge randomly or if the collector-per-leaf testing stuff would break the tests. 
- indexWriter.forceMerge(1); - } else { - if (dataset.size() > 0) { - int numDeletes = randomIntBetween(1, 25); - for (int i = 0; i < numDeletes; i++) { - id = randomIntBetween(0, dataset.size() - 1); - indexWriter.deleteDocuments(new Term("id", Integer.toString(id))); - document.clear(); - addToDocument(id, document, dataset.get(id)); - indexWriter.addDocument(document); - } - } - - } - } - try (IndexReader indexReader = DirectoryReader.open(directory)) { - IndexSearcher indexSearcher = new IndexSearcher(indexReader); - InternalComposite composite = searchAndReduce(indexSettings, indexSearcher, query, aggregationBuilder, FIELD_TYPES); - verify.accept(composite); - } - } - } - - private static IndexSettings createIndexSettings(Sort sort) { - Settings.Builder builder = Settings.builder(); - if (sort != null) { - String[] fields = Arrays.stream(sort.getSort()).map(SortField::getField).toArray(String[]::new); - String[] orders = Arrays.stream(sort.getSort()).map((o) -> o.getReverse() ? "desc" : "asc").toArray(String[]::new); - builder.putList("index.sort.field", fields); - builder.putList("index.sort.order", orders); - } - return IndexSettingsModule.newIndexSettings(new Index("_index", "0"), builder.build()); - } - - private void addToDocument(int id, Document doc, Map> keys) { - doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); - for (Map.Entry> entry : keys.entrySet()) { - final String name = entry.getKey(); - for (Object value : entry.getValue()) { - if (value instanceof Integer) { - doc.add(new SortedNumericDocValuesField(name, (int) value)); - doc.add(new IntPoint(name, (int) value)); - } else if (value instanceof Long) { - doc.add(new SortedNumericDocValuesField(name, (long) value)); - doc.add(new LongPoint(name, (long) value)); - } else if (value instanceof Double) { - doc.add(new SortedNumericDocValuesField(name, NumericUtils.doubleToSortableLong((double) value))); - doc.add(new DoublePoint(name, (double) value)); - } else if (value instanceof String) { - doc.add(new SortedSetDocValuesField(name, new BytesRef((String) value))); - doc.add(new StringField(name, new BytesRef((String) value), Field.Store.NO)); - } else if (value instanceof InetAddress) { - doc.add(new SortedSetDocValuesField(name, new BytesRef(InetAddressPoint.encode((InetAddress) value)))); - doc.add(new InetAddressPoint(name, (InetAddress) value)); - } else if (value instanceof GeoPoint) { - GeoPoint point = (GeoPoint) value; - doc.add( - new SortedNumericDocValuesField( - name, - GeoTileUtils.longEncode(point.lon(), point.lat(), GeoTileGridAggregationBuilder.DEFAULT_PRECISION) - ) - ); - doc.add(new LatLonPoint(name, point.lat(), point.lon())); - } else { - throw new AssertionError("invalid object: " + value.getClass().getSimpleName()); - } - } - } - } - - private static Map createAfterKey(Object... fields) { - assert fields.length % 2 == 0; - final Map map = new HashMap<>(); - for (int i = 0; i < fields.length; i += 2) { - String field = (String) fields[i]; - map.put(field, fields[i + 1]); - } - return map; - } - - @SuppressWarnings("unchecked") - private static Map> createDocument(Object... 
fields) { - assert fields.length % 2 == 0; - final Map> map = new HashMap<>(); - for (int i = 0; i < fields.length; i += 2) { - String field = (String) fields[i]; - if (fields[i + 1] instanceof List) { - map.put(field, (List) fields[i + 1]); - } else { - map.put(field, Collections.singletonList(fields[i + 1])); - } - } - return map; - } - - private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); - } - - private static Sort buildIndexSort(List> sources, Map fieldTypes) { - List sortFields = new ArrayList<>(); - Map remainingFieldTypes = new HashMap<>(fieldTypes); - for (CompositeValuesSourceBuilder source : sources) { - MappedFieldType type = fieldTypes.remove(source.field()); - remainingFieldTypes.remove(source.field()); - SortField sortField = sortFieldFrom(type); - if (sortField == null) { - break; - } - sortFields.add(sortField); - } - while (remainingFieldTypes.size() > 0 && randomBoolean()) { - // Add extra unused sorts - List fields = new ArrayList<>(remainingFieldTypes.keySet()); - Collections.sort(fields); - String field = fields.get(between(0, fields.size() - 1)); - SortField sortField = sortFieldFrom(remainingFieldTypes.remove(field)); - if (sortField != null) { - sortFields.add(sortField); - } - } - return sortFields.size() > 0 ? new Sort(sortFields.toArray(new SortField[0])) : null; - } - - private static SortField sortFieldFrom(MappedFieldType type) { - if (type instanceof KeywordFieldMapper.KeywordFieldType) { - return new SortedSetSortField(type.name(), false); - } else if (type instanceof DateFieldMapper.DateFieldType) { - return new SortedNumericSortField(type.name(), SortField.Type.LONG, false); - } else if (type instanceof NumberFieldMapper.NumberFieldType) { - switch (type.typeName()) { - case "byte": - case "short": - case "integer": - return new SortedNumericSortField(type.name(), SortField.Type.INT, false); - case "long": - return new SortedNumericSortField(type.name(), SortField.Type.LONG, false); - case "float": - case "double": - return new SortedNumericSortField(type.name(), SortField.Type.DOUBLE, false); - default: - return null; - } - } - return null; - } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java index dfe4034650594..1443208a1d2fc 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java @@ -34,14 +34,15 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.geometry.Rectangle; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.MAX_ZOOM; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.checkPrecisionRange; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.hashToGeoPoint; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.keyToGeoPoint; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.longEncode; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.stringEncode; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.MAX_ZOOM; +import static 
org.opensearch.search.aggregations.bucket.GeoTileUtils.checkPrecisionRange; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.hashToGeoPoint; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.keyToGeoPoint; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.longEncode; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.stringEncode; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java new file mode 100644 index 0000000000000..5ca384daedbff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests to verify behavior of create pit rest action + */ +public class RestCreatePitActionTests extends OpenSearchTestCase { + public void testRestCreatePit() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertFalse(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + params.put("allow_partial_pit_creation", "false"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } + + public void testRestCreatePitDefaultPartialCreation() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertTrue(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + 
.build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java new file mode 100644 index 0000000000000..0bfa16aafe1e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestDeletePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * Tests to verify the behavior of rest delete pit action for list delete and delete all PIT endpoints + */ +public class RestDeletePitActionTests extends OpenSearchTestCase { + public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exception { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{invalid_json}"), + XContentType.JSON + ).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + + public void testDeletePitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("BODY")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPit() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new 
FakeRestRequest.Builder(xContentRegistry()).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPitWithBody() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); + } + } + + public void testDeletePitQueryStringParamsShouldThrowException() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(2)); + assertThat(request.getPitIds().get(0), equalTo("QUERY_STRING")); + assertThat(request.getPitIds().get(1), equalTo("QUERY_STRING_1")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( + Collections.singletonMap("pit_id", "QUERY_STRING,QUERY_STRING_1") + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("unrecognized param")); + } + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4b8eec70f2c1a..ff4005d9bcedf 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -172,7 +172,7 @@ import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -185,6 +185,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; @@ -1826,7 +1827,7 @@ 
public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -1857,6 +1858,7 @@ public void onFailure(final Exception e) { transportService, new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService) ), + SegmentReplicationSourceService.NO_OP, shardStateAction, new NodeMappingRefreshAction(transportService, metadataMappingService), repositoriesService, diff --git a/settings.gradle b/settings.gradle index 65dc6a13100e2..92e07cbb2e7fb 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.10.3" + id "com.gradle.enterprise" version "3.11.1" } buildCache { @@ -47,11 +47,13 @@ List projects = [ 'distribution:archives:freebsd-tar', 'distribution:archives:no-jdk-freebsd-tar', 'distribution:archives:linux-arm64-tar', + 'distribution:archives:linux-s390x-tar', 'distribution:archives:linux-tar', 'distribution:archives:no-jdk-linux-tar', 'distribution:docker', 'distribution:docker:docker-arm64-build-context', 'distribution:docker:docker-arm64-export', + 'distribution:docker:docker-s390x-export', 'distribution:docker:docker-build-context', 'distribution:docker:docker-export', 'distribution:packages:arm64-deb', diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 174747d306ff5..f4a9f51789679 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -1070,6 +1070,22 @@ public List generateHistoryOnReplica( boolean allowGapInSeqNo, boolean allowDuplicate, boolean includeNestedDocs + ) throws Exception { + return generateHistoryOnReplica( + numOps, + allowGapInSeqNo, + allowDuplicate, + includeNestedDocs, + randomFrom(Engine.Operation.TYPE.values()) + ); + } + + public List generateHistoryOnReplica( + int numOps, + boolean allowGapInSeqNo, + boolean allowDuplicate, + boolean includeNestedDocs, + Engine.Operation.TYPE opType ) throws Exception { long seqNo = 0; final int maxIdValue = randomInt(numOps * 2); @@ -1077,7 +1093,6 @@ public List generateHistoryOnReplica( CheckedBiFunction nestedParsedDocFactory = nestedParsedDocFactory(); for (int i = 0; i < numOps; i++) { final String id = Integer.toString(randomInt(maxIdValue)); - final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values()); final boolean isNestedDoc = includeNestedDocs && opType == Engine.Operation.TYPE.INDEX && randomBoolean(); final int nestedValues = between(0, 3); final long startTime = threadPool.relativeTimeInNanos(); @@ -1492,10 +1507,10 @@ public static MapperService createMapperService() throws IOException { * Exposes a translog associated with the given engine for testing purpose. 
*/ public static Translog getTranslog(Engine engine) { - assert engine instanceof InternalEngine : "only InternalEngines have translogs, got: " + engine.getClass(); - InternalEngine internalEngine = (InternalEngine) engine; - internalEngine.ensureOpen(); - TranslogManager translogManager = internalEngine.translogManager(); + assert engine instanceof InternalEngine || engine instanceof NRTReplicationEngine + : "only InternalEngines or NRTReplicationEngines have translogs, got: " + engine.getClass(); + engine.ensureOpen(); + TranslogManager translogManager = engine.translogManager(); assert translogManager instanceof InternalTranslogManager : "only InternalTranslogManager have translogs, got: " + engine.getClass(); InternalTranslogManager internalTranslogManager = (InternalTranslogManager) translogManager; diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index f446538acccbb..09eca006d600a 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -59,12 +59,15 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -88,6 +91,8 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.store.RemoteDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.InternalTranslogFactory; @@ -106,7 +111,10 @@ import org.opensearch.indices.replication.CheckpointInfoResponse; import org.opensearch.indices.replication.GetSegmentFilesResponse; import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; @@ -121,12 +129,17 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; +import java.nio.file.Path; +import java.util.ArrayList; import 
java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -139,7 +152,9 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; /** @@ -532,7 +547,10 @@ protected IndexShard newShard( ShardId shardId = shardPath.getShardId(); NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); - storeProvider = is -> createStore(is, remoteShardPath); + RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory); + storeProvider = is -> createStore(shardId, is, remoteSegmentStoreDirectory); remoteStore = storeProvider.apply(indexSettings); } indexShard = new IndexShard( @@ -570,6 +588,13 @@ protected IndexShard newShard( return indexShard; } + private RemoteDirectory newRemoteDirectory(Path f) throws IOException { + FsBlobStore fsBlobStore = new FsBlobStore(1024, f, false); + BlobPath blobPath = new BlobPath(); + BlobContainer fsBlobContainer = new FsBlobContainer(fsBlobStore, blobPath, f); + return new RemoteDirectory(fsBlobContainer); + } + /** * Takes an existing shard, closes it and starts a new initialing shard at the same location * @@ -1154,35 +1179,36 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { } /** - * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. - * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that - * writes all segments directly to the target. + * Segment Replication specific test method - Creates a {@link SegmentReplicationTargetService} to perform replications that has + * been configured to return the given primaryShard's current segments. + * + * @param primaryShard {@link IndexShard} - The primary shard to replicate from. 
*/ - public final void replicateSegments(IndexShard primaryShard, List replicaShards) throws IOException, InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); - Store.MetadataSnapshot primaryMetadata; - try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { - final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); - } - final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); - - final ReplicationCollection replicationCollection = new ReplicationCollection<>(logger, threadPool); - final SegmentReplicationSource source = new SegmentReplicationSource() { + public final SegmentReplicationTargetService prepareForReplication(IndexShard primaryShard) { + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory + ); + final SegmentReplicationSource replicationSource = new SegmentReplicationSource() { @Override public void getCheckpointMetadata( long replicationId, ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) - ); + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + copyState.decRef(); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } } @Override @@ -1194,9 +1220,7 @@ public void getSegmentFiles( ActionListener listener ) { try ( - final ReplicationCollection.ReplicationRef replicationRef = replicationCollection.get( - replicationId - ) + final ReplicationCollection.ReplicationRef replicationRef = targetService.get(replicationId) ) { writeFileChunks(replicationRef.get(), primaryShard, filesToFetch.toArray(new StoreFileMetadata[] {})); } catch (IOException e) { @@ -1205,50 +1229,68 @@ public void getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; + when(sourceFactory.get(any())).thenReturn(replicationSource); + return targetService; + } + + /** + * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. + * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that + * writes all segments directly to the target. + * @param primaryShard - {@link IndexShard} The current primary shard. + * @param replicaShards - Replicas that will be updated. + * @return {@link List} List of target components orchestrating replication. 
+ */ + public final List replicateSegments(IndexShard primaryShard, List replicaShards) + throws IOException, InterruptedException { + final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard); + return replicateSegments(targetService, primaryShard, replicaShards); + } + public final List replicateSegments( + SegmentReplicationTargetService targetService, + IndexShard primaryShard, + List replicaShards + ) throws IOException, InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + Map primaryMetadata; + try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getSegmentMetadataMap(primarySegmentInfos); + } + List ids = new ArrayList<>(); for (IndexShard replica : replicaShards) { - final SegmentReplicationTarget target = new SegmentReplicationTarget( + final SegmentReplicationTarget target = targetService.startReplication( ReplicationCheckpoint.empty(replica.shardId), replica, - source, - new ReplicationListener() { + new SegmentReplicationTargetService.SegmentReplicationListener() { @Override - public void onDone(ReplicationState state) { + public void onReplicationDone(SegmentReplicationState state) { try (final GatedCloseable snapshot = replica.getSegmentInfosSnapshot()) { final SegmentInfos replicaInfos = snapshot.get(); - final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); - final Store.RecoveryDiff recoveryDiff = primaryMetadata.recoveryDiff(replicaMetadata); + final Map replicaMetadata = replica.store().getSegmentMetadataMap(replicaInfos); + final Store.RecoveryDiff recoveryDiff = Store.segmentReplicationDiff(primaryMetadata, replicaMetadata); assertTrue(recoveryDiff.missing.isEmpty()); assertTrue(recoveryDiff.different.isEmpty()); assertEquals(recoveryDiff.identical.size(), primaryMetadata.size()); - assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); + } finally { + countDownLatch.countDown(); } - countDownLatch.countDown(); } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { logger.error("Unexpected replication failure in test", e); Assert.fail("test replication should not fail: " + e); } } ); - replicationCollection.start(target, TimeValue.timeValueMillis(5000)); - target.startReplication(new ActionListener<>() { - @Override - public void onResponse(Void o) { - replicationCollection.markAsDone(target.getId()); - } - - @Override - public void onFailure(Exception e) { - replicationCollection.fail(target.getId(), new OpenSearchException("Segment Replication failed", e), true); - } - }); + ids.add(target); + countDownLatch.await(1, TimeUnit.SECONDS); } - countDownLatch.await(3, TimeUnit.SECONDS); + return ids; } private void writeFileChunks(SegmentReplicationTarget target, IndexShard primary, StoreFileMetadata[] files) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java new file mode 100644 index 0000000000000..7d00772913d6e --- /dev/null +++ 
b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -0,0 +1,310 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.composite; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.junit.After; +import org.junit.Before; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.text.Text; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.time.DateFormatters; +import org.opensearch.index.Index; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.IpFieldMapper; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.composite.InternalComposite; +import org.opensearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Base class for the Aggregator Tests which are registered under Composite Aggregation. 
+ */ +public class BaseCompositeAggregatorTestCase extends AggregatorTestCase { + + protected static List FIELD_TYPES; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + FIELD_TYPES = new ArrayList<>(); + FIELD_TYPES.add(new KeywordFieldMapper.KeywordFieldType("keyword")); + FIELD_TYPES.add(new NumberFieldMapper.NumberFieldType("long", NumberFieldMapper.NumberType.LONG)); + FIELD_TYPES.add(new NumberFieldMapper.NumberFieldType("double", NumberFieldMapper.NumberType.DOUBLE)); + FIELD_TYPES.add(new DateFieldMapper.DateFieldType("date", DateFormatter.forPattern("yyyy-MM-dd||epoch_millis"))); + FIELD_TYPES.add(new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.INTEGER)); + FIELD_TYPES.add(new KeywordFieldMapper.KeywordFieldType("terms")); + FIELD_TYPES.add(new IpFieldMapper.IpFieldType("ip")); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + FIELD_TYPES = null; + } + + @Override + protected MapperService mapperServiceMock() { + MapperService mapperService = mock(MapperService.class); + DocumentMapper mapper = mock(DocumentMapper.class); + when(mapper.typeText()).thenReturn(new Text("_doc")); + when(mapper.type()).thenReturn("_doc"); + when(mapperService.documentMapper()).thenReturn(mapper); + return mapperService; + } + + protected static Map> createDocument(Object... fields) { + assert fields.length % 2 == 0; + final Map> map = new HashMap<>(); + for (int i = 0; i < fields.length; i += 2) { + String field = (String) fields[i]; + if (fields[i + 1] instanceof List) { + map.put(field, (List) fields[i + 1]); + } else { + map.put(field, Collections.singletonList(fields[i + 1])); + } + } + return map; + } + + protected void testSearchCase( + List queries, + List>> dataset, + Supplier create, + Consumer verify + ) throws IOException { + for (Query query : queries) { + executeTestCase(false, false, query, dataset, create, verify); + executeTestCase(false, true, query, dataset, create, verify); + } + } + + protected void executeTestCase( + boolean forceMerge, + boolean useIndexSort, + Query query, + List>> dataset, + Supplier create, + Consumer verify + ) throws IOException { + Map types = FIELD_TYPES.stream().collect(Collectors.toMap(MappedFieldType::name, Function.identity())); + CompositeAggregationBuilder aggregationBuilder = create.get(); + Sort indexSort = useIndexSort ? buildIndexSort(aggregationBuilder.sources(), types) : null; + IndexSettings indexSettings = createIndexSettings(indexSort); + try (Directory directory = newDirectory()) { + IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); + if (indexSort != null) { + config.setIndexSort(indexSort); + config.setCodec(TestUtil.getDefaultCodec()); + } + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) { + Document document = new Document(); + int id = 0; + for (Map> fields : dataset) { + document.clear(); + addToDocument(id, document, fields); + indexWriter.addDocument(document); + id++; + } + if (forceMerge || rarely()) { + // forceMerge randomly or if the collector-per-leaf testing stuff would break the tests. 
+ indexWriter.forceMerge(1); + } else { + if (dataset.size() > 0) { + int numDeletes = randomIntBetween(1, 25); + for (int i = 0; i < numDeletes; i++) { + id = randomIntBetween(0, dataset.size() - 1); + indexWriter.deleteDocuments(new Term("id", Integer.toString(id))); + document.clear(); + addToDocument(id, document, dataset.get(id)); + indexWriter.addDocument(document); + } + } + + } + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = new IndexSearcher(indexReader); + InternalComposite composite = searchAndReduce( + indexSettings, + indexSearcher, + query, + aggregationBuilder, + FIELD_TYPES.toArray(new MappedFieldType[0]) + ); + verify.accept(composite); + } + } + } + + protected void addToDocument(int id, Document doc, Map> keys) { + doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); + for (Map.Entry> entry : keys.entrySet()) { + final String name = entry.getKey(); + for (Object value : entry.getValue()) { + if (value instanceof Integer) { + doc.add(new SortedNumericDocValuesField(name, (int) value)); + doc.add(new IntPoint(name, (int) value)); + } else if (value instanceof Long) { + doc.add(new SortedNumericDocValuesField(name, (long) value)); + doc.add(new LongPoint(name, (long) value)); + } else if (value instanceof Double) { + doc.add(new SortedNumericDocValuesField(name, NumericUtils.doubleToSortableLong((double) value))); + doc.add(new DoublePoint(name, (double) value)); + } else if (value instanceof String) { + doc.add(new SortedSetDocValuesField(name, new BytesRef((String) value))); + doc.add(new StringField(name, new BytesRef((String) value), Field.Store.NO)); + } else if (value instanceof InetAddress) { + doc.add(new SortedSetDocValuesField(name, new BytesRef(InetAddressPoint.encode((InetAddress) value)))); + doc.add(new InetAddressPoint(name, (InetAddress) value)); + } else { + if (!addValueToDocument(doc, name, value)) throw new AssertionError( + "invalid object: " + value.getClass().getSimpleName() + ); + } + } + } + } + + /** + * Override this function to handle any specific type of value you want to add in the document for doing the + * composite aggregation. If you have added another Composite Aggregation Type then you must override this + * function so that your field value can be added in the document correctly. + * + * @param doc {@link Document} + * @param name {@link String} Field Name + * @param value {@link Object} Field value + * @return boolean true or false, based on if value is added or not + */ + protected boolean addValueToDocument(final Document doc, final String name, final Object value) { + return false; + } + + protected static Sort buildIndexSort(List> sources, Map fieldTypes) { + List sortFields = new ArrayList<>(); + Map remainingFieldTypes = new HashMap<>(fieldTypes); + for (CompositeValuesSourceBuilder source : sources) { + MappedFieldType type = fieldTypes.remove(source.field()); + remainingFieldTypes.remove(source.field()); + SortField sortField = sortFieldFrom(type); + if (sortField == null) { + break; + } + sortFields.add(sortField); + } + while (remainingFieldTypes.size() > 0 && randomBoolean()) { + // Add extra unused sorts + List fields = new ArrayList<>(remainingFieldTypes.keySet()); + Collections.sort(fields); + String field = fields.get(between(0, fields.size() - 1)); + SortField sortField = sortFieldFrom(remainingFieldTypes.remove(field)); + if (sortField != null) { + sortFields.add(sortField); + } + } + return sortFields.size() > 0 ? 
+    }
+
+    protected static SortField sortFieldFrom(MappedFieldType type) {
+        if (type instanceof KeywordFieldMapper.KeywordFieldType) {
+            return new SortedSetSortField(type.name(), false);
+        } else if (type instanceof DateFieldMapper.DateFieldType) {
+            return new SortedNumericSortField(type.name(), SortField.Type.LONG, false);
+        } else if (type instanceof NumberFieldMapper.NumberFieldType) {
+            switch (type.typeName()) {
+                case "byte":
+                case "short":
+                case "integer":
+                    return new SortedNumericSortField(type.name(), SortField.Type.INT, false);
+                case "long":
+                    return new SortedNumericSortField(type.name(), SortField.Type.LONG, false);
+                case "float":
+                case "double":
+                    return new SortedNumericSortField(type.name(), SortField.Type.DOUBLE, false);
+                default:
+                    return null;
+            }
+        }
+        return null;
+    }
+
+    protected static IndexSettings createIndexSettings(Sort sort) {
+        Settings.Builder builder = Settings.builder();
+        if (sort != null) {
+            String[] fields = Arrays.stream(sort.getSort()).map(SortField::getField).toArray(String[]::new);
+            String[] orders = Arrays.stream(sort.getSort()).map((o) -> o.getReverse() ? "desc" : "asc").toArray(String[]::new);
+            builder.putList("index.sort.field", fields);
+            builder.putList("index.sort.order", orders);
+        }
+        return IndexSettingsModule.newIndexSettings(new Index("_index", "0"), builder.build());
+    }
+
+    protected static Map<String, Object> createAfterKey(Object... fields) {
+        assert fields.length % 2 == 0;
+        final Map<String, Object> map = new HashMap<>();
+        for (int i = 0; i < fields.length; i += 2) {
+            String field = (String) fields[i];
+            map.put(field, fields[i + 1]);
+        }
+        return map;
+    }
+
+    protected static long asLong(String dateTime) {
+        return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
+    }
+}
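The addValueToDocument hook above is the extension point for tests of plugins or modules that register additional composite value source types. Below is a minimal sketch of how such a subclass might use it, assuming a hypothetical GeoPointCompositeAggregatorTests (in the same package as the base class) that indexes org.opensearch.common.geo.GeoPoint values with standard Lucene point and doc-values fields; this example is illustrative only and is not part of this change.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.opensearch.common.geo.GeoPoint;

// Hypothetical subclass, shown only to illustrate the override contract.
public class GeoPointCompositeAggregatorTests extends BaseCompositeAggregatorTestCase {

    @Override
    protected boolean addValueToDocument(final Document doc, final String name, final Object value) {
        if (value instanceof GeoPoint) {
            GeoPoint point = (GeoPoint) value;
            // Index the point for queries and add doc values so the composite source can read it back.
            doc.add(new LatLonPoint(name, point.getLat(), point.getLon()));
            doc.add(new LatLonDocValuesField(name, point.getLat(), point.getLon()));
            return true;
        }
        // Returning false lets the base class fail the test on unsupported value types.
        return false;
    }
}

The base class only invokes this hook for value types it does not already handle (Integer, Long, Double, String, InetAddress), so an override stays limited to the new source type it introduces.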
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
index a4099d66de28e..5325c48e16913 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
@@ -68,10 +68,6 @@
 import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.filter.ParsedFilter;
 import org.opensearch.search.aggregations.bucket.filter.ParsedFilters;
-import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
-import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
-import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
-import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid;
 import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.global.ParsedGlobal;
 import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
@@ -275,8 +271,6 @@ public ReduceContext forFinalReduction() {
         map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c));
         map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c));
         map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c));
-        map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c));
-        map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c));
         map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
         map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
         map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));