diff --git a/.ci/os.ps1 b/.ci/os.ps1
index 54f5ca54ffaa3..0fa43c4b250fa 100644
--- a/.ci/os.ps1
+++ b/.ci/os.ps1
@@ -1,3 +1,5 @@
+param($GradleTasks='destructiveDistroTest')
+
If (-NOT ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator))
{
# Relaunch as an elevated process:
@@ -25,7 +27,6 @@ Remove-Item -Recurse -Force \tmp -ErrorAction Ignore
New-Item -ItemType directory -Path \tmp
$ErrorActionPreference="Continue"
-# TODO: remove the task exclusions once dependencies are set correctly and these don't run for Windows or buldiung the deb on windows is fixed
-& .\gradlew.bat -g "C:\Users\$env:username\.gradle" --parallel --no-daemon --scan --console=plain destructiveDistroTest
+& .\gradlew.bat -g "C:\Users\$env:username\.gradle" --parallel --no-daemon --scan --console=plain $GradleTasks
exit $LastExitCode
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/LongKeyedBucketOrdsBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/LongKeyedBucketOrdsBenchmark.java
new file mode 100644
index 0000000000000..adef011abfb1b
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/LongKeyedBucketOrdsBenchmark.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.search.aggregations.CardinalityUpperBound;
+import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.LongKeyedBucketOrds;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OperationsPerInvocation;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+@Fork(2)
+@Warmup(iterations = 10)
+@Measurement(iterations = 5)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@OperationsPerInvocation(1_000_000)
+@State(Scope.Benchmark)
+public class LongKeyedBucketOrdsBenchmark {
+ private static final long LIMIT = 1_000_000;
+ /**
+ * The number of distinct values to add to the buckets.
+ */
+ private static final long DISTINCT_VALUES = 10;
+ /**
+ * The number of buckets to create in the {@link #multiBucket} case.
+ *
+ * If this is not relatively prime to {@link #DISTINCT_VALUES} then the
+ * values won't be scattered evenly across the buckets.
+ */
+ private static final long DISTINCT_BUCKETS = 21;
+
+ private final PageCacheRecycler recycler = new PageCacheRecycler(Settings.EMPTY);
+ private final BigArrays bigArrays = new BigArrays(recycler, null, "REQUEST");
+
+ /**
+ * Force loading all of the implementations just for extra paranoia's sake.
+ * We really don't want the JVM to be able to eliminate one of them just
+ * because we don't use it in the particular benchmark. That is totally a
+ * thing it'd do. It is sneaky.
+ */
+ @Setup
+ public void forceLoadClasses(Blackhole bh) {
+ bh.consume(LongKeyedBucketOrds.FromSingle.class);
+ bh.consume(LongKeyedBucketOrds.FromMany.class);
+ }
+
+ /**
+ * Emulates a way that we do not use {@link LongKeyedBucketOrds}
+ * because it is not needed.
+ */
+ @Benchmark
+ public void singleBucketIntoSingleImmutableMonomorphicInvocation(Blackhole bh) {
+ try (LongKeyedBucketOrds.FromSingle ords = new LongKeyedBucketOrds.FromSingle(bigArrays)) {
+ for (long i = 0; i < LIMIT; i++) {
+ ords.add(0, i % DISTINCT_VALUES);
+ }
+ bh.consume(ords);
+ }
+ }
+
+ /**
+ * Emulates the way that most aggregations use {@link LongKeyedBucketOrds}.
+ */
+ @Benchmark
+ public void singleBucketIntoSingleImmutableBimorphicInvocation(Blackhole bh) {
+ try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.ONE)) {
+ for (long i = 0; i < LIMIT; i++) {
+ ords.add(0, i % DISTINCT_VALUES);
+ }
+ bh.consume(ords);
+ }
+ }
+
+ /**
+ * Emulates the way that {@link AutoDateHistogramAggregationBuilder} uses {@link LongKeyedBucketOrds}.
+ */
+ @Benchmark
+ public void singleBucketIntoSingleMutableMonomorphicInvocation(Blackhole bh) {
+ LongKeyedBucketOrds.FromSingle ords = new LongKeyedBucketOrds.FromSingle(bigArrays);
+ for (long i = 0; i < LIMIT; i++) {
+ if (i % 100_000 == 0) {
+ ords.close();
+ bh.consume(ords);
+ ords = new LongKeyedBucketOrds.FromSingle(bigArrays);
+ }
+ ords.add(0, i % DISTINCT_VALUES);
+ }
+ bh.consume(ords);
+ ords.close();
+ }
+
+ /**
+ * Emulates a way that we do not use {@link LongKeyedBucketOrds}
+ * because it is significantly slower than the
+ * {@link #singleBucketIntoSingleMutableMonomorphicInvocation monomorphic invocation}.
+ */
+ @Benchmark
+ public void singleBucketIntoSingleMutableBimorphicInvocation(Blackhole bh) {
+ LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.ONE);
+ for (long i = 0; i < LIMIT; i++) {
+ if (i % 100_000 == 0) {
+ ords.close();
+ bh.consume(ords);
+ ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.ONE);
+ }
+ ords.add(0, i % DISTINCT_VALUES);
+
+ }
+ bh.consume(ords);
+ ords.close();
+ }
+
+ /**
+ * Emulates an aggregation that collects from a single bucket "by accident".
+ * This can happen if an aggregation is under, say, a {@code terms}
+ * aggregation and there is only a single value for that term in the index.
+ */
+ @Benchmark
+ public void singleBucketIntoMulti(Blackhole bh) {
+ try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY)) {
+ for (long i = 0; i < LIMIT; i++) {
+ ords.add(0, i % DISTINCT_VALUES);
+ }
+ bh.consume(ords);
+ }
+ }
+
+ /**
+ * Emulates an aggregation that collects from many buckets.
+ */
+ @Benchmark
+ public void multiBucket(Blackhole bh) {
+ try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY)) {
+ for (long i = 0; i < LIMIT; i++) {
+ ords.add(i % DISTINCT_BUCKETS, i % DISTINCT_VALUES);
+ }
+ bh.consume(ords);
+ }
+ }
+}
diff --git a/build.gradle b/build.gradle
index a1fa67b65215c..14419937ea9ca 100644
--- a/build.gradle
+++ b/build.gradle
@@ -174,8 +174,8 @@ tasks.register("verifyVersions") {
* after the backport of the backcompat code is complete.
*/
-boolean bwc_tests_enabled = false
-final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/59076" /* place a PR link here when committing bwc changes */
+boolean bwc_tests_enabled = true
+final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */
if (bwc_tests_enabled == false) {
if (bwc_tests_disabled_issue.isEmpty()) {
throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false")
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy
index 1d6c03666d84f..a8d1958218193 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy
@@ -24,6 +24,7 @@ import groovy.transform.CompileStatic
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.ElasticsearchJavaPlugin
import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
+import org.elasticsearch.gradle.RepositoriesSetupPlugin
import org.elasticsearch.gradle.info.BuildParams
import org.elasticsearch.gradle.info.GlobalBuildInfoPlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
@@ -59,9 +60,9 @@ class StandaloneRestTestPlugin implements Plugin {
project.rootProject.pluginManager.apply(GlobalBuildInfoPlugin)
project.pluginManager.apply(JavaBasePlugin)
project.pluginManager.apply(TestClustersPlugin)
+ project.pluginManager.apply(RepositoriesSetupPlugin)
project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask)
- ElasticsearchJavaPlugin.configureRepositories(project)
ElasticsearchJavaPlugin.configureTestTasks(project)
ElasticsearchJavaPlugin.configureInputNormalization(project)
ElasticsearchJavaPlugin.configureCompile(project)
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchJavaPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchJavaPlugin.java
index f56ea9cb569fa..2dccc95f82508 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchJavaPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchJavaPlugin.java
@@ -36,9 +36,6 @@
import org.gradle.api.artifacts.ModuleDependency;
import org.gradle.api.artifacts.ProjectDependency;
import org.gradle.api.artifacts.ResolutionStrategy;
-import org.gradle.api.artifacts.dsl.RepositoryHandler;
-import org.gradle.api.artifacts.repositories.IvyArtifactRepository;
-import org.gradle.api.artifacts.repositories.MavenArtifactRepository;
import org.gradle.api.execution.TaskActionListener;
import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.BasePlugin;
@@ -59,16 +56,10 @@
import java.io.File;
import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URI;
-import java.util.Arrays;
import java.util.List;
-import java.util.Locale;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure;
import static org.elasticsearch.gradle.util.Util.toStringable;
@@ -83,11 +74,11 @@ public void apply(Project project) {
project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class);
// apply global test task failure listener
project.getRootProject().getPluginManager().apply(TestFailureReportingPlugin.class);
-
+ // common repositories setup
+ project.getPluginManager().apply(RepositoriesSetupPlugin.class);
project.getPluginManager().apply(JavaLibraryPlugin.class);
configureConfigurations(project);
- configureRepositories(project);
configureCompile(project);
configureInputNormalization(project);
configureTestTasks(project);
@@ -149,77 +140,6 @@ public static void configureConfigurations(Project project) {
disableTransitiveDeps.accept(JavaPlugin.TEST_IMPLEMENTATION_CONFIGURATION_NAME);
}
- private static final Pattern LUCENE_SNAPSHOT_REGEX = Pattern.compile("\\w+-snapshot-([a-z0-9]+)");
-
- /**
- * Adds repositories used by ES dependencies
- */
- public static void configureRepositories(Project project) {
- // ensure all repositories use secure urls
- // TODO: remove this with gradle 7.0, which no longer allows insecure urls
- project.getRepositories().all(repository -> {
- if (repository instanceof MavenArtifactRepository) {
- final MavenArtifactRepository maven = (MavenArtifactRepository) repository;
- assertRepositoryURIIsSecure(maven.getName(), project.getPath(), maven.getUrl());
- for (URI uri : maven.getArtifactUrls()) {
- assertRepositoryURIIsSecure(maven.getName(), project.getPath(), uri);
- }
- } else if (repository instanceof IvyArtifactRepository) {
- final IvyArtifactRepository ivy = (IvyArtifactRepository) repository;
- assertRepositoryURIIsSecure(ivy.getName(), project.getPath(), ivy.getUrl());
- }
- });
- RepositoryHandler repos = project.getRepositories();
- if (System.getProperty("repos.mavenLocal") != null) {
- // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is
- // useful for development ie. bwc tests where we install stuff in the local repository
- // such that we don't have to pass hardcoded files to gradle
- repos.mavenLocal();
- }
- repos.jcenter();
-
- String luceneVersion = VersionProperties.getLucene();
- if (luceneVersion.contains("-snapshot")) {
- // extract the revision number from the version with a regex matcher
- Matcher matcher = LUCENE_SNAPSHOT_REGEX.matcher(luceneVersion);
- if (matcher.find() == false) {
- throw new GradleException("Malformed lucene snapshot version: " + luceneVersion);
- }
- String revision = matcher.group(1);
- MavenArtifactRepository luceneRepo = repos.maven(repo -> {
- repo.setName("lucene-snapshots");
- repo.setUrl("https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + revision);
- });
- repos.exclusiveContent(exclusiveRepo -> {
- exclusiveRepo.filter(
- descriptor -> descriptor.includeVersionByRegex("org\\.apache\\.lucene", ".*", ".*-snapshot-" + revision)
- );
- exclusiveRepo.forRepositories(luceneRepo);
- });
- }
- }
-
- private static final List SECURE_URL_SCHEMES = Arrays.asList("file", "https", "s3");
-
- private static void assertRepositoryURIIsSecure(final String repositoryName, final String projectPath, final URI uri) {
- if (uri != null && SECURE_URL_SCHEMES.contains(uri.getScheme()) == false) {
- String url;
- try {
- url = uri.toURL().toString();
- } catch (MalformedURLException e) {
- throw new IllegalStateException(e);
- }
- final String message = String.format(
- Locale.ROOT,
- "repository [%s] on project with path [%s] is not using a secure protocol for artifacts on [%s]",
- repositoryName,
- projectPath,
- url
- );
- throw new GradleException(message);
- }
- }
-
/**
* Adds compiler settings to the project
*/
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java
index 18c24db09fee2..efbba73bf5467 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java
@@ -218,6 +218,7 @@ private class PropertyListEntry {
this.normalization = normalization;
}
+ @Input
public PropertyNormalization getNormalization() {
return normalization;
}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/RepositoriesSetupPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/RepositoriesSetupPlugin.java
new file mode 100644
index 0000000000000..3ea2fee7d1483
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/RepositoriesSetupPlugin.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle;
+
+import org.gradle.api.GradleException;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.artifacts.dsl.RepositoryHandler;
+import org.gradle.api.artifacts.repositories.IvyArtifactRepository;
+import org.gradle.api.artifacts.repositories.MavenArtifactRepository;
+
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class RepositoriesSetupPlugin implements Plugin<Project> {
+
+ private static final List<String> SECURE_URL_SCHEMES = Arrays.asList("file", "https", "s3");
+ private static final Pattern LUCENE_SNAPSHOT_REGEX = Pattern.compile("\\w+-snapshot-([a-z0-9]+)");
+
+ @Override
+ public void apply(Project project) {
+ configureRepositories(project);
+ }
+
+ /**
+ * Adds repositories used by ES projects and dependencies
+ */
+ public static void configureRepositories(Project project) {
+ // ensure all repositories use secure urls
+ // TODO: remove this with gradle 7.0, which no longer allows insecure urls
+ project.getRepositories().all(repository -> {
+ if (repository instanceof MavenArtifactRepository) {
+ final MavenArtifactRepository maven = (MavenArtifactRepository) repository;
+ assertRepositoryURIIsSecure(maven.getName(), project.getPath(), maven.getUrl());
+ for (URI uri : maven.getArtifactUrls()) {
+ assertRepositoryURIIsSecure(maven.getName(), project.getPath(), uri);
+ }
+ } else if (repository instanceof IvyArtifactRepository) {
+ final IvyArtifactRepository ivy = (IvyArtifactRepository) repository;
+ assertRepositoryURIIsSecure(ivy.getName(), project.getPath(), ivy.getUrl());
+ }
+ });
+ RepositoryHandler repos = project.getRepositories();
+ if (System.getProperty("repos.mavenLocal") != null) {
+ // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is
+ // useful for development ie. bwc tests where we install stuff in the local repository
+ // such that we don't have to pass hardcoded files to gradle
+ repos.mavenLocal();
+ }
+ repos.jcenter();
+
+ String luceneVersion = VersionProperties.getLucene();
+ if (luceneVersion.contains("-snapshot")) {
+ // extract the revision number from the version with a regex matcher
+ Matcher matcher = LUCENE_SNAPSHOT_REGEX.matcher(luceneVersion);
+ if (matcher.find() == false) {
+ throw new GradleException("Malformed lucene snapshot version: " + luceneVersion);
+ }
+ String revision = matcher.group(1);
+ MavenArtifactRepository luceneRepo = repos.maven(repo -> {
+ repo.setName("lucene-snapshots");
+ repo.setUrl("https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + revision);
+ });
+ repos.exclusiveContent(exclusiveRepo -> {
+ exclusiveRepo.filter(
+ descriptor -> descriptor.includeVersionByRegex("org\\.apache\\.lucene", ".*", ".*-snapshot-" + revision)
+ );
+ exclusiveRepo.forRepositories(luceneRepo);
+ });
+ }
+ }
+
+ private static void assertRepositoryURIIsSecure(final String repositoryName, final String projectPath, final URI uri) {
+ if (uri != null && SECURE_URL_SCHEMES.contains(uri.getScheme()) == false) {
+ String url;
+ try {
+ url = uri.toURL().toString();
+ } catch (MalformedURLException e) {
+ throw new IllegalStateException(e);
+ }
+ final String message = String.format(
+ Locale.ROOT,
+ "repository [%s] on project with path [%s] is not using a secure protocol for artifacts on [%s]",
+ repositoryName,
+ projectPath,
+ url
+ );
+ throw new GradleException(message);
+ }
+ }
+
+}
diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.repositories.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.repositories.properties
new file mode 100644
index 0000000000000..56113f18b6c5d
--- /dev/null
+++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.repositories.properties
@@ -0,0 +1,20 @@
+#
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+implementation-class=org.elasticsearch.gradle.RepositoriesSetupPlugin
diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle
index f380dfabd321f..7561c6601f020 100644
--- a/client/rest-high-level/build.gradle
+++ b/client/rest-high-level/build.gradle
@@ -98,9 +98,6 @@ testClusters.all {
setting 'xpack.security.authc.api_key.enabled', 'true'
setting 'xpack.security.http.ssl.enabled', 'false'
setting 'xpack.security.transport.ssl.enabled', 'false'
- if (BuildParams.isSnapshotBuild() == false) {
- systemProperty 'es.eql_feature_flag_registered', 'true'
- }
setting 'xpack.eql.enabled', 'true'
// Truststore settings are not used since TLS is not enabled. Included for testing the get certificates API
setting 'xpack.security.http.ssl.certificate_authorities', 'testnode.crt'
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index 4c21067d9519a..44d0cb6994925 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -54,6 +54,8 @@
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.analytics.InferencePipelineAggregationBuilder;
+import org.elasticsearch.client.analytics.ParsedInference;
import org.elasticsearch.client.analytics.ParsedStringStats;
import org.elasticsearch.client.analytics.ParsedTopMetrics;
import org.elasticsearch.client.analytics.StringStatsAggregationBuilder;
@@ -1957,6 +1959,7 @@ static List getDefaultNamedXContents() {
map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c));
map.put(StringStatsAggregationBuilder.NAME, (p, c) -> ParsedStringStats.PARSER.parse(p, (String) c));
map.put(TopMetricsAggregationBuilder.NAME, (p, c) -> ParsedTopMetrics.PARSER.parse(p, (String) c));
+ map.put(InferencePipelineAggregationBuilder.NAME, (p, c) -> ParsedInference.fromXContent(p, (String ) (c)));
List entries = map.entrySet().stream()
.map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
.collect(Collectors.toList());
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/InferencePipelineAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/InferencePipelineAggregationBuilder.java
new file mode 100644
index 0000000000000..05a24a08e4c59
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/InferencePipelineAggregationBuilder.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.analytics;
+
+import org.elasticsearch.client.ml.inference.trainedmodel.InferenceConfig;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Objects;
+import java.util.TreeMap;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * For building inference pipeline aggregations
+ *
+ * NOTE: This extends {@linkplain AbstractPipelineAggregationBuilder} for compatibility
+ * with {@link SearchSourceBuilder#aggregation(PipelineAggregationBuilder)} but it
+ * doesn't support any "server" side things like {@linkplain #doWriteTo(StreamOutput)}
+ * or {@linkplain #createInternal(Map)}
+ */
+public class InferencePipelineAggregationBuilder extends AbstractPipelineAggregationBuilder<InferencePipelineAggregationBuilder> {
+
+ public static String NAME = "inference";
+
+ public static final ParseField MODEL_ID = new ParseField("model_id");
+ private static final ParseField INFERENCE_CONFIG = new ParseField("inference_config");
+
+
+ @SuppressWarnings("unchecked")
+ private static final ConstructingObjectParser<InferencePipelineAggregationBuilder, String> PARSER = new ConstructingObjectParser<>(
+ NAME, false,
+ (args, name) -> new InferencePipelineAggregationBuilder(name, (String)args[0], (Map<String, String>) args[1])
+ );
+
+ static {
+ PARSER.declareString(constructorArg(), MODEL_ID);
+ PARSER.declareObject(constructorArg(), (p, c) -> p.mapStrings(), BUCKETS_PATH_FIELD);
+ PARSER.declareNamedObject(InferencePipelineAggregationBuilder::setInferenceConfig,
+ (p, c, n) -> p.namedObject(InferenceConfig.class, n, c), INFERENCE_CONFIG);
+ }
+
+ private final Map<String, String> bucketPathMap;
+ private final String modelId;
+ private InferenceConfig inferenceConfig;
+
+ public static InferencePipelineAggregationBuilder parse(String pipelineAggregatorName,
+ XContentParser parser) {
+ return PARSER.apply(parser, pipelineAggregatorName);
+ }
+
+ public InferencePipelineAggregationBuilder(String name, String modelId, Map<String, String> bucketsPath) {
+ super(name, NAME, new TreeMap<>(bucketsPath).values().toArray(new String[] {}));
+ this.modelId = modelId;
+ this.bucketPathMap = bucketsPath;
+ }
+
+ public void setInferenceConfig(InferenceConfig inferenceConfig) {
+ this.inferenceConfig = inferenceConfig;
+ }
+
+ @Override
+ protected void validate(ValidationContext context) {
+ // validation occurs on the server
+ }
+
+ @Override
+ protected void doWriteTo(StreamOutput out) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected PipelineAggregator createInternal(Map<String, Object> metaData) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected boolean overrideBucketsPath() {
+ return true;
+ }
+
+ @Override
+ protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(MODEL_ID.getPreferredName(), modelId);
+ builder.field(BUCKETS_PATH_FIELD.getPreferredName(), bucketPathMap);
+ if (inferenceConfig != null) {
+ builder.startObject(INFERENCE_CONFIG.getPreferredName());
+ builder.field(inferenceConfig.getName(), inferenceConfig);
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ @Override
+ public String getWriteableName() {
+ return NAME;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(super.hashCode(), bucketPathMap, modelId, inferenceConfig);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null || getClass() != obj.getClass()) return false;
+ if (super.equals(obj) == false) return false;
+
+ InferencePipelineAggregationBuilder other = (InferencePipelineAggregationBuilder) obj;
+ return Objects.equals(bucketPathMap, other.bucketPathMap)
+ && Objects.equals(modelId, other.modelId)
+ && Objects.equals(inferenceConfig, other.inferenceConfig);
+ }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/ParsedInference.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/ParsedInference.java
new file mode 100644
index 0000000000000..4fe03fb4c5b5a
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/ParsedInference.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.analytics;
+
+import org.elasticsearch.client.ml.inference.results.FeatureImportance;
+import org.elasticsearch.client.ml.inference.results.TopClassEntry;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParseException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.ParsedAggregation;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+/**
+ * This class parses the superset of all possible fields that may be written by
+ * InferenceResults. The warning field is mutually exclusive with all the other fields.
+ *
+ * In the case of classification results {@link #getValue()} may return a String,
+ * Boolean or a Double. For regression results {@link #getValue()} is always
+ * a Double.
+ */
+public class ParsedInference extends ParsedAggregation {
+
+ @SuppressWarnings("unchecked")
+ private static final ConstructingObjectParser<ParsedInference, Void> PARSER =
+ new ConstructingObjectParser<>(ParsedInference.class.getSimpleName(), true,
+ args -> new ParsedInference(args[0], (List<FeatureImportance>) args[1],
+ (List<TopClassEntry>) args[2], (String) args[3]));
+
+ public static final ParseField FEATURE_IMPORTANCE = new ParseField("feature_importance");
+ public static final ParseField WARNING = new ParseField("warning");
+ public static final ParseField TOP_CLASSES = new ParseField("top_classes");
+
+ static {
+ PARSER.declareField(optionalConstructorArg(), (p, n) -> {
+ Object o;
+ XContentParser.Token token = p.currentToken();
+ if (token == XContentParser.Token.VALUE_STRING) {
+ o = p.text();
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ o = p.booleanValue();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ o = p.doubleValue();
+ } else {
+ throw new XContentParseException(p.getTokenLocation(),
+ "[" + ParsedInference.class.getSimpleName() + "] failed to parse field [" + CommonFields.VALUE + "] "
+ + "value [" + token + "] is not a string, boolean or number");
+ }
+ return o;
+ }, CommonFields.VALUE, ObjectParser.ValueType.VALUE);
+ PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> FeatureImportance.fromXContent(p), FEATURE_IMPORTANCE);
+ PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TopClassEntry.fromXContent(p), TOP_CLASSES);
+ PARSER.declareString(optionalConstructorArg(), WARNING);
+ declareAggregationFields(PARSER);
+ }
+
+ public static ParsedInference fromXContent(XContentParser parser, final String name) {
+ ParsedInference parsed = PARSER.apply(parser, null);
+ parsed.setName(name);
+ return parsed;
+ }
+
+ private final Object value;
+ private final List<FeatureImportance> featureImportance;
+ private final List<TopClassEntry> topClasses;
+ private final String warning;
+
+ ParsedInference(Object value,
+ List<FeatureImportance> featureImportance,
+ List<TopClassEntry> topClasses,
+ String warning) {
+ this.value = value;
+ this.warning = warning;
+ this.featureImportance = featureImportance;
+ this.topClasses = topClasses;
+ }
+
+ public Object getValue() {
+ return value;
+ }
+
+ public List<FeatureImportance> getFeatureImportance() {
+ return featureImportance;
+ }
+
+ public List<TopClassEntry> getTopClasses() {
+ return topClasses;
+ }
+
+ public String getWarning() {
+ return warning;
+ }
+
+ @Override
+ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+ if (warning != null) {
+ builder.field(WARNING.getPreferredName(), warning);
+ } else {
+ builder.field(CommonFields.VALUE.getPreferredName(), value);
+ if (topClasses != null && topClasses.size() > 0) {
+ builder.field(TOP_CLASSES.getPreferredName(), topClasses);
+ }
+ if (featureImportance != null && featureImportance.size() > 0) {
+ builder.field(FEATURE_IMPORTANCE.getPreferredName(), featureImportance);
+ }
+ }
+ return builder;
+ }
+
+ @Override
+ public String getType() {
+ return InferencePipelineAggregationBuilder.NAME;
+ }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java
index 7416803b8345a..10cda684ae2a7 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java
@@ -26,7 +26,6 @@
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import java.io.IOException;
import java.util.Arrays;
@@ -40,12 +39,10 @@ public class EqlSearchRequest implements Validatable, ToXContentObject {
private QueryBuilder filter = null;
private String timestampField = "@timestamp";
private String eventCategoryField = "event.category";
- private String implicitJoinKeyField = "agent.id";
private boolean isCaseSensitive = true;
private int size = 10;
private int fetchSize = 1000;
- private SearchAfterBuilder searchAfterBuilder;
private String query;
private String tiebreakerField;
@@ -58,11 +55,9 @@ public class EqlSearchRequest implements Validatable, ToXContentObject {
static final String KEY_TIMESTAMP_FIELD = "timestamp_field";
static final String KEY_TIEBREAKER_FIELD = "tiebreaker_field";
static final String KEY_EVENT_CATEGORY_FIELD = "event_category_field";
- static final String KEY_IMPLICIT_JOIN_KEY_FIELD = "implicit_join_key_field";
static final String KEY_CASE_SENSITIVE = "case_sensitive";
static final String KEY_SIZE = "size";
static final String KEY_FETCH_SIZE = "fetch_size";
- static final String KEY_SEARCH_AFTER = "search_after";
static final String KEY_QUERY = "query";
static final String KEY_WAIT_FOR_COMPLETION_TIMEOUT = "wait_for_completion_timeout";
static final String KEY_KEEP_ALIVE = "keep_alive";
@@ -84,16 +79,8 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par
builder.field(KEY_TIEBREAKER_FIELD, tiebreakerField());
}
builder.field(KEY_EVENT_CATEGORY_FIELD, eventCategoryField());
- if (implicitJoinKeyField != null) {
- builder.field(KEY_IMPLICIT_JOIN_KEY_FIELD, implicitJoinKeyField());
- }
builder.field(KEY_SIZE, size());
builder.field(KEY_FETCH_SIZE, fetchSize());
-
- if (searchAfterBuilder != null) {
- builder.array(KEY_SEARCH_AFTER, searchAfterBuilder.getSortValues());
- }
-
builder.field(KEY_CASE_SENSITIVE, isCaseSensitive());
builder.field(KEY_QUERY, query);
@@ -156,10 +143,6 @@ public EqlSearchRequest eventCategoryField(String eventCategoryField) {
return this;
}
- public String implicitJoinKeyField() {
- return this.implicitJoinKeyField;
- }
-
public boolean isCaseSensitive() {
return this.isCaseSensitive;
}
@@ -169,12 +152,6 @@ public EqlSearchRequest isCaseSensitive(boolean isCaseSensitive) {
return this;
}
- public EqlSearchRequest implicitJoinKeyField(String implicitJoinKeyField) {
- Objects.requireNonNull(implicitJoinKeyField, "implicit join key must not be null");
- this.implicitJoinKeyField = implicitJoinKeyField;
- return this;
- }
-
public int size() {
return this.size;
}
@@ -199,23 +176,6 @@ public EqlSearchRequest fetchSize(int fetchSize) {
return this;
}
- public Object[] searchAfter() {
- if (searchAfterBuilder == null) {
- return null;
- }
- return searchAfterBuilder.getSortValues();
- }
-
- public EqlSearchRequest searchAfter(Object[] values) {
- this.searchAfterBuilder = new SearchAfterBuilder().setSortValues(values);
- return this;
- }
-
- private EqlSearchRequest setSearchAfter(SearchAfterBuilder builder) {
- this.searchAfterBuilder = builder;
- return this;
- }
-
public String query() {
return this.query;
}
@@ -269,8 +229,6 @@ public boolean equals(Object o) {
Objects.equals(timestampField, that.timestampField) &&
Objects.equals(tiebreakerField, that.tiebreakerField) &&
Objects.equals(eventCategoryField, that.eventCategoryField) &&
- Objects.equals(implicitJoinKeyField, that.implicitJoinKeyField) &&
- Objects.equals(searchAfterBuilder, that.searchAfterBuilder) &&
Objects.equals(query, that.query) &&
Objects.equals(isCaseSensitive, that.isCaseSensitive) &&
Objects.equals(waitForCompletionTimeout, that.waitForCompletionTimeout) &&
@@ -289,8 +247,6 @@ public int hashCode() {
timestampField,
tiebreakerField,
eventCategoryField,
- implicitJoinKeyField,
- searchAfterBuilder,
query,
isCaseSensitive,
waitForCompletionTimeout,
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java
index 7cc2000a44e67..0f79048261ac8 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java
@@ -57,6 +57,7 @@ public static Builder builder() {
static final ParseField CREATE_TIME = new ParseField("create_time");
static final ParseField VERSION = new ParseField("version");
static final ParseField ALLOW_LAZY_START = new ParseField("allow_lazy_start");
+ static final ParseField MAX_NUM_THREADS = new ParseField("max_num_threads");
private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("data_frame_analytics_config", true, Builder::new);
@@ -80,6 +81,7 @@ public static Builder builder() {
ValueType.VALUE);
PARSER.declareString(Builder::setVersion, Version::fromString, VERSION);
PARSER.declareBoolean(Builder::setAllowLazyStart, ALLOW_LAZY_START);
+ PARSER.declareInt(Builder::setMaxNumThreads, MAX_NUM_THREADS);
}
private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOException {
@@ -100,11 +102,13 @@ private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOE
private final Instant createTime;
private final Version version;
private final Boolean allowLazyStart;
+ private final Integer maxNumThreads;
private DataFrameAnalyticsConfig(@Nullable String id, @Nullable String description, @Nullable DataFrameAnalyticsSource source,
@Nullable DataFrameAnalyticsDest dest, @Nullable DataFrameAnalysis analysis,
@Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit,
- @Nullable Instant createTime, @Nullable Version version, @Nullable Boolean allowLazyStart) {
+ @Nullable Instant createTime, @Nullable Version version, @Nullable Boolean allowLazyStart,
+ @Nullable Integer maxNumThreads) {
this.id = id;
this.description = description;
this.source = source;
@@ -115,6 +119,7 @@ private DataFrameAnalyticsConfig(@Nullable String id, @Nullable String descripti
this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli());;
this.version = version;
this.allowLazyStart = allowLazyStart;
+ this.maxNumThreads = maxNumThreads;
}
public String getId() {
@@ -157,6 +162,10 @@ public Boolean getAllowLazyStart() {
return allowLazyStart;
}
+ public Integer getMaxNumThreads() {
+ return maxNumThreads;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -193,6 +202,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
if (allowLazyStart != null) {
builder.field(ALLOW_LAZY_START.getPreferredName(), allowLazyStart);
}
+ if (maxNumThreads != null) {
+ builder.field(MAX_NUM_THREADS.getPreferredName(), maxNumThreads);
+ }
builder.endObject();
return builder;
}
@@ -212,12 +224,14 @@ public boolean equals(Object o) {
&& Objects.equals(modelMemoryLimit, other.modelMemoryLimit)
&& Objects.equals(createTime, other.createTime)
&& Objects.equals(version, other.version)
- && Objects.equals(allowLazyStart, other.allowLazyStart);
+ && Objects.equals(allowLazyStart, other.allowLazyStart)
+ && Objects.equals(maxNumThreads, other.maxNumThreads);
}
@Override
public int hashCode() {
- return Objects.hash(id, description, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version, allowLazyStart);
+ return Objects.hash(id, description, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version, allowLazyStart,
+ maxNumThreads);
}
@Override
@@ -237,6 +251,7 @@ public static class Builder {
private Instant createTime;
private Version version;
private Boolean allowLazyStart;
+ private Integer maxNumThreads;
private Builder() {}
@@ -290,9 +305,14 @@ public Builder setAllowLazyStart(Boolean allowLazyStart) {
return this;
}
+ public Builder setMaxNumThreads(Integer maxNumThreads) {
+ this.maxNumThreads = maxNumThreads;
+ return this;
+ }
+
public DataFrameAnalyticsConfig build() {
return new DataFrameAnalyticsConfig(id, description, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime,
- version, allowLazyStart);
+ version, allowLazyStart, maxNumThreads);
}
}
}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdate.java
index 1d5ecb6657762..f6bda01bcf3b3 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdate.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdate.java
@@ -51,22 +51,25 @@ public static Builder builder() {
DataFrameAnalyticsConfig.MODEL_MEMORY_LIMIT,
VALUE);
PARSER.declareBoolean(Builder::setAllowLazyStart, DataFrameAnalyticsConfig.ALLOW_LAZY_START);
-
+ PARSER.declareInt(Builder::setMaxNumThreads, DataFrameAnalyticsConfig.MAX_NUM_THREADS);
}
private final String id;
private final String description;
private final ByteSizeValue modelMemoryLimit;
private final Boolean allowLazyStart;
+ private final Integer maxNumThreads;
private DataFrameAnalyticsConfigUpdate(String id,
@Nullable String description,
@Nullable ByteSizeValue modelMemoryLimit,
- @Nullable Boolean allowLazyStart) {
+ @Nullable Boolean allowLazyStart,
+ @Nullable Integer maxNumThreads) {
this.id = id;
this.description = description;
this.modelMemoryLimit = modelMemoryLimit;
this.allowLazyStart = allowLazyStart;
+ this.maxNumThreads = maxNumThreads;
}
public String getId() {
@@ -85,6 +88,10 @@ public Boolean isAllowLazyStart() {
return allowLazyStart;
}
+ public Integer getMaxNumThreads() {
+ return maxNumThreads;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -98,6 +105,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
if (allowLazyStart != null) {
builder.field(DataFrameAnalyticsConfig.ALLOW_LAZY_START.getPreferredName(), allowLazyStart);
}
+ if (maxNumThreads != null) {
+ builder.field(DataFrameAnalyticsConfig.MAX_NUM_THREADS.getPreferredName(), maxNumThreads);
+ }
builder.endObject();
return builder;
}
@@ -117,12 +127,13 @@ public boolean equals(Object other) {
return Objects.equals(this.id, that.id)
&& Objects.equals(this.description, that.description)
&& Objects.equals(this.modelMemoryLimit, that.modelMemoryLimit)
- && Objects.equals(this.allowLazyStart, that.allowLazyStart);
+ && Objects.equals(this.allowLazyStart, that.allowLazyStart)
+ && Objects.equals(this.maxNumThreads, that.maxNumThreads);
}
@Override
public int hashCode() {
- return Objects.hash(id, description, modelMemoryLimit, allowLazyStart);
+ return Objects.hash(id, description, modelMemoryLimit, allowLazyStart, maxNumThreads);
}
public static class Builder {
@@ -131,6 +142,7 @@ public static class Builder {
private String description;
private ByteSizeValue modelMemoryLimit;
private Boolean allowLazyStart;
+ private Integer maxNumThreads;
private Builder() {}
@@ -158,8 +170,13 @@ public Builder setAllowLazyStart(Boolean allowLazyStart) {
return this;
}
+ public Builder setMaxNumThreads(Integer maxNumThreads) {
+ this.maxNumThreads = maxNumThreads;
+ return this;
+ }
+
public DataFrameAnalyticsConfigUpdate build() {
- return new DataFrameAnalyticsConfigUpdate(id, description, modelMemoryLimit, allowLazyStart);
+ return new DataFrameAnalyticsConfigUpdate(id, description, modelMemoryLimit, allowLazyStart, maxNumThreads);
}
}
}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/results/FeatureImportance.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/results/FeatureImportance.java
new file mode 100644
index 0000000000000..d6d0bd4b04f41
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/results/FeatureImportance.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ml.inference.results;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+public class FeatureImportance implements ToXContentObject {
+
+ public static final String IMPORTANCE = "importance";
+ public static final String FEATURE_NAME = "feature_name";
+ public static final String CLASS_IMPORTANCE = "class_importance";
+
+ @SuppressWarnings("unchecked")
+ private static final ConstructingObjectParser<FeatureImportance, Void> PARSER =
+ new ConstructingObjectParser<>("feature_importance", true,
+ a -> new FeatureImportance((String) a[0], (Double) a[1], (Map<String, Double>) a[2])
+ );
+
+ static {
+ PARSER.declareString(constructorArg(), new ParseField(FeatureImportance.FEATURE_NAME));
+ PARSER.declareDouble(constructorArg(), new ParseField(FeatureImportance.IMPORTANCE));
+ PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(HashMap::new, XContentParser::doubleValue),
+ new ParseField(FeatureImportance.CLASS_IMPORTANCE));
+ }
+
+ public static FeatureImportance fromXContent(XContentParser parser) {
+ return PARSER.apply(parser, null);
+ }
+
+ private final Map<String, Double> classImportance;
+ private final double importance;
+ private final String featureName;
+
+ public FeatureImportance(String featureName, double importance, Map<String, Double> classImportance) {
+ this.featureName = Objects.requireNonNull(featureName);
+ this.importance = importance;
+ this.classImportance = classImportance == null ? null : Collections.unmodifiableMap(classImportance);
+ }
+
+ public Map<String, Double> getClassImportance() {
+ return classImportance;
+ }
+
+ public double getImportance() {
+ return importance;
+ }
+
+ public String getFeatureName() {
+ return featureName;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(FEATURE_NAME, featureName);
+ builder.field(IMPORTANCE, importance);
+ if (classImportance != null && classImportance.isEmpty() == false) {
+ builder.startObject(CLASS_IMPORTANCE);
+ for (Map.Entry<String, Double> entry : classImportance.entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public boolean equals(Object object) {
+ if (object == this) { return true; }
+ if (object == null || getClass() != object.getClass()) { return false; }
+ FeatureImportance that = (FeatureImportance) object;
+ return Objects.equals(featureName, that.featureName)
+ && Objects.equals(importance, that.importance)
+ && Objects.equals(classImportance, that.classImportance);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(featureName, importance, classImportance);
+ }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/results/TopClassEntry.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/results/TopClassEntry.java
new file mode 100644
index 0000000000000..9afd663f6812d
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/results/TopClassEntry.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ml.inference.results;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParseException;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+public class TopClassEntry implements ToXContentObject {
+
+ public static final ParseField CLASS_NAME = new ParseField("class_name");
+ public static final ParseField CLASS_PROBABILITY = new ParseField("class_probability");
+ public static final ParseField CLASS_SCORE = new ParseField("class_score");
+
+ public static final String NAME = "top_class";
+
+ private static final ConstructingObjectParser<TopClassEntry, Void> PARSER =
+ new ConstructingObjectParser<>(NAME, true, a -> new TopClassEntry(a[0], (Double) a[1], (Double) a[2]));
+
+ static {
+ PARSER.declareField(constructorArg(), (p, n) -> {
+ Object o;
+ XContentParser.Token token = p.currentToken();
+ if (token == XContentParser.Token.VALUE_STRING) {
+ o = p.text();
+ } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ o = p.booleanValue();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ o = p.doubleValue();
+ } else {
+ throw new XContentParseException(p.getTokenLocation(),
+ "[" + NAME + "] failed to parse field [" + CLASS_NAME + "] value [" + token
+ + "] is not a string, boolean or number");
+ }
+ return o;
+ }, CLASS_NAME, ObjectParser.ValueType.VALUE);
+ PARSER.declareDouble(constructorArg(), CLASS_PROBABILITY);
+ PARSER.declareDouble(constructorArg(), CLASS_SCORE);
+ }
+
+ public static TopClassEntry fromXContent(XContentParser parser) throws IOException {
+ return PARSER.parse(parser, null);
+ }
+
+ private final Object classification;
+ private final double probability;
+ private final double score;
+
+ public TopClassEntry(Object classification, double probability, double score) {
+ this.classification = Objects.requireNonNull(classification);
+ this.probability = probability;
+ this.score = score;
+ }
+
+ public Object getClassification() {
+ return classification;
+ }
+
+ public double getProbability() {
+ return probability;
+ }
+
+ public double getScore() {
+ return score;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
+ builder.field(CLASS_NAME.getPreferredName(), classification);
+ builder.field(CLASS_PROBABILITY.getPreferredName(), probability);
+ builder.field(CLASS_SCORE.getPreferredName(), score);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public boolean equals(Object object) {
+ if (object == this) { return true; }
+ if (object == null || getClass() != object.getClass()) { return false; }
+ TopClassEntry that = (TopClassEntry) object;
+ return Objects.equals(classification, that.classification) && probability == that.probability && score == that.score;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(classification, probability, score);
+ }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
index 9627a98ab6cf3..bd1b6a50cad6e 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
@@ -352,8 +352,10 @@ public static class IndexPrivilegeName {
public static final String MANAGE_ILM = "manage_ilm";
public static final String CREATE_DOC = "create_doc";
public static final String MAINTENANCE = "maintenance";
+ public static final String AUTO_CONFIGURE = "auto_configure";
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, READ, READ_CROSS, CREATE, INDEX, DELETE, WRITE, MONITOR, MANAGE,
- DELETE_INDEX, CREATE_INDEX, VIEW_INDEX_METADATA, MANAGE_FOLLOW_INDEX, MANAGE_ILM, CREATE_DOC, MAINTENANCE };
+ DELETE_INDEX, CREATE_INDEX, VIEW_INDEX_METADATA, MANAGE_FOLLOW_INDEX, MANAGE_ILM, CREATE_DOC, MAINTENANCE,
+ AUTO_CONFIGURE};
}
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index 9abea8ef3a001..3e0a006598d75 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -1308,6 +1308,7 @@ public void testPutDataFrameAnalyticsConfig_GivenOutlierDetectionAnalysis() thro
assertThat(createdConfig.getAnalyzedFields(), equalTo(config.getAnalyzedFields()));
assertThat(createdConfig.getModelMemoryLimit(), equalTo(ByteSizeValue.parseBytesSizeValue("1gb", ""))); // default value
assertThat(createdConfig.getDescription(), equalTo("some description"));
+ assertThat(createdConfig.getMaxNumThreads(), equalTo(1));
}
public void testPutDataFrameAnalyticsConfig_GivenRegression() throws Exception {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
index 14ce4dd489abe..dd0b4bf6540f2 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
@@ -688,6 +688,7 @@ public void testDefaultNamedXContents() {
// Explicitly check for metrics from the analytics module because they aren't in InternalAggregationTestCase
assertTrue(namedXContents.removeIf(e -> e.name.getPreferredName().equals("string_stats")));
assertTrue(namedXContents.removeIf(e -> e.name.getPreferredName().equals("top_metrics")));
+ assertTrue(namedXContents.removeIf(e -> e.name.getPreferredName().equals("inference")));
assertEquals(expectedInternalAggregations + expectedSuggestions, namedXContents.size());
Map<Class<?>, Integer> categories = new HashMap<>();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/analytics/InferenceAggIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/analytics/InferenceAggIT.java
new file mode 100644
index 0000000000000..fd530a23ec54e
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/analytics/InferenceAggIT.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.analytics;
+
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.client.ESRestHighLevelClientTestCase;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.indices.CreateIndexRequest;
+import org.elasticsearch.client.ml.PutTrainedModelRequest;
+import org.elasticsearch.client.ml.inference.TrainedModelConfig;
+import org.elasticsearch.client.ml.inference.TrainedModelDefinition;
+import org.elasticsearch.client.ml.inference.TrainedModelInput;
+import org.elasticsearch.client.ml.inference.trainedmodel.RegressionConfig;
+import org.elasticsearch.client.ml.inference.trainedmodel.tree.Tree;
+import org.elasticsearch.client.ml.inference.trainedmodel.tree.TreeNode;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+public class InferenceAggIT extends ESRestHighLevelClientTestCase {
+
+ public void testInferenceAgg() throws IOException {
+
+ // create a very simple decision tree with a root node and 2 leaves
+ List<String> featureNames = Collections.singletonList("cost");
+ Tree.Builder builder = Tree.builder();
+ builder.setFeatureNames(featureNames);
+ TreeNode.Builder root = builder.addJunction(0, 0, true, 1.0);
+ int leftChild = root.getLeftChild();
+ int rightChild = root.getRightChild();
+ builder.addLeaf(leftChild, 10.0);
+ builder.addLeaf(rightChild, 20.0);
+
+ final String modelId = "simple_regression";
+ putTrainedModel(modelId, featureNames, builder.build());
+
+ final String index = "inference-test-data";
+ indexData(index);
+
+ TermsAggregationBuilder termsAgg = new TermsAggregationBuilder("fruit_type").field("fruit");
+ AvgAggregationBuilder avgAgg = new AvgAggregationBuilder("avg_cost").field("cost");
+ termsAgg.subAggregation(avgAgg);
+
+ Map<String, String> bucketPaths = new HashMap<>();
+ bucketPaths.put("cost", "avg_cost");
+ InferencePipelineAggregationBuilder inferenceAgg = new InferencePipelineAggregationBuilder("infer", modelId, bucketPaths);
+ termsAgg.subAggregation(inferenceAgg);
+
+ SearchRequest search = new SearchRequest(index);
+ search.source().aggregation(termsAgg);
+ SearchResponse response = highLevelClient().search(search, RequestOptions.DEFAULT);
+ ParsedTerms terms = response.getAggregations().get("fruit_type");
+ List<? extends Terms.Bucket> buckets = terms.getBuckets();
+ {
+ assertThat(buckets.get(0).getKey(), equalTo("apple"));
+ ParsedInference inference = buckets.get(0).getAggregations().get("infer");
+ assertThat((Double) inference.getValue(), closeTo(20.0, 0.01));
+ assertNull(inference.getWarning());
+ assertNull(inference.getFeatureImportance());
+ assertNull(inference.getTopClasses());
+ }
+ {
+ assertThat(buckets.get(1).getKey(), equalTo("banana"));
+ ParsedInference inference = buckets.get(1).getAggregations().get("infer");
+ assertThat((Double) inference.getValue(), closeTo(10.0, 0.01));
+ assertNull(inference.getWarning());
+ assertNull(inference.getFeatureImportance());
+ assertNull(inference.getTopClasses());
+ }
+ }
+
+ private void putTrainedModel(String modelId, List<String> inputFields, Tree tree) throws IOException {
+ TrainedModelDefinition definition = new TrainedModelDefinition.Builder().setTrainedModel(tree).build();
+ TrainedModelConfig trainedModelConfig = TrainedModelConfig.builder()
+ .setDefinition(definition)
+ .setModelId(modelId)
+ .setInferenceConfig(new RegressionConfig())
+ .setInput(new TrainedModelInput(inputFields))
+ .setDescription("test model")
+ .build();
+ highLevelClient().machineLearning().putTrainedModel(new PutTrainedModelRequest(trainedModelConfig), RequestOptions.DEFAULT);
+ }
+
+ private void indexData(String index) throws IOException {
+ CreateIndexRequest create = new CreateIndexRequest(index);
+ create.mapping("{\"properties\": {\"fruit\": {\"type\": \"keyword\"}," +
+ "\"cost\": {\"type\": \"double\"}}}", XContentType.JSON);
+ highLevelClient().indices().create(create, RequestOptions.DEFAULT);
+ BulkRequest bulk = new BulkRequest(index).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+ bulk.add(new IndexRequest().source(XContentType.JSON, "fruit", "apple", "cost", "1.2"));
+ bulk.add(new IndexRequest().source(XContentType.JSON, "fruit", "banana", "cost", "0.8"));
+ bulk.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+ highLevelClient().bulk(bulk, RequestOptions.DEFAULT);
+ }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
index b1bcf4ccefe6d..7ccee107985bb 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
@@ -3040,6 +3040,7 @@ public void testPutDataFrameAnalytics() throws Exception {
.setAnalyzedFields(analyzedFields) // <5>
.setModelMemoryLimit(new ByteSizeValue(5, ByteSizeUnit.MB)) // <6>
.setDescription("this is an example description") // <7>
+ .setMaxNumThreads(1) // <8>
.build();
// end::put-data-frame-analytics-config
@@ -3096,6 +3097,7 @@ public void testUpdateDataFrameAnalytics() throws Exception {
.setId("my-analytics-config") // <1>
.setDescription("new description") // <2>
.setModelMemoryLimit(new ByteSizeValue(128, ByteSizeUnit.MB)) // <3>
+ .setMaxNumThreads(4) // <4>
.build();
// end::update-data-frame-analytics-config-update
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java
index 48e14ee898b6c..23edf06418c27 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java
@@ -39,9 +39,6 @@ protected EqlSearchRequest createClientTestInstance() {
if (randomBoolean()) {
EqlSearchRequest.fetchSize(randomIntBetween(1, Integer.MAX_VALUE));
}
- if (randomBoolean()) {
- EqlSearchRequest.implicitJoinKeyField(randomAlphaOfLength(10));
- }
if (randomBoolean()) {
EqlSearchRequest.eventCategoryField(randomAlphaOfLength(10));
}
@@ -54,9 +51,6 @@ protected EqlSearchRequest createClientTestInstance() {
if (randomBoolean()) {
EqlSearchRequest.tiebreakerField(randomAlphaOfLength(10));
}
- if (randomBoolean()) {
- EqlSearchRequest.searchAfter(randomArray(1, 4, Object[]::new, () -> randomAlphaOfLength(3)));
- }
if (randomBoolean()) {
if (randomBoolean()) {
EqlSearchRequest.filter(QueryBuilders.matchAllQuery());
@@ -76,12 +70,10 @@ protected org.elasticsearch.xpack.eql.action.EqlSearchRequest doParseToServerIns
protected void assertInstances(org.elasticsearch.xpack.eql.action.EqlSearchRequest serverInstance, EqlSearchRequest
clientTestInstance) {
assertThat(serverInstance.eventCategoryField(), equalTo(clientTestInstance.eventCategoryField()));
- assertThat(serverInstance.implicitJoinKeyField(), equalTo(clientTestInstance.implicitJoinKeyField()));
assertThat(serverInstance.timestampField(), equalTo(clientTestInstance.timestampField()));
assertThat(serverInstance.tiebreakerField(), equalTo(clientTestInstance.tiebreakerField()));
assertThat(serverInstance.filter(), equalTo(clientTestInstance.filter()));
assertThat(serverInstance.query(), equalTo(clientTestInstance.query()));
- assertThat(serverInstance.searchAfter(), equalTo(clientTestInstance.searchAfter()));
assertThat(serverInstance.indicesOptions(), equalTo(clientTestInstance.indicesOptions()));
assertThat(serverInstance.indices(), equalTo(clientTestInstance.indices()));
assertThat(serverInstance.fetchSize(), equalTo(clientTestInstance.fetchSize()));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java
index 24688a6070915..623e7a98cc888 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java
@@ -69,6 +69,9 @@ public static DataFrameAnalyticsConfig randomDataFrameAnalyticsConfig() {
if (randomBoolean()) {
builder.setAllowLazyStart(randomBoolean());
}
+ if (randomBoolean()) {
+ builder.setMaxNumThreads(randomIntBetween(1, 20));
+ }
return builder.build();
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdateTests.java
index 086629a323117..0b1bf767a20aa 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdateTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigUpdateTests.java
@@ -46,6 +46,9 @@ public static DataFrameAnalyticsConfigUpdate randomDataFrameAnalyticsConfigUpdat
if (randomBoolean()) {
builder.setAllowLazyStart(randomBoolean());
}
+ if (randomBoolean()) {
+ builder.setMaxNumThreads(randomIntBetween(1, 20));
+ }
return builder.build();
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelInputTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelInputTests.java
index 30b6c46402df4..ca93a456c37e5 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelInputTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelInputTests.java
@@ -54,5 +54,4 @@ public static TrainedModelInput createRandomInput() {
protected TrainedModelInput createTestInstance() {
return createRandomInput();
}
-
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/results/FeatureImportanceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/results/FeatureImportanceTests.java
new file mode 100644
index 0000000000000..9e6c8492e7453
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/results/FeatureImportanceTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ml.inference.results;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class FeatureImportanceTests extends AbstractXContentTestCase<FeatureImportance> {
+
+ @Override
+ protected FeatureImportance createTestInstance() {
+ return new FeatureImportance(
+ randomAlphaOfLength(10),
+ randomDoubleBetween(-10.0, 10.0, false),
+ randomBoolean() ? null :
+ Stream.generate(() -> randomAlphaOfLength(10))
+ .limit(randomLongBetween(2, 10))
+ .collect(Collectors.toMap(Function.identity(), (k) -> randomDoubleBetween(-10, 10, false))));
+
+ }
+
+ @Override
+ protected FeatureImportance doParseInstance(XContentParser parser) throws IOException {
+ return FeatureImportance.fromXContent(parser);
+ }
+
+ @Override
+ protected boolean supportsUnknownFields() {
+ return true;
+ }
+
+ @Override
+ protected Predicate<String> getRandomFieldsExcludeFilter() {
+ return field -> field.equals(FeatureImportance.CLASS_IMPORTANCE);
+ }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/results/TopClassEntryTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/results/TopClassEntryTests.java
new file mode 100644
index 0000000000000..672d8a80df010
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/results/TopClassEntryTests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ml.inference.results;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class TopClassEntryTests extends AbstractXContentTestCase<TopClassEntry> {
+ @Override
+ protected TopClassEntry createTestInstance() {
+ Object classification;
+ if (randomBoolean()) {
+ classification = randomAlphaOfLength(10);
+ } else if (randomBoolean()) {
+ classification = randomBoolean();
+ } else {
+ classification = randomDouble();
+ }
+ return new TopClassEntry(classification, randomDouble(), randomDouble());
+ }
+
+ @Override
+ protected TopClassEntry doParseInstance(XContentParser parser) throws IOException {
+ return TopClassEntry.fromXContent(parser);
+ }
+
+ @Override
+ protected boolean supportsUnknownFields() {
+ return true;
+ }
+}
diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index e687a761a1325..53291ad067e3d 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -25,7 +25,7 @@ import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.info.BuildParams
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.tar.SymbolicLinkPreservingTar
-
+import groovy.io.FileType
import java.nio.file.Files
import java.nio.file.Path
@@ -243,6 +243,13 @@ subprojects {
project.delete(archiveExtractionDir)
archiveExtractionDir.mkdirs()
}
+ // common sanity checks on extracted archive directly as part of checkExtraction
+ doLast {
+ // check no plain class files are packaged
+ archiveExtractionDir.eachFileRecurse (FileType.FILES) { file ->
+ assert file.name.endsWith(".class") == false
+ }
+ }
}
tasks.named('check').configure { dependsOn checkExtraction }
if (project.name.contains('tar')) {
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 74c2ba53c2dfb..a4a2e8993b8d8 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -259,6 +259,7 @@ copyModule(processSystemdOutputs, project(':modules:systemd'))
configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
apply plugin: 'elasticsearch.jdk-download'
+ apply plugin: 'elasticsearch.repositories'
// Setup all required JDKs
project.jdks {
@@ -278,6 +279,31 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
/*****************************************************************************
* Properties to expand when copying packaging files *
*****************************************************************************/
+ configurations {
+ ['libs', 'libsPluginCli', 'libsKeystoreCli', 'libsSecurityCli'].each {
+ create(it) {
+ canBeConsumed = false
+ canBeResolved = true
+ attributes {
+ attribute(Category.CATEGORY_ATTRIBUTE, objects.named(Category, Category.LIBRARY))
+ attribute(Usage.USAGE_ATTRIBUTE, objects.named(Usage, Usage.JAVA_RUNTIME))
+ attribute(Bundling.BUNDLING_ATTRIBUTE, objects.named(Bundling, Bundling.EXTERNAL))
+ }
+ }
+ }
+ }
+
+ dependencies {
+ libs project(':server')
+ libs project(':libs:elasticsearch-plugin-classloader')
+ libs project(':distribution:tools:java-version-checker')
+ libs project(':distribution:tools:launchers')
+
+ libsPluginCli project(':distribution:tools:plugin-cli')
+ libsKeystoreCli project(path: ':distribution:tools:keystore-cli')
+ libsSecurityCli project(':x-pack:plugin:security:cli')
+ }
+
project.ext {
/*****************************************************************************
@@ -286,22 +312,16 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
libFiles = { oss ->
copySpec {
// delay by using closures, since they have not yet been configured, so no jar task exists yet
- from { project(':server').jar }
- from { project(':server').configurations.runtimeClasspath }
- from { project(':libs:elasticsearch-plugin-classloader').jar }
- from { project(':distribution:tools:java-version-checker').jar }
- from { project(':distribution:tools:launchers').jar }
+ from(configurations.libs)
into('tools/plugin-cli') {
- from { project(':distribution:tools:plugin-cli').jar }
- from { project(':distribution:tools:plugin-cli').configurations.runtimeClasspath }
+ from(configurations.libsPluginCli)
}
into('tools/keystore-cli') {
- from { project(':distribution:tools:keystore-cli').jar }
+ from(configurations.libsKeystoreCli)
}
if (oss == false) {
into('tools/security-cli') {
- from { project(':x-pack:plugin:security:cli').jar }
- from { project(':x-pack:plugin:security:cli').configurations.runtimeClasspath }
+ from(configurations.libsSecurityCli)
}
}
}
@@ -612,14 +632,14 @@ subprojects {
}
['archives:windows-zip', 'archives:oss-windows-zip',
- 'archives:darwin-tar', 'archives:oss-darwin-tar',
- 'archives:linux-aarch64-tar', 'archives:oss-linux-aarch64-tar',
- 'archives:linux-tar', 'archives:oss-linux-tar',
- 'archives:integ-test-zip',
- 'packages:rpm', 'packages:deb',
- 'packages:aarch64-rpm', 'packages:aarch64-deb',
- 'packages:oss-rpm', 'packages:oss-deb',
- 'packages:aarch64-oss-rpm', 'packages:aarch64-oss-deb'
+ 'archives:darwin-tar', 'archives:oss-darwin-tar',
+ 'archives:linux-aarch64-tar', 'archives:oss-linux-aarch64-tar',
+ 'archives:linux-tar', 'archives:oss-linux-tar',
+ 'archives:integ-test-zip',
+ 'packages:rpm', 'packages:deb',
+ 'packages:aarch64-rpm', 'packages:aarch64-deb',
+ 'packages:oss-rpm', 'packages:oss-deb',
+ 'packages:aarch64-oss-rpm', 'packages:aarch64-oss-deb'
].forEach { subName ->
Project subproject = project("${project.path}:${subName}")
Configuration configuration = configurations.create(subproject.name)
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 08d7e54283a9b..25ca5283917c4 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -384,7 +384,7 @@ Closure commonRpmConfig(boolean oss, boolean jdk, String architecture) {
prefix '/usr'
packager 'Elasticsearch'
- version = project.version.replace('-', '_')
+ archiveVersion = project.version.replace('-', '_')
release = '1'
os 'LINUX'
distribution 'Elasticsearch'
diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle
index 0263205521081..65d43c45b43e5 100644
--- a/distribution/tools/launchers/build.gradle
+++ b/distribution/tools/launchers/build.gradle
@@ -21,7 +21,7 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
apply plugin: 'elasticsearch.build'
dependencies {
- api parent.project('java-version-checker')
+ compileOnly project(':distribution:tools:java-version-checker')
testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testImplementation "junit:junit:${versions.junit}"
testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}"
diff --git a/docs/build.gradle b/docs/build.gradle
index 3f6527469a812..81b698a2a1b14 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -54,7 +54,6 @@ testClusters.integTest {
setting 'indices.lifecycle.history_index_enabled', 'false'
if (BuildParams.isSnapshotBuild() == false) {
systemProperty 'es.autoscaling_feature_flag_registered', 'true'
- systemProperty 'es.eql_feature_flag_registered', 'true'
systemProperty 'es.searchable_snapshots_feature_enabled', 'true'
}
setting 'xpack.autoscaling.enabled', 'true'
diff --git a/docs/java-rest/high-level/aggs-builders.asciidoc b/docs/java-rest/high-level/aggs-builders.asciidoc
index 4ac24b7f00d97..718ac5056298d 100644
--- a/docs/java-rest/high-level/aggs-builders.asciidoc
+++ b/docs/java-rest/high-level/aggs-builders.asciidoc
@@ -12,21 +12,21 @@ This page lists all the available aggregations with their corresponding `Aggrega
[options="header"]
|======
| Aggregation | AggregationBuilder Class | Method in AggregationBuilders
-| {ref}/search-aggregations-metrics-avg-aggregation.html[Avg] | {agg-ref}/metrics/avg/AvgAggregationBuilder.html[AvgAggregationBuilder] | {agg-ref}/AggregationBuilders.html#avg-java.lang.String-[AggregationBuilders.avg()]
-| {ref}/search-aggregations-metrics-cardinality-aggregation.html[Cardinality] | {agg-ref}/metrics/cardinality/CardinalityAggregationBuilder.html[CardinalityAggregationBuilder] | {agg-ref}/AggregationBuilders.html#cardinality-java.lang.String-[AggregationBuilders.cardinality()]
-| {ref}/search-aggregations-metrics-extendedstats-aggregation.html[Extended Stats] | {agg-ref}/metrics/stats/extended/ExtendedStatsAggregationBuilder.html[ExtendedStatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#extendedStats-java.lang.String-[AggregationBuilders.extendedStats()]
-| {ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds] | {agg-ref}/metrics/geobounds/GeoBoundsAggregationBuilder.html[GeoBoundsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoBounds-java.lang.String-[AggregationBuilders.geoBounds()]
-| {ref}/search-aggregations-metrics-geocentroid-aggregation.html[Geo Centroid] | {agg-ref}/metrics/geocentroid/GeoCentroidAggregationBuilder.html[GeoCentroidAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoCentroid-java.lang.String-[AggregationBuilders.geoCentroid()]
-| {ref}/search-aggregations-metrics-max-aggregation.html[Max] | {agg-ref}/metrics/max/MaxAggregationBuilder.html[MaxAggregationBuilder] | {agg-ref}/AggregationBuilders.html#max-java.lang.String-[AggregationBuilders.max()]
-| {ref}/search-aggregations-metrics-min-aggregation.html[Min] | {agg-ref}/metrics/min/MinAggregationBuilder.html[MinxAggregationBuilder] | {agg-ref}/AggregationBuilders.html#min-java.lang.String-[AggregationBuilders.min()]
-| {ref}/search-aggregations-metrics-percentile-aggregation.html[Percentiles] | {agg-ref}/metrics/percentiles/PercentilesAggregationBuilder.html[PercentilesAggregationBuilder] | {agg-ref}/AggregationBuilders.html#percentiles-java.lang.String-[AggregationBuilders.percentiles()]
-| {ref}/search-aggregations-metrics-percentile-rank-aggregation.html[Percentile Ranks] | {agg-ref}/metrics/percentiles/PercentileRanksAggregationBuilder.html[PercentileRanksAggregationBuilder] | {agg-ref}/AggregationBuilders.html#percentileRanks-java.lang.String-[AggregationBuilders.percentileRanks()]
-| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Scripted Metric] | {agg-ref}/metrics/scripted/ScriptedMetricAggregationBuilder.html[ScriptedMetricAggregationBuilder] | {agg-ref}/AggregationBuilders.html#scriptedMetric-java.lang.String-[AggregationBuilders.scriptedMetric()]
-| {ref}/search-aggregations-metrics-stats-aggregation.html[Stats] | {agg-ref}/metrics/stats/StatsAggregationBuilder.html[StatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#stats-java.lang.String-[AggregationBuilders.stats()]
-| {ref}/search-aggregations-metrics-sum-aggregation.html[Sum] | {agg-ref}/metrics/sum/SumAggregationBuilder.html[SumAggregationBuilder] | {agg-ref}/AggregationBuilders.html#sum-java.lang.String-[AggregationBuilders.sum()]
-| {ref}/search-aggregations-metrics-top-hits-aggregation.html[Top hits] | {agg-ref}/metrics/tophits/TopHitsAggregationBuilder.html[TopHitsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#topHits-java.lang.String-[AggregationBuilders.topHits()]
+| {ref}/search-aggregations-metrics-avg-aggregation.html[Avg] | {agg-ref}/metrics/AvgAggregationBuilder.html[AvgAggregationBuilder] | {agg-ref}/AggregationBuilders.html#avg-java.lang.String-[AggregationBuilders.avg()]
+| {ref}/search-aggregations-metrics-cardinality-aggregation.html[Cardinality] | {agg-ref}/metrics/CardinalityAggregationBuilder.html[CardinalityAggregationBuilder] | {agg-ref}/AggregationBuilders.html#cardinality-java.lang.String-[AggregationBuilders.cardinality()]
+| {ref}/search-aggregations-metrics-extendedstats-aggregation.html[Extended Stats] | {agg-ref}/metrics/ExtendedStatsAggregationBuilder.html[ExtendedStatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#extendedStats-java.lang.String-[AggregationBuilders.extendedStats()]
+| {ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds] | {agg-ref}/metrics/GeoBoundsAggregationBuilder.html[GeoBoundsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoBounds-java.lang.String-[AggregationBuilders.geoBounds()]
+| {ref}/search-aggregations-metrics-geocentroid-aggregation.html[Geo Centroid] | {agg-ref}/metrics/GeoCentroidAggregationBuilder.html[GeoCentroidAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoCentroid-java.lang.String-[AggregationBuilders.geoCentroid()]
+| {ref}/search-aggregations-metrics-max-aggregation.html[Max] | {agg-ref}/metrics/MaxAggregationBuilder.html[MaxAggregationBuilder] | {agg-ref}/AggregationBuilders.html#max-java.lang.String-[AggregationBuilders.max()]
+| {ref}/search-aggregations-metrics-min-aggregation.html[Min] | {agg-ref}/metrics/MinAggregationBuilder.html[MinAggregationBuilder] | {agg-ref}/AggregationBuilders.html#min-java.lang.String-[AggregationBuilders.min()]
+| {ref}/search-aggregations-metrics-percentile-aggregation.html[Percentiles] | {agg-ref}/metrics/PercentilesAggregationBuilder.html[PercentilesAggregationBuilder] | {agg-ref}/AggregationBuilders.html#percentiles-java.lang.String-[AggregationBuilders.percentiles()]
+| {ref}/search-aggregations-metrics-percentile-rank-aggregation.html[Percentile Ranks] | {agg-ref}/metrics/PercentileRanksAggregationBuilder.html[PercentileRanksAggregationBuilder] | {agg-ref}/AggregationBuilders.html#percentileRanks-java.lang.String-[AggregationBuilders.percentileRanks()]
+| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Scripted Metric] | {agg-ref}/metrics/ScriptedMetricAggregationBuilder.html[ScriptedMetricAggregationBuilder] | {agg-ref}/AggregationBuilders.html#scriptedMetric-java.lang.String-[AggregationBuilders.scriptedMetric()]
+| {ref}/search-aggregations-metrics-stats-aggregation.html[Stats] | {agg-ref}/metrics/StatsAggregationBuilder.html[StatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#stats-java.lang.String-[AggregationBuilders.stats()]
+| {ref}/search-aggregations-metrics-sum-aggregation.html[Sum] | {agg-ref}/metrics/SumAggregationBuilder.html[SumAggregationBuilder] | {agg-ref}/AggregationBuilders.html#sum-java.lang.String-[AggregationBuilders.sum()]
+| {ref}/search-aggregations-metrics-top-hits-aggregation.html[Top hits] | {agg-ref}/metrics/TopHitsAggregationBuilder.html[TopHitsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#topHits-java.lang.String-[AggregationBuilders.topHits()]
| {ref}/search-aggregations-metrics-top-metrics.html[Top Metrics] | {javadoc-client}/analytics/TopMetricsAggregationBuilder.html[TopMetricsAggregationBuilder] | None
-| {ref}/search-aggregations-metrics-valuecount-aggregation.html[Value Count] | {agg-ref}/metrics/valuecount/ValueCountAggregationBuilder.html[ValueCountAggregationBuilder] | {agg-ref}/AggregationBuilders.html#count-java.lang.String-[AggregationBuilders.count()]
+| {ref}/search-aggregations-metrics-valuecount-aggregation.html[Value Count] | {agg-ref}/metrics/ValueCountAggregationBuilder.html[ValueCountAggregationBuilder] | {agg-ref}/AggregationBuilders.html#count-java.lang.String-[AggregationBuilders.count()]
| {ref}/search-aggregations-metrics-string-stats-aggregation.html[String Stats] | {javadoc-client}/analytics/StringStatsAggregationBuilder.html[StringStatsAggregationBuilder] | None
|======
@@ -59,20 +59,21 @@ This page lists all the available aggregations with their corresponding `Aggrega
==== Pipeline Aggregations
[options="header"]
|======
-| Pipeline on | PipelineAggregationBuilder Class | Method in PipelineAggregatorBuilders
-| {ref}/search-aggregations-pipeline-avg-bucket-aggregation.html[Avg Bucket] | {agg-ref}/pipeline/bucketmetrics/avg/AvgBucketPipelineAggregationBuilder.html[AvgBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#avgBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.avgBucket()]
-| {ref}/search-aggregations-pipeline-derivative-aggregation.html[Derivative] | {agg-ref}/pipeline/derivative/DerivativePipelineAggregationBuilder.html[DerivativePipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#derivative-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.derivative()]
-| {ref}/search-aggregations-pipeline-max-bucket-aggregation.html[Max Bucket] | {agg-ref}/pipeline/bucketmetrics/max/MaxBucketPipelineAggregationBuilder.html[MaxBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#maxBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.maxBucket()]
-| {ref}/search-aggregations-pipeline-min-bucket-aggregation.html[Min Bucket] | {agg-ref}/pipeline/bucketmetrics/min/MinBucketPipelineAggregationBuilder.html[MinBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#minBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.minBucket()]
-| {ref}/search-aggregations-pipeline-sum-bucket-aggregation.html[Sum Bucket] | {agg-ref}/pipeline/bucketmetrics/sum/SumBucketPipelineAggregationBuilder.html[SumBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#sumBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.sumBucket()]
-| {ref}/search-aggregations-pipeline-stats-bucket-aggregation.html[Stats Bucket] | {agg-ref}/pipeline/bucketmetrics/stats/StatsBucketPipelineAggregationBuilder.html[StatsBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#statsBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.statsBucket()]
-| {ref}/search-aggregations-pipeline-extended-stats-bucket-aggregation.html[Extended Stats Bucket] | {agg-ref}/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketPipelineAggregationBuilder.html[ExtendedStatsBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#extendedStatsBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.extendedStatsBucket()]
-| {ref}/search-aggregations-pipeline-percentiles-bucket-aggregation.html[Percentiles Bucket] | {agg-ref}/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.html[PercentilesBucketPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#percentilesBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.percentilesBucket()]
-| {ref}/search-aggregations-pipeline-movavg-aggregation.html[Moving Average] | {agg-ref}/pipeline/movavg/MovAvgPipelineAggregationBuilder.html[MovAvgPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#movingAvg-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.movingAvg()]
-| {ref}/search-aggregations-pipeline-cumulative-sum-aggregation.html[Cumulative Sum] | {agg-ref}/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.html[CumulativeSumPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#cumulativeSum-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.cumulativeSum()]
-| {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Bucket Script] | {agg-ref}/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.html[BucketScriptPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#bucketScript-java.lang.String-java.util.Map-org.elasticsearch.script.Script-[PipelineAggregatorBuilders.bucketScript()]
-| {ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[Bucket Selector] | {agg-ref}/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.html[BucketSelectorPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#bucketSelector-java.lang.String-java.util.Map-org.elasticsearch.script.Script-[PipelineAggregatorBuilders.bucketSelector()]
-| {ref}/search-aggregations-pipeline-serialdiff-aggregation.html[Serial Differencing] | {agg-ref}/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.html[SerialDiffPipelineAggregationBuilder] | {agg-ref}/pipeline/PipelineAggregatorBuilders.html#diff-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.diff()]
+| Pipeline on | PipelineAggregationBuilder Class | Method in PipelineAggregatorBuilders
+| {ref}/search-aggregations-pipeline-avg-bucket-aggregation.html[Avg Bucket] | {agg-ref}/pipeline/AvgBucketPipelineAggregationBuilder.html[AvgBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#avgBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.avgBucket()]
+| {ref}/search-aggregations-pipeline-derivative-aggregation.html[Derivative] | {agg-ref}/pipeline/DerivativePipelineAggregationBuilder.html[DerivativePipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#derivative-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.derivative()]
+| {ref}/search-aggregations-pipeline-inference-bucket-aggregation.html[Inference] | {javadoc-client}/analytics/InferencePipelineAggregationBuilder.html[InferencePipelineAggregationBuilder] | None
+| {ref}/search-aggregations-pipeline-max-bucket-aggregation.html[Max Bucket] | {agg-ref}/pipeline/MaxBucketPipelineAggregationBuilder.html[MaxBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#maxBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.maxBucket()]
+| {ref}/search-aggregations-pipeline-min-bucket-aggregation.html[Min Bucket] | {agg-ref}/pipeline/MinBucketPipelineAggregationBuilder.html[MinBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#minBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.minBucket()]
+| {ref}/search-aggregations-pipeline-sum-bucket-aggregation.html[Sum Bucket] | {agg-ref}/pipeline/SumBucketPipelineAggregationBuilder.html[SumBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#sumBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.sumBucket()]
+| {ref}/search-aggregations-pipeline-stats-bucket-aggregation.html[Stats Bucket] | {agg-ref}/pipeline/StatsBucketPipelineAggregationBuilder.html[StatsBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#statsBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.statsBucket()]
+| {ref}/search-aggregations-pipeline-extended-stats-bucket-aggregation.html[Extended Stats Bucket] | {agg-ref}/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.html[ExtendedStatsBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#extendedStatsBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.extendedStatsBucket()]
+| {ref}/search-aggregations-pipeline-percentiles-bucket-aggregation.html[Percentiles Bucket] | {agg-ref}/pipeline/PercentilesBucketPipelineAggregationBuilder.html[PercentilesBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#percentilesBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.percentilesBucket()]
+| {ref}/search-aggregations-pipeline-movfn-aggregation.html[Moving Function] | {agg-ref}/pipeline/MovFnPipelineAggregationBuilder.html[MovFnPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#movingFunction-java.lang.String-org.elasticsearch.script.Script-java.lang.String-int-[PipelineAggregatorBuilders.movingFunction()]
+| {ref}/search-aggregations-pipeline-cumulative-sum-aggregation.html[Cumulative Sum] | {agg-ref}/pipeline/CumulativeSumPipelineAggregationBuilder.html[CumulativeSumPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#cumulativeSum-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.cumulativeSum()]
+| {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Bucket Script] | {agg-ref}/pipeline/BucketScriptPipelineAggregationBuilder.html[BucketScriptPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#bucketScript-java.lang.String-java.util.Map-org.elasticsearch.script.Script-[PipelineAggregatorBuilders.bucketScript()]
+| {ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[Bucket Selector] | {agg-ref}/pipeline/BucketSelectorPipelineAggregationBuilder.html[BucketSelectorPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#bucketSelector-java.lang.String-java.util.Map-org.elasticsearch.script.Script-[PipelineAggregatorBuilders.bucketSelector()]
+| {ref}/search-aggregations-pipeline-serialdiff-aggregation.html[Serial Differencing] | {agg-ref}/pipeline/SerialDiffPipelineAggregationBuilder.html[SerialDiffPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#diff-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.diff()]
|======
==== Matrix Aggregations
diff --git a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc
index dab05b533cf50..8221dff43bbd5 100644
--- a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc
+++ b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc
@@ -38,6 +38,7 @@ include-tagged::{doc-tests-file}[{api}-config]
<5> The fields to be included in / excluded from the analysis
<6> The memory limit for the model created as part of the analysis process
<7> Optionally, a human-readable description
+<8> The maximum number of threads to be used by the analysis. Defaults to 1.
[id="{upid}-{api}-query-config"]
diff --git a/docs/java-rest/high-level/ml/update-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/update-data-frame-analytics.asciidoc
index b6df7d25d0453..a110baa49e60e 100644
--- a/docs/java-rest/high-level/ml/update-data-frame-analytics.asciidoc
+++ b/docs/java-rest/high-level/ml/update-data-frame-analytics.asciidoc
@@ -34,6 +34,7 @@ include-tagged::{doc-tests-file}[{api}-config-update]
<1> The {dfanalytics-job} ID
<2> The human-readable description
<3> The memory limit for the model created as part of the analysis process
+<4> The maximum number of threads to be used by the analysis
[id="{upid}-{api}-query-config"]
diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc
index d0b95ae7f497b..b85cf8103858f 100644
--- a/docs/plugins/analysis-icu.asciidoc
+++ b/docs/plugins/analysis-icu.asciidoc
@@ -30,7 +30,7 @@ include::install_remove.asciidoc[]
==== ICU Analyzer
The `icu_analyzer` analyzer performs basic normalization, tokenization and character folding, using the
-`icu_normalizer` char filter, `icu_tokenizer` and `icu_normalizer` token filter
+`icu_normalizer` char filter, `icu_tokenizer` and `icu_folding` token filter
The following parameters are accepted:
diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc
index 393ac4d634372..0abcf7c19ebff 100644
--- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc
@@ -535,10 +535,10 @@ first (ascending order, `asc`) or last (descending order, `desc`).
==== Size
The `size` parameter can be set to define how many composite buckets should be returned.
-Each composite bucket is considered as a single bucket so setting a size of 10 will return the
+Each composite bucket is considered as a single bucket, so setting a size of 10 will return the
first 10 composite buckets created from the values source.
The response contains the values for each composite bucket in an array containing the values extracted
-from each value source.
+from each value source. Defaults to `10`.
==== Pagination
diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc
index 0c18dd19459ee..850ea8576613d 100644
--- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc
@@ -286,3 +286,93 @@ POST /sales/_search?size=0
// TEST[setup:sales]
<1> Documents without a value in the `quantity` field will fall into the same bucket as documents that have the value `0`.
+
+[[search-aggregations-bucket-histogram-aggregation-histogram-fields]]
+==== Histogram fields
+
+Running a histogram aggregation over histogram fields computes the total number of counts for each interval.
+
+For example, executing a histogram aggregation against the following index that stores pre-aggregated histograms
+with latency metrics (in milliseconds) for different networks:
+
+[source,console]
+--------------------------------------------------
+PUT metrics_index/_doc/1
+{
+ "network.name" : "net-1",
+ "latency_histo" : {
+ "values" : [1, 3, 8, 12, 15],
+ "counts" : [3, 7, 23, 12, 6]
+ }
+}
+
+PUT metrics_index/_doc/2
+{
+ "network.name" : "net-2",
+ "latency_histo" : {
+ "values" : [1, 6, 8, 12, 14],
+ "counts" : [8, 17, 8, 7, 6]
+ }
+}
+
+POST /metrics_index/_search?size=0
+{
+ "aggs" : {
+ "latency_buckets" : {
+ "histogram" : {
+ "field" : "latency_histo",
+ "interval" : 5
+ }
+ }
+ }
+}
+--------------------------------------------------
+
+
+The `histogram` aggregation will sum the counts of each interval computed based on the `values` and
+return the following output:
+
+[source,console-result]
+--------------------------------------------------
+{
+ ...
+ "aggregations": {
+    "latency_buckets" : {
+ "buckets": [
+ {
+ "key": 0.0,
+ "doc_count": 18
+ },
+ {
+ "key": 5.0,
+ "doc_count": 48
+ },
+ {
+ "key": 10.0,
+ "doc_count": 25
+ },
+ {
+ "key": 15.0,
+ "doc_count": 6
+ }
+ ]
+ }
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:test not setup]
+
+[IMPORTANT]
+========
+Histogram aggregation is a bucket aggregation, which partitions documents into buckets rather than calculating metrics over fields like
+metrics aggregations do. Each bucket represents a collection of documents which sub-aggregations can run on.
+On the other hand, a histogram field is a pre-aggregated field representing multiple values inside a single field:
+buckets of numerical data and a count of items/documents for each bucket. This mismatch between the histogram aggregation's expected input
+(expecting raw documents) and the histogram field (that provides summary information) limits the outcome of the aggregation
+to only the doc counts for each bucket.
+
+
+**Consequently, when executing a histogram aggregation over a histogram field, no sub-aggregations are allowed.**
+========
+
+Also, when running a histogram aggregation over a histogram field, the `missing` parameter is not supported.
diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc
index 4973df5625b51..258d9e6a8c4d7 100644
--- a/docs/reference/aggregations/pipeline.asciidoc
+++ b/docs/reference/aggregations/pipeline.asciidoc
@@ -288,3 +288,4 @@ include::pipeline/normalize-aggregation.asciidoc[]
include::pipeline/serial-diff-aggregation.asciidoc[]
include::pipeline/stats-bucket-aggregation.asciidoc[]
include::pipeline/extended-stats-bucket-aggregation.asciidoc[]
+include::pipeline/inference-bucket-aggregation.asciidoc[]
diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc
index 12b039e7b00d5..35bea691c7e51 100644
--- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc
+++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[data-streams-change-mappings-and-settings]]
== Change mappings and settings for a data stream
@@ -579,10 +580,7 @@ contains information about the stream's oldest backing index, `.ds-logs-000001`.
{
"name": "logs",
"timestamp_field": {
- "name": "@timestamp",
- "mapping": {
- "type": "date"
- }
+ "name": "@timestamp"
},
"indices": [
{
diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc
index a19a4bc78773c..e5cb6c6953878 100644
--- a/docs/reference/data-streams/data-stream-apis.asciidoc
+++ b/docs/reference/data-streams/data-stream-apis.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[data-stream-apis]]
== Data stream APIs
diff --git a/docs/reference/data-streams/data-streams-overview.asciidoc b/docs/reference/data-streams/data-streams-overview.asciidoc
index 4a66946b9ab4d..0ec7a984b7317 100644
--- a/docs/reference/data-streams/data-streams-overview.asciidoc
+++ b/docs/reference/data-streams/data-streams-overview.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[data-streams-overview]]
== Data streams overview
++++
@@ -16,9 +17,9 @@ the stream's backing indices. It contains:
* A name or wildcard (`*`) pattern for the data stream.
-* The data stream's _timestamp field_. This field must be mapped as a
- <> or <> field data type and must be
- included in every document indexed to the data stream.
+* A mapping for the data stream's `@timestamp` field. This field must be mapped
+as a <> or <> field data type and must be
+included in every document indexed to the data stream.
* The mappings and settings applied to each backing index when it's created.
diff --git a/docs/reference/data-streams/data-streams.asciidoc b/docs/reference/data-streams/data-streams.asciidoc
index 612c0425ba5cf..d0ed6fd6e924b 100644
--- a/docs/reference/data-streams/data-streams.asciidoc
+++ b/docs/reference/data-streams/data-streams.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[data-streams]]
= Data streams
++++
diff --git a/docs/reference/data-streams/set-up-a-data-stream.asciidoc b/docs/reference/data-streams/set-up-a-data-stream.asciidoc
index 3b8fb9802e32b..d07b59d54ceb1 100644
--- a/docs/reference/data-streams/set-up-a-data-stream.asciidoc
+++ b/docs/reference/data-streams/set-up-a-data-stream.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[set-up-a-data-stream]]
== Set up a data stream
@@ -8,6 +9,7 @@ To set up a data stream, follow these steps:
. <>.
. <>.
. <> to verify it exists.
+. <>.
After you set up a data stream, you can <> for indexing, searches, and other supported operations.
@@ -20,11 +22,8 @@ and its backing indices.
=== Prerequisites
* {es} data streams are intended for time-series data only. Each document
-indexed to a data stream must contain a shared timestamp field.
-+
-TIP: Data streams work well with most common log formats. While no schema is
-required to use data streams, we recommend the {ecs-ref}[Elastic Common Schema
-(ECS)].
+indexed to a data stream must contain the `@timestamp` field. This field must be
+mapped as a <> or <> field data type.
* Data streams are best suited for time-based,
<> use cases. If you frequently need to
@@ -132,17 +131,17 @@ this pattern.
----
====
-* A `data_stream` definition containing the `timestamp_field` property.
- This timestamp field must be included in every document indexed to the data
- stream.
+* A `data_stream` definition containing `@timestamp` in the `timestamp_field`
+property. The `@timestamp` field must be included in every document indexed to
+the data stream.
* A <> or <> field mapping for the
-timestamp field specified in the `timestamp_field` property.
+`@timestamp` field.
+
-IMPORTANT: Carefully consider the timestamp field's mapping, including
+IMPORTANT: Carefully consider the `@timestamp` field's mapping, including
<> such as <>.
-Once the stream is created, you can only update the timestamp field's mapping by
-reindexing the data stream. See
+Once the stream is created, you can only update the `@timestamp` field's mapping
+by reindexing the data stream. See
<>.
* If you intend to use {ilm-init}, you must specify the
@@ -283,7 +282,7 @@ PUT /_data_stream/logs_alt
You can use the <> to get
information about one or more data streams, including:
-* The timestamp field and its mapping
+* The timestamp field
* The current backing indices, which is returned as an array. The last item in
the array contains information about the stream's current write index.
* The current generation
@@ -325,10 +324,7 @@ contains information about the stream's write index, `.ds-logs-000002`.
{
"name": "logs",
"timestamp_field": {
- "name": "@timestamp",
- "mapping": {
- "type": "date"
- }
+ "name": "@timestamp"
},
"indices": [
{
@@ -356,6 +352,13 @@ contains information about the stream's write index, `.ds-logs-000002`.
contains information about the stream's current write index, `.ds-logs-000002`.
====
+[discrete]
+[[secure-a-data-stream]]
+=== Secure a data stream
+
+You can use {es} {security-features} to control access to a data stream and its
+data. See <>.
+
[discrete]
[[delete-a-data-stream]]
=== Delete a data stream
diff --git a/docs/reference/data-streams/use-a-data-stream.asciidoc b/docs/reference/data-streams/use-a-data-stream.asciidoc
index 79e716863934f..15f18ad07b164 100644
--- a/docs/reference/data-streams/use-a-data-stream.asciidoc
+++ b/docs/reference/data-streams/use-a-data-stream.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[use-a-data-stream]]
== Use a data stream
@@ -122,6 +123,64 @@ PUT /logs/_bulk?refresh
====
--
+You can use an <> with these requests to pre-process
+data before it's indexed.
+
+.*Example: Ingest pipeline*
+[%collapsible]
+====
+The following <> request creates the
+`lowercase_message_field` ingest pipeline. The pipeline uses the
+<> to change the `message`
+field value to lowercase before indexing.
+
+[source,console]
+----
+PUT /_ingest/pipeline/lowercase_message_field
+{
+ "description" : "Lowercases the message field value",
+ "processors" : [
+ {
+ "lowercase" : {
+ "field" : "message"
+ }
+ }
+ ]
+}
+----
+// TEST[continued]
+
+The following index API request adds a new document to the `logs` data stream.
+
+The request includes a `?pipeline=lowercase_message_field` query parameter.
+This parameter indicates {es} should use the `lowercase_message_field` pipeline
+to pre-process the document before indexing it.
+
+During pre-processing, the pipeline changes the letter case of the document's
+`message` field value from `LOGIN Successful` to `login successful`.
+
+[source,console]
+----
+POST /logs/_doc?pipeline=lowercase_message_field
+{
+ "@timestamp": "2020-12-08T11:12:01.000Z",
+ "user": {
+ "id": "I1YBEOxJ"
+ },
+ "message": "LOGIN Successful"
+}
+----
+// TEST[continued]
+
+////
+[source,console]
+----
+DELETE /_ingest/pipeline/lowercase_message_field
+----
+// TEST[continued]
+////
+====
+
[discrete]
[[search-a-data-stream]]
=== Search a data stream
diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc
index 352019afe83cc..c50afa8531faf 100644
--- a/docs/reference/docs/bulk.asciidoc
+++ b/docs/reference/docs/bulk.asciidoc
@@ -163,6 +163,9 @@ Each bulk item can include the routing value using the
`routing` field. It automatically follows the behavior of the
index / delete operation based on the `_routing` mapping.
+NOTE: Data streams do not support custom routing. Instead, target the
+appropriate backing index for the stream.
+
[float]
[[bulk-wait-for-active-shards]]
===== Wait For Active Shards
diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc
index cb263fabd3248..7bcd77a27c4d1 100644
--- a/docs/reference/docs/index_.asciidoc
+++ b/docs/reference/docs/index_.asciidoc
@@ -302,6 +302,9 @@ additional document parsing pass. If the `_routing` mapping is defined
and set to be `required`, the index operation will fail if no routing
value is provided or extracted.
+NOTE: Data streams do not support custom routing. Instead, target the
+appropriate backing index for the stream.
+
[float]
[[index-distributed]]
===== Distributed
diff --git a/docs/reference/eql/delete-async-eql-search-api.asciidoc b/docs/reference/eql/delete-async-eql-search-api.asciidoc
index 9b585c28c5515..32bc8207a8ed0 100644
--- a/docs/reference/eql/delete-async-eql-search-api.asciidoc
+++ b/docs/reference/eql/delete-async-eql-search-api.asciidoc
@@ -7,7 +7,7 @@
Delete async EQL search
++++
-dev::[]
+experimental::[]
Deletes an <> or a
<>. The API also
diff --git a/docs/reference/eql/eql-search-api.asciidoc b/docs/reference/eql/eql-search-api.asciidoc
index b9da2d9af0d09..473fbbca2600e 100644
--- a/docs/reference/eql/eql-search-api.asciidoc
+++ b/docs/reference/eql/eql-search-api.asciidoc
@@ -7,11 +7,12 @@
EQL search
++++
-dev::[]
+experimental::[]
Returns search results for an <> query.
-In {es}, EQL assumes each document in an index corresponds to an event.
+In {es}, EQL assumes each document in a data stream or index corresponds to an
+event.
////
[source,console]
@@ -44,9 +45,9 @@ GET /my_index/_eql/search
[[eql-search-api-request]]
==== {api-request-title}
-`GET /<index>/_eql/search`
+`GET /<target>/_eql/search`
-`POST /<index>/_eql/search`
+`POST /<target>/_eql/search`
[[eql-search-api-prereqs]]
==== {api-prereq-title}
@@ -61,12 +62,13 @@ See <>.
[[eql-search-api-path-params]]
==== {api-path-parms-title}
-`<index>`::
+`<target>`::
(Required, string)
(Required, string)
-Comma-separated list of index names or <> used to
-limit the request. Accepts wildcard expressions.
+Comma-separated list of data streams, indices, or <> used to limit the request. Accepts wildcard (`*`) expressions.
+
-To search all indices, use `_all` or `*`.
+To search all data streams and indices in a cluster, use
+`_all` or `*`.
[[eql-search-api-query-params]]
==== {api-query-parms-title}
@@ -157,18 +159,30 @@ Field containing the event classification, such as `process`, `file`, or
`network`.
+
Defaults to `event.category`, as defined in the {ecs-ref}/ecs-event.html[Elastic
-Common Schema (ECS)]. If an index does not contain the `event.category` field,
-this value is required.
+Common Schema (ECS)]. If a data stream or index does not contain the
+`event.category` field, this value is required.
+
+`fetch_size`::
+(Optional, integer)
+Maximum number of events to search at a time for sequence queries. Defaults to
+`1000`.
++
+This value must be greater than `2` but cannot exceed the value of the
+<> setting, which defaults to
+`10000`.
++
+Internally, a sequence query fetches and paginates sets of events to search for
+matches. This parameter controls the size of those sets. This parameter does not
+limit the total number of events searched or the number of matching events
+returned.
++
+A greater `fetch_size` value often increases search speed but uses more memory.
`filter`::
(Optional, <>)
Query, written in query DSL, used to filter the events on which the EQL query
runs.
-`implicit_join_key_field`::
-(Optional, string)
-Reserved for future use.
-
`keep_alive`::
+
--
@@ -219,10 +233,6 @@ If both parameters are specified, only the query parameter is used.
IMPORTANT: This parameter supports a subset of EQL syntax. See
<>.
-`search_after`::
-(Optional, string)
-Reserved for future use.
-
`size`::
(Optional, integer or float)
For <>, the maximum number of matching events to
@@ -231,7 +241,10 @@ return.
For <>, the maximum number of matching sequences
to return.
+
-Defaults to `50`. This value must be greater than `0`.
+Defaults to `10`. This value must be greater than `0`.
++
+NOTE: You cannot use <>, such as `head` or `tail`, to exceed
+this value.
[[eql-search-api-tiebreaker-field]]
`tiebreaker_field`::
@@ -254,8 +267,8 @@ field is used to sort the events in ascending, lexicographic order.
Field containing event timestamp.
Defaults to `@timestamp`, as defined in the
-{ecs-ref}/ecs-event.html[Elastic Common Schema (ECS)]. If an index does not
-contain the `@timestamp` field, this value is required.
+{ecs-ref}/ecs-event.html[Elastic Common Schema (ECS)]. If a data stream or index
+does not contain the `@timestamp` field, this value is required.
Events in the API response are sorted by this field's value, converted to
milliseconds since the https://en.wikipedia.org/wiki/Unix_time[Unix epoch], in
diff --git a/docs/reference/eql/functions.asciidoc b/docs/reference/eql/functions.asciidoc
index 32845a3876abb..969ace1cd37d1 100644
--- a/docs/reference/eql/functions.asciidoc
+++ b/docs/reference/eql/functions.asciidoc
@@ -6,7 +6,7 @@
Function reference
++++
-dev::[]
+experimental::[]
{es} supports the following EQL functions:
diff --git a/docs/reference/eql/get-async-eql-search-api.asciidoc b/docs/reference/eql/get-async-eql-search-api.asciidoc
index 721a8788bb7f4..db2f0bf5ee126 100644
--- a/docs/reference/eql/get-async-eql-search-api.asciidoc
+++ b/docs/reference/eql/get-async-eql-search-api.asciidoc
@@ -7,7 +7,7 @@
Get async EQL search
++++
-dev::[]
+experimental::[]
Returns the current status and available results for an <> or a <EQL
++++
-dev::[]
+experimental::[]
{eql-ref}/index.html[Event Query Language (EQL)] is a query language used for
logs and other event-based data.
diff --git a/docs/reference/eql/limitations.asciidoc b/docs/reference/eql/limitations.asciidoc
index dfd0d1ee65b09..872d9cce05bed 100644
--- a/docs/reference/eql/limitations.asciidoc
+++ b/docs/reference/eql/limitations.asciidoc
@@ -6,15 +6,15 @@
Limitations
++++
-dev::[]
+experimental::[]
[discrete]
[[eql-nested-fields]]
=== EQL search on nested fields is not supported
You cannot use EQL to search the values of a <> field or the
-sub-fields of a `nested` field. However, indices containing `nested` field
-mappings are otherwise supported.
+sub-fields of a `nested` field. However, data streams and indices containing
+`nested` field mappings are otherwise supported.
[discrete]
[[eql-unsupported-syntax]]
@@ -41,5 +41,3 @@ queries that contain:
** {eql-ref}/pipes.html#sort[`sort`]
** {eql-ref}/pipes.html#unique[`unique`]
** {eql-ref}/pipes.html#unique-count[`unique_count`]
-
-* The `until` {eql-ref}/sequences.html[sequence keyword]
\ No newline at end of file
diff --git a/docs/reference/eql/pipes.asciidoc b/docs/reference/eql/pipes.asciidoc
index a61ffd3a20a77..9593f0930cf14 100644
--- a/docs/reference/eql/pipes.asciidoc
+++ b/docs/reference/eql/pipes.asciidoc
@@ -6,7 +6,7 @@
Pipe reference
++++
-dev::[]
+experimental::[]
{es} supports the following EQL pipes:
@@ -17,21 +17,21 @@ dev::[]
[[eql-pipe-head]]
=== `head`
-Returns up to a specified number of events, starting with the earliest matching
-events. Works similarly to the
+Returns up to a specified number of events or sequences, starting with the
+earliest matches. Works similarly to the
https://en.wikipedia.org/wiki/Head_(Unix)[Unix head command].
[%collapsible]
====
*Example*
-The following EQL query returns up to fifty of the earliest powershell
+The following EQL query returns up to three of the earliest powershell
commands.
[source,eql]
----
process where process.name == "powershell.exe"
-| head 50
+| head 3
----
*Syntax*
@@ -44,28 +44,28 @@ head
``::
(Required, integer)
-Maximum number of matching events to return.
+Maximum number of matching events or sequences to return.
====
[discrete]
[[eql-pipe-tail]]
=== `tail`
-Returns up to a specified number of events, starting with the most recent
-matching events. Works similarly to the
+Returns up to a specified number of events or sequences, starting with the most
+recent matches. Works similarly to the
https://en.wikipedia.org/wiki/Tail_(Unix)[Unix tail command].
[%collapsible]
====
*Example*
-The following EQL query returns up to thirty of the most recent `svchost.exe`
+The following EQL query returns up to five of the most recent `svchost.exe`
processes.
[source,eql]
----
process where process.name == "svchost.exe"
-| tail 30
+| tail 5
----
*Syntax*
@@ -78,5 +78,5 @@ tail
`<n>`::
(Required, integer)
-Maximum number of matching events to return.
-====
\ No newline at end of file
+Maximum number of matching events or sequences to return.
+====
diff --git a/docs/reference/eql/requirements.asciidoc b/docs/reference/eql/requirements.asciidoc
index 3f3e581315806..81ec9cd9fa594 100644
--- a/docs/reference/eql/requirements.asciidoc
+++ b/docs/reference/eql/requirements.asciidoc
@@ -6,7 +6,7 @@
Requirements
++++
-dev::[]
+experimental::[]
EQL is schema-less and works well with most common log formats.
@@ -21,10 +21,11 @@ with core ECS fields by default.
[[eql-required-fields]]
=== Required fields
-In {es}, EQL assumes each document in an index corresponds to an event.
+In {es}, EQL assumes each document in a data stream or index corresponds to an
+event.
-To search an index using EQL, each document in the index must contain the
-following field archetypes:
+To search a data stream or index using EQL, each document in the data stream or
+index must contain the following field archetypes:
Event category::
A field containing the event classification, such as `process`, `file`, or
diff --git a/docs/reference/eql/search.asciidoc b/docs/reference/eql/search.asciidoc
index 8fdf1a8ff5a30..21773c4d76262 100644
--- a/docs/reference/eql/search.asciidoc
+++ b/docs/reference/eql/search.asciidoc
@@ -3,16 +3,17 @@
[[eql-search]]
== Run an EQL search
-dev::[]
+experimental::[]
To start using EQL in {es}, first ensure your event data meets
<>. You can then use the <> to search event data stored in one or more {es} indices.
+search API>> to search event data stored in one or more {es} data streams or
+indices.
.*Example*
[%collapsible]
====
-To get started, ingest or add the data to an {es} index.
+To get started, ingest or add the data to an {es} data stream or index.
The following <> request adds some example log data to the
`sec_logs` index. This log data follows the {ecs-ref}[Elastic Common Schema
@@ -31,6 +32,8 @@ PUT /sec_logs/_bulk?refresh
{ "@timestamp": "2020-12-07T11:07:08.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "file", "id": "bYA7gPay", "sequence": 4 }, "file": { "accessed": "2020-12-07T11:07:08.000Z", "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe", "type": "file", "size": 16384 }, "process": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" } }
{"index":{"_index" : "sec_logs", "_id" : "5"}}
{ "@timestamp": "2020-12-07T11:07:09.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "process", "id": "aR3NWVOs", "sequence": 5 }, "process": { "name": "regsvr32.exe", "path": "C:\\Windows\\System32\\regsvr32.exe" } }
+{"index":{"_index" : "sec_logs", "_id" : "6"}}
+{ "@timestamp": "2020-12-07T11:07:10.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "process", "id": "GTSmSqgz0U", "sequence": 6, "type": "termination" }, "process": { "name": "regsvr32.exe", "path": "C:\\Windows\\System32\\regsvr32.exe" } }
----
// TESTSETUP
@@ -99,7 +102,7 @@ https://en.wikipedia.org/wiki/Unix_time[Unix epoch], in ascending order.
"name": "cmd.exe",
"path": "C:\\Windows\\System32\\cmd.exe"
}
- },
+ },
"sort": [
1607252645000
]
@@ -389,6 +392,27 @@ contains the shared `agent.id` value for each matching event.
}
----
// TESTRESPONSE[s/"took": 60/"took": $body.took/]
+
+You can use the <<eql-until-keyword,`until` keyword>> to specify an expiration
+event for sequences. Matching sequences must end before this event.
+
+The following request adds
+`until [ process where event.type == "termination" ]` to the previous EQL query.
+This ensures matching sequences end before a process termination event.
+
+[source,console]
+----
+GET /sec_logs/_eql/search
+{
+ "query": """
+ sequence by agent.id with maxspan=1h
+ [ file where file.name == "cmd.exe" ]
+ [ process where stringContains(process.name, "regsvr32") ]
+ until [ process where event.type == "termination" ]
+ """
+}
+----
+// TEST[s/search/search\?filter_path\=\-\*\.sequences\.\*events\.\*fields/]
====
[discrete]
@@ -547,7 +571,7 @@ tiebreaker for events with the same timestamp.
}
----
// TESTRESPONSE[s/"took": 34/"took": $body.took/]
-<1> The event's <>, converted to
+<1> The event's <>, converted to
milliseconds since the https://en.wikipedia.org/wiki/Unix_time[Unix
epoch]
<2> The event's `event.id` value.
diff --git a/docs/reference/eql/syntax.asciidoc b/docs/reference/eql/syntax.asciidoc
index b826318062788..1b20f5a3c97a4 100644
--- a/docs/reference/eql/syntax.asciidoc
+++ b/docs/reference/eql/syntax.asciidoc
@@ -6,7 +6,7 @@
Syntax reference
++++
-dev::[]
+experimental::[]
[IMPORTANT]
====
@@ -485,7 +485,7 @@ sequence by user.name
----
====
-You can combine the `sequence by` and `with maxspan` keywords to constrain a
+You can combine the `sequence by` and `with maxspan` keywords to constrain a
sequence by both field values and a timespan.
[source,eql]
@@ -513,6 +513,89 @@ sequence by user.name with maxspan=15m
----
====
+[discrete]
+[[eql-until-keyword]]
+==== `until` keyword
+
+You can use the `until` keyword to specify an expiration event for sequences.
+Matching sequences must end before this event, which is not included in the
+results. If this event occurs within a sequence, the sequence is not considered
+a match.
+
+[source,eql]
+----
+sequence
+ [ event_category_1 where condition_1 ]
+ [ event_category_2 where condition_2 ]
+ ...
+until [ event_category_3 where condition_3 ]
+----
+
+.*Example*
+[%collapsible]
+====
+The following EQL sequence query uses the `until` keyword to end sequences
+before a process termination event. Process termination events have an event
+category of `process` and `event.type` value of `termination`.
+
+[source,eql]
+----
+sequence
+ [ file where file.extension == "exe" ]
+ [ process where true ]
+until [ process where event.type == "termination" ]
+----
+====
+
+[TIP]
+====
+The `until` keyword can be helpful when searching for process sequences in
+Windows event logs, such as those ingested using
+{winlogbeat-ref}/index.html[Winlogbeat].
+
+In Windows, a process ID (PID) is unique only while a process is running. After
+a process terminates, its PID can be reused.
+
+You can search for a sequence of events with the same PID value using the `by`
+and `sequence by` keywords.
+
+.*Example*
+[%collapsible]
+=====
+The following EQL query uses the `sequence by` keyword to match a sequence of
+events that share the same `process.pid` value.
+
+[source,eql]
+----
+sequence by process.pid
+ [ process where process.name == "cmd.exe" ]
+ [ process where process.name == "whoami.exe" ]
+----
+=====
+
+However, due to PID reuse, this can result in a matching sequence that
+contains events across unrelated processes. To prevent false positives, you can
+use the `until` keyword to end matching sequences before a process termination
+event.
+
+.*Example*
+[%collapsible]
+=====
+The following EQL query uses the `until` keyword to end sequences before
+`process` events with an `event.type` of `termination`. These events indicate a
+process has been terminated.
+
+[source,eql]
+----
+sequence by process.pid
+ [ process where process.name == "cmd.exe" ]
+ [ process where process.name == "whoami.exe" ]
+until [ process where event.type == "termination" ]
+----
+=====
+
+====
+
[discrete]
[[eql-functions]]
=== Functions
diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc
index 8c172c99202ea..fc78872840046 100644
--- a/docs/reference/glossary.asciidoc
+++ b/docs/reference/glossary.asciidoc
@@ -480,8 +480,9 @@ See the {ref}/indices-shrink-index.html[shrink index API].
[[glossary-snapshot]] snapshot ::
// tag::snapshot-def[]
-A backup taken from a running {es} cluster.
-You can take snapshots of individual indices or of the entire cluster.
+A backup taken from a running {es} cluster.
+A snapshot can include backups of an entire cluster or only data streams and
+indices you specify.
// end::snapshot-def[]
[[glossary-snapshot-lifecycle-policy]] snapshot lifecycle policy ::
diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc
index 97cd88d06e080..8fac808eb76f7 100644
--- a/docs/reference/how-to/indexing-speed.asciidoc
+++ b/docs/reference/how-to/indexing-speed.asciidoc
@@ -62,7 +62,7 @@ gets indexed and when it becomes visible, increasing the
If you have a large amount of data that you want to load all at once into
Elasticsearch, it may be beneficial to set `index.number_of_replicas` to `0` in
-order to speep up indexing. Having no replicas means that losing a single node
+order to speed up indexing. Having no replicas means that losing a single node
may incur data loss, so it is important that the data lives elsewhere so that
this initial load can be retried in case of an issue. Once the initial load is
finished, you can set `index.number_of_replicas` back to its original value.
diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc
index 1c9e0b112425b..49b9e5c238da9 100644
--- a/docs/reference/ilm/apis/explain.asciidoc
+++ b/docs/reference/ilm/apis/explain.asciidoc
@@ -6,12 +6,14 @@
Explain lifecycle
++++
-Shows an index's current lifecycle status.
+Retrieves the current lifecycle status for one or more indices. For data
+streams, the API retrieves the current lifecycle status for the stream's backing
+indices.
[[ilm-explain-lifecycle-request]]
==== {api-request-title}
-`GET /_ilm/explain`
+`GET /_ilm/explain`
[[ilm-explain-lifecycle-prereqs]]
==== {api-prereq-title}
@@ -31,8 +33,12 @@ about any failures.
[[ilm-explain-lifecycle-path-params]]
==== {api-path-parms-title}
-``::
- (Required, string) Identifier for the index.
+``::
+(Required, string)
+Comma-separated list of data streams, indices, and index aliases to target.
+Wildcard expressions (`*`) are supported.
++
+To target all data streams and indices in a cluster, use `_all` or `*`.
[[ilm-explain-lifecycle-query-params]]
==== {api-query-parms-title}
diff --git a/docs/reference/ilm/ilm-and-snapshots.asciidoc b/docs/reference/ilm/ilm-and-snapshots.asciidoc
index ce19388c519d0..8682391df99f5 100644
--- a/docs/reference/ilm/ilm-and-snapshots.asciidoc
+++ b/docs/reference/ilm/ilm-and-snapshots.asciidoc
@@ -1,9 +1,9 @@
[role="xpack"]
[testenv="basic"]
[[index-lifecycle-and-snapshots]]
-== Restore a managed index
+== Restore a managed data stream or index
-When you restore a snapshot that contains managed indices,
+When you restore a managed index or a data stream with managed backing indices,
{ilm-init} automatically resumes executing the restored indices' policies.
A restored index's `min_age` is relative to when it was originally created or rolled over,
not its restoration time.
@@ -12,8 +12,8 @@ an index has been restored from a snapshot.
If you restore an index that was accidentally deleted half way through its month long lifecycle,
it proceeds normally through the last two weeks of its lifecycle.
-In some cases, you might want to restore a managed index and
-prevent {ilm-init} from immediately executing its policy.
+In some cases, you might want to prevent {ilm-init} from immediately executing
+its policy on a restored index.
For example, if you are restoring an older snapshot you might want to
prevent it from rapidly progressing through all of its lifecycle phases.
You might want to add or update documents before it's marked read-only or shrunk,
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index c71c5ecc4b365..3ee7f13435ceb 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -25,9 +25,7 @@ include::search/index.asciidoc[]
include::query-dsl.asciidoc[]
-ifdef::permanently-unreleased-branch[]
include::eql/index.asciidoc[]
-endif::[]
include::sql/index.asciidoc[]
diff --git a/docs/reference/indices/add-alias.asciidoc b/docs/reference/indices/add-alias.asciidoc
index 231b5712b0e79..75dbd41a45de1 100644
--- a/docs/reference/indices/add-alias.asciidoc
+++ b/docs/reference/indices/add-alias.asciidoc
@@ -37,6 +37,8 @@ to add to the alias.
+
To add all indices in the cluster to the alias,
use a value of `_all`.
++
+NOTE: You cannot add <<data-streams,data streams>> to an index alias.
``::
(Required, string)
diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc
index 5d46b249736b5..41d6cf2d01913 100644
--- a/docs/reference/indices/aliases.asciidoc
+++ b/docs/reference/indices/aliases.asciidoc
@@ -75,6 +75,8 @@ used to perform the action.
+
If the `indices` parameter is not specified,
this parameter is required.
++
+NOTE: You cannot add <<data-streams,data streams>> to an index alias.
`indices`::
(Array)
@@ -83,6 +85,8 @@ used to perform the action.
+
If the `index` parameter is not specified,
this parameter is required.
++
+NOTE: You cannot add <<data-streams,data streams>> to an index alias.
`alias`::
(String)
diff --git a/docs/reference/indices/apis/reload-analyzers.asciidoc b/docs/reference/indices/apis/reload-analyzers.asciidoc
index b9b5bc79732b3..38195f72bc50d 100644
--- a/docs/reference/indices/apis/reload-analyzers.asciidoc
+++ b/docs/reference/indices/apis/reload-analyzers.asciidoc
@@ -7,6 +7,8 @@
++++
Reloads an index's <> and their resources.
+For data streams, the API reloads search analyzers and resources for the
+stream's backing indices.
[source,console]
--------------------------------------------------
@@ -18,9 +20,9 @@ POST /twitter/_reload_search_analyzers
[[indices-reload-analyzers-api-request]]
=== {api-request-title}
-`POST //_reload_search_analyzers`
+`POST //_reload_search_analyzers`
-`GET //_reload_search_analyzers`
+`GET //_reload_search_analyzers`
[discrete]
@@ -63,10 +65,12 @@ in the future.
[[indices-reload-analyzers-api-path-params]]
=== {api-path-parms-title}
-``::
+``::
(Required, string)
-Comma-separated list or wildcard expression of index names
-used to limit the request.
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
++
+To target all data streams and indices in a cluster, use `_all` or `*`.
[discrete]
diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc
index ba83b8baf341b..af3696e38a0f2 100644
--- a/docs/reference/indices/clearcache.asciidoc
+++ b/docs/reference/indices/clearcache.asciidoc
@@ -4,7 +4,8 @@
Clear cache
++++
-Clears caches for one or more indices.
+Clears the caches of one or more indices. For data streams, the API clears the
+caches of the stream's backing indices.
[source,console]
----
@@ -16,7 +17,7 @@ POST /twitter/_cache/clear
[[clear-cache-api-request]]
==== {api-request-title}
-`POST //_cache/clear`
+`POST //_cache/clear`
`POST /_cache/clear`
@@ -24,7 +25,13 @@ POST /twitter/_cache/clear
[[clear-cache-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
++
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
[[clear-cache-api-query-params]]
@@ -127,7 +134,7 @@ POST /twitter/_cache/clear?fields=foo,bar <1>
[[clear-cache-api-multi-ex]]
-===== Clear caches for several indices
+===== Clear caches for several data streams and indices
[source,console]
----
@@ -137,7 +144,7 @@ POST /kimchy,elasticsearch/_cache/clear
[[clear-cache-api-all-ex]]
-===== Clear caches for all indices
+===== Clear caches for all data streams and indices
[source,console]
----
diff --git a/docs/reference/indices/create-data-stream.asciidoc b/docs/reference/indices/create-data-stream.asciidoc
index 4651c2bc2aef0..d195a514b7b85 100644
--- a/docs/reference/indices/create-data-stream.asciidoc
+++ b/docs/reference/indices/create-data-stream.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[indices-create-data-stream]]
=== Create data stream API
++++
diff --git a/docs/reference/indices/delete-data-stream.asciidoc b/docs/reference/indices/delete-data-stream.asciidoc
index ce9cdb7a57746..62a432cfabac3 100644
--- a/docs/reference/indices/delete-data-stream.asciidoc
+++ b/docs/reference/indices/delete-data-stream.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[indices-delete-data-stream]]
=== Delete data stream API
++++
diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc
index 585ca772c103a..0f0e9ed2faa0c 100644
--- a/docs/reference/indices/get-data-stream.asciidoc
+++ b/docs/reference/indices/get-data-stream.asciidoc
@@ -1,3 +1,4 @@
+[role="xpack"]
[[indices-get-data-stream]]
=== Get data stream API
++++
@@ -84,9 +85,10 @@ GET /_data_stream/my-data-stream
==== {api-path-parms-title}
``::
-(Required, string)
-Name of the data stream to retrieve.
-Wildcard (`*`) expressions are supported.
+(Optional, string)
+Comma-separated list of data stream names used to limit the request. Wildcard
+(`*`) expressions are supported. If omitted, all data streams will be
+returned.
[role="child_attributes"]
[[get-data-stream-api-response-body]]
@@ -105,19 +107,16 @@ Name of the data stream.
`timestamp_field`::
(object)
-Contains information about the data stream's timestamp field.
+Contains information about the data stream's `@timestamp` field.
+
.Properties of `timestamp_field`
[%collapsible%open]
=====
`name`::
(string)
-Name of the data stream's timestamp field. This field must be included in every
-document indexed to the data stream.
-
-`mapping`::
-(<>)
-Field mapping for the data stream's timestamp field.
+Name of the data stream's timestamp field, which must be `@timestamp`. The
+`@timestamp` field must be included in every document indexed to the data
+stream.
=====
`indices`::
@@ -205,10 +204,7 @@ The API returns the following response:
{
"name": "my-data-stream",
"timestamp_field": {
- "name": "@timestamp",
- "mapping": {
- "type": "date"
- }
+ "name": "@timestamp"
},
"indices": [
{
@@ -228,10 +224,7 @@ The API returns the following response:
{
"name": "my-data-stream_two",
"timestamp_field": {
- "name": "@timestamp",
- "mapping": {
- "type": "date"
- }
+ "name": "@timestamp"
},
"indices": [
{
diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc
index 129fa89c842b6..40e99b5c29993 100644
--- a/docs/reference/indices/get-index.asciidoc
+++ b/docs/reference/indices/get-index.asciidoc
@@ -4,7 +4,8 @@
Get index
++++
-Returns information about one or more indexes.
+Returns information about one or more indices. For data streams, the API
+returns information about the stream's backing indices.
[source,console]
--------------------------------------------------
@@ -15,20 +16,19 @@ GET /twitter
[[get-index-api-request]]
==== {api-request-title}
-`GET /`
+`GET /`
[[get-index-api-path-params]]
==== {api-path-parms-title}
-``::
+``::
+(Required, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
+
---
-(Required, string) Comma-separated list or wildcard expression of index names
-used to limit the request.
-
-Use a value of `_all` to retrieve information for all indices in the cluster.
---
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
[[get-index-api-query-params]]
diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc
index b602b9fb7691f..a86b1182b9fad 100644
--- a/docs/reference/indices/get-mapping.asciidoc
+++ b/docs/reference/indices/get-mapping.asciidoc
@@ -4,7 +4,8 @@
Get mapping
++++
-Retrieves <> for indices in a cluster.
+Retrieves <> for one or more indices. For data
+streams, the API retrieves mappings for the stream's backing indices.
[source,console]
--------------------------------------------------
@@ -17,13 +18,19 @@ GET /twitter/_mapping
`GET /_mapping`
-`GET //_mapping`
+`GET //_mapping`
[[get-mapping-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
++
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
[[get-mapping-api-query-params]]
@@ -48,12 +55,13 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
==== {api-examples-title}
[[get-mapping-api-multi-ex]]
-===== Multiple indices
+===== Multiple data streams and indices
-The get mapping API can be used to get more than one index with a
+The get mapping API can be used to get more than one data stream or index with a
single call. General usage of the API follows the following syntax:
-`host:port/{index}/_mapping` where `{index}` can accept a comma-separated
-list of names. To get mappings for all indices you can use `_all` for `{index}`.
+`host:port//_mapping` where `` can accept a comma-separated
+list of names. To get mappings for all data streams and indices in a cluster, use `_all` or `*` for ``
+or omit the `` parameter.
The following are some examples:
[source,console]
@@ -63,11 +71,13 @@ GET /twitter,kimchy/_mapping
// TEST[setup:twitter]
// TEST[s/^/PUT kimchy\nPUT book\n/]
-If you want to get mappings of all indices and types then the following
-two examples are equivalent:
+If you want to get mappings of all indices in a cluster, the following
+examples are equivalent:
[source,console]
--------------------------------------------------
+GET /*/_mapping
+
GET /_all/_mapping
GET /_mapping
diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc
index 19defec6743b6..58dab6e9f01d3 100644
--- a/docs/reference/indices/get-settings.asciidoc
+++ b/docs/reference/indices/get-settings.asciidoc
@@ -4,7 +4,8 @@
Get index settings
++++
-Returns setting information for an index.
+Returns setting information for one or more indices. For data streams, the API
+returns setting information for the stream's backing indices.
[source,console]
--------------------------------------------------
@@ -16,17 +17,21 @@ GET /twitter/_settings
[[get-index-settings-api-request]]
==== {api-request-title}
-`GET //_settings`
+`GET //_settings`
-`GET //_settings/`
+`GET //_settings/`
[[get-index-settings-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
+
-Use a value of `_all` to retrieve information for all indices in the cluster.
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
``::
(Optional, string) Comma-separated list or wildcard expression of setting names
@@ -58,10 +63,10 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout]
[[get-index-settings-api-example]]
==== {api-examples-title}
-===== Multiple indices
+===== Multiple data streams and indices
-The get settings API can be used to get settings for more than one index with a
-single call. To get settings for all indices you can use `_all` for ``.
+The get settings API can be used to get settings for more than one data stream or index with a
+single call. To get settings for all indices in a cluster, you can use `_all` or `*` for ``.
Wildcard expressions are also supported. The following are some examples:
[source,console]
diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc
index a16cf6e641919..8c97259e64109 100644
--- a/docs/reference/indices/recovery.asciidoc
+++ b/docs/reference/indices/recovery.asciidoc
@@ -5,7 +5,9 @@
++++
-Returns information about ongoing and completed shard recoveries.
+Returns information about ongoing and completed shard recoveries for one or more
+indices. For data streams, the API returns information for the stream's backing
+indices.
[source,console]
----
@@ -17,7 +19,7 @@ GET /twitter/_recovery
[[index-recovery-api-request]]
==== {api-request-title}
-`GET //_recovery`
+`GET //_recovery`
`GET /_recovery`
@@ -41,9 +43,13 @@ include::{es-repo-dir}/glossary.asciidoc[tag=recovery-triggers]
[[index-recovery-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
+
-Use a value of `_all` to retrieve information for all indices in the cluster.
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
[[index-recovery-api-query-params]]
@@ -165,7 +171,7 @@ Statistics about time to open and start the index.
[[index-recovery-api-multi-ex]]
-===== Get recovery information for several indices
+===== Get recovery information for several data streams and indices
[source,console]
--------------------------------------------------
@@ -175,7 +181,7 @@ GET index1,index2/_recovery?human
[[index-recovery-api-all-ex]]
-===== Get segment information for all indices
+===== Get segment information for all data streams and indices in a cluster
//////////////////////////
Here we create a repository and snapshot index1 in
diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc
index 8d0c5655f4441..042f49d2ac0ca 100644
--- a/docs/reference/indices/refresh.asciidoc
+++ b/docs/reference/indices/refresh.asciidoc
@@ -4,7 +4,8 @@
Refresh
++++
-Refreshes one or more indices.
+Refreshes one or more indices. For data streams, the API refreshes the stream's
+backing indices.
[source,console]
----
@@ -16,9 +17,9 @@ POST /twitter/_refresh
[[refresh-api-request]]
==== {api-request-title}
-`POST /_refresh`
+`POST /_refresh`
-`GET /_refresh`
+`GET /_refresh`
`POST /_refresh`
@@ -29,6 +30,7 @@ POST /twitter/_refresh
==== {api-description-title}
Use the refresh API to explicitly refresh one or more indices.
+If the request targets a data stream, it refreshes the stream's backing indices.
A _refresh_ makes all operations performed on an index
since the last refresh
available for search.
@@ -87,7 +89,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailab
[[refresh-api-multiple-ex]]
-===== Refresh several indices
+===== Refresh several data streams and indices
[source,console]
----
@@ -97,7 +99,7 @@ POST /kimchy,elasticsearch/_refresh
[[refresh-api-all-ex]]
-===== Refresh all indices
+===== Refresh all data streams and indices in a cluster
[source,console]
----
diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc
index f8d084ae34f42..d19d683ade050 100644
--- a/docs/reference/indices/segments.asciidoc
+++ b/docs/reference/indices/segments.asciidoc
@@ -5,7 +5,8 @@
++++
Returns low-level information about the https://lucene.apache.org/core/[Lucene]
-segments in index shards.
+segments in index shards. For data streams, the API returns information about
+the stream's backing indices.
[source,console]
----
@@ -17,7 +18,7 @@ GET /twitter/_segments
[[index-segments-api-request]]
==== {api-request-title}
-`GET //_segments`
+`GET //_segments`
`GET /_segments`
@@ -25,7 +26,13 @@ GET /twitter/_segments
[[index-segments-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
++
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
[[index-segments-api-query-params]]
@@ -102,7 +109,7 @@ Contains information about whether high compression was enabled.
==== {api-examples-title}
-===== Get segment information for a specific index
+===== Get segment information for a specific data stream or index
[source,console]
--------------------------------------------------
@@ -111,7 +118,7 @@ GET /test/_segments
// TEST[s/^/PUT test\n{"settings":{"number_of_shards":1, "number_of_replicas": 0}}\nPOST test\/_doc\?refresh\n{"test": "test"}\n/]
-===== Get segment information for several indices
+===== Get segment information for several data streams and indices
[source,console]
--------------------------------------------------
@@ -120,7 +127,7 @@ GET /test1,test2/_segments
// TEST[s/^/PUT test1\nPUT test2\n/]
-===== Get segment information for all indices
+===== Get segment information for all data streams and indices in a cluster
[source,console]
--------------------------------------------------
diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc
index 88adc86e46e69..ce94c6d3cbba7 100644
--- a/docs/reference/indices/stats.asciidoc
+++ b/docs/reference/indices/stats.asciidoc
@@ -4,7 +4,8 @@
Index stats
++++
-Returns statistics for an index.
+Returns statistics for one or more indices. For data streams, the API retrieves
+statistics for the stream's backing indices.
[source,console]
----
@@ -16,9 +17,9 @@ GET /twitter/_stats
[[index-stats-api-request]]
==== {api-request-title}
-`GET //_stats/`
+`GET //_stats/`
-`GET //_stats`
+`GET //_stats`
`GET /_stats`
@@ -26,7 +27,8 @@ GET /twitter/_stats
[[index-stats-api-desc]]
==== {api-description-title}
-Use the index stats API to get high-level aggregation and statistics for an index.
+Use the index stats API to get high-level aggregation and statistics for one or
+more data streams and indices.
By default,
the returned statistics are index-level
@@ -51,10 +53,13 @@ to which the shard contributed.
[[index-stats-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
+
-To retrieve statistics for all indices,
-use a value of `_all` or `*` or omit this parameter.
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-metric]
@@ -91,7 +96,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=include-unloaded-segme
[[index-stats-api-multiple-ex]]
-===== Get statistics for multiple indices
+===== Get statistics for multiple data streams and indices
[source,console]
--------------------------------------------------
@@ -101,7 +106,7 @@ GET /index1,index2/_stats
[[index-stats-api-all-ex]]
-===== Get statistics for all indices
+===== Get statistics for all data streams and indices in a cluster
[source,console]
--------------------------------------------------
diff --git a/docs/reference/licensing/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc
index b411f17b9e3c9..8517c313a92e3 100644
--- a/docs/reference/licensing/get-trial-status.asciidoc
+++ b/docs/reference/licensing/get-trial-status.asciidoc
@@ -6,7 +6,7 @@
Get trial status
++++
-This API enables you to check the status of your trial license.
+Enables you to check the status of your trial.
[float]
==== Request
@@ -16,16 +16,14 @@ This API enables you to check the status of your trial license.
[float]
==== Description
-If you want to try the features that are included in a platinum license, you can
-start a 30-day trial.
+If you want to try all the subscription features, you can start a 30-day trial.
-NOTE: You are allowed to initiate a trial license only if your cluster has not
-already activated a trial license for the current major product version. For
-example, if you have already activated a trial for v6.0, you cannot start a new
-trial until v7.0. You can, however, contact `info@elastic.co` to request an
-extended trial license.
+NOTE: You are allowed to initiate a trial only if your cluster has not
+already activated a trial for the current major product version. For example, if
+you have already activated a trial for v6.0, you cannot start a new trial until
+v7.0. You can, however, request an extended trial at {extendtrial}.
-For more information about the different types of licenses, see
+For more information about features and subscriptions, see
https://www.elastic.co/subscriptions.
==== Authorization
diff --git a/docs/reference/licensing/start-trial.asciidoc b/docs/reference/licensing/start-trial.asciidoc
index 62123d2ab4253..4401feb3062b0 100644
--- a/docs/reference/licensing/start-trial.asciidoc
+++ b/docs/reference/licensing/start-trial.asciidoc
@@ -6,7 +6,7 @@
Start trial
++++
-This API starts a 30-day trial license.
+Starts a 30-day trial.
[float]
==== Request
@@ -16,19 +16,17 @@ This API starts a 30-day trial license.
[float]
==== Description
-The `start trial` API enables you to upgrade from a basic license to a 30-day
-trial license, which gives access to the platinum features.
+The `start trial` API enables you to start a 30-day trial, which gives access to
+all subscription features.
-NOTE: You are allowed to initiate a trial license only if your cluster has not
-already activated a trial license for the current major product version. For
-example, if you have already activated a trial for v6.0, you cannot start a new
-trial until v7.0. You can, however, contact `info@elastic.co` to request an
-extended trial license.
+NOTE: You are allowed to initiate a trial only if your cluster has not already
+activated a trial for the current major product version. For example, if you
+have already activated a trial for v6.0, you cannot start a new trial until v7.0.
+You can, however, request an extended trial at {extendtrial}.
-To check the status of your trial license, use the following API:
-<>.
+To check the status of your trial, use <>.
-For more information about the different types of licenses, see
+For more information about features and subscriptions, see
https://www.elastic.co/subscriptions.
==== Authorization
@@ -40,8 +38,8 @@ For more information, see
[float]
==== Examples
-The following example starts a 30-day trial license. The acknowledge
-parameter is required as you are initiating a license that will expire.
+The following example starts a 30-day trial. The acknowledge parameter is
+required as you are initiating a license that will expire.
[source,console]
------------------------------------------------------------
diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc
index ec5d2b930656e..bd20f09aa17a4 100644
--- a/docs/reference/licensing/update-license.asciidoc
+++ b/docs/reference/licensing/update-license.asciidoc
@@ -21,7 +21,7 @@ Updates the license for your {es} cluster.
If {es} {security-features} are enabled, you need `manage` cluster privileges to
install the license.
-If {es} {security-features} are enabled and you are installing a gold or platinum
+If {es} {security-features} are enabled and you are installing a gold or higher
license, you must enable TLS on the transport networking layer before you
install the license. See <>.
diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc
index 46a204ccddfde..c0a4c0b29a654 100644
--- a/docs/reference/mapping/fields/routing-field.asciidoc
+++ b/docs/reference/mapping/fields/routing-field.asciidoc
@@ -43,6 +43,9 @@ GET my_index/_search
<1> Querying on the `_routing` field (also see the <>)
+NOTE: Data streams do not support custom routing. Instead, target the
+appropriate backing index for the stream.
+
==== Searching with custom routing
Custom routing can reduce the impact of searches. Instead of having to fan
diff --git a/docs/reference/mapping/types/histogram.asciidoc b/docs/reference/mapping/types/histogram.asciidoc
index b1c98834d12ca..4e4b923d5a79a 100644
--- a/docs/reference/mapping/types/histogram.asciidoc
+++ b/docs/reference/mapping/types/histogram.asciidoc
@@ -41,6 +41,7 @@ following aggregations and queries:
* <> aggregation
* <> aggregation
* <> aggregation
+* <> aggregation
* <> query
[[mapping-types-histogram-building-histogram]]
diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc
index 5d09c5b9568b8..2686793b0ec85 100644
--- a/docs/reference/migration/migrate_8_0/security.asciidoc
+++ b/docs/reference/migration/migrate_8_0/security.asciidoc
@@ -219,7 +219,7 @@ on startup.
[[builtin-users-changes]]
==== Changes to built-in users
-.The `kibana` user has been renamed `kibana_system`.
+.The `kibana` user has been replaced by `kibana_system`.
[%collapsible]
====
*Details* +
@@ -243,6 +243,9 @@ then you should update to use the new `kibana_system` user instead:
--------------------------------------------------
elasticsearch.username: kibana_system
--------------------------------------------------
+
+IMPORTANT: The new `kibana_system` user does not preserve the previous `kibana`
+user password. You must explicitly set a password for the `kibana_system` user.
====
[discrete]
diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc
index 1658af07d6d0b..c574260faa910 100644
--- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc
+++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc
@@ -326,6 +326,14 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=description-dfa]
`dest`::
(Required, object)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=dest]
+
+`max_num_threads`::
+(Optional, integer)
+The maximum number of threads to be used by the analysis.
+The default value is `1`. Using more threads may decrease the time
+necessary to complete the analysis at the cost of using more CPU.
+Note that the process may use additional threads for operational
+functionality other than the analysis itself.
`model_memory_limit`::
(Optional, string)
@@ -507,7 +515,8 @@ The API returns the following result:
"model_memory_limit": "1gb",
"create_time" : 1562265491319,
"version" : "8.0.0",
- "allow_lazy_start" : false
+ "allow_lazy_start" : false,
+ "max_num_threads": 1
}
----
// TESTRESPONSE[s/1562265491319/$body.$_path/]
diff --git a/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc
index 0c3a36e95e416..3cb11d7b32b06 100644
--- a/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc
+++ b/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc
@@ -71,6 +71,14 @@ the `starting` state until sufficient {ml} node capacity is available.
(Optional, string)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=description-dfa]
+`max_num_threads`::
+(Optional, integer)
+The maximum number of threads to be used by the analysis.
+The default value is `1`. Using more threads may decrease the time
+necessary to complete the analysis at the cost of using more CPU.
+Note that the process may use additional threads for operational
+functionality other than the analysis itself.
+
`model_memory_limit`::
(Optional, string)
The approximate maximum amount of memory resources that are permitted for
diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index ea405262e7542..c3d7041f2355e 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -17,6 +17,7 @@ requests to the appropriate node.
By default, a node is all of the following types: master-eligible, data, ingest,
and (if available) machine learning. All data nodes are also transform nodes.
// end::modules-node-description-tag[]
+
TIP: As the cluster grows and in particular if you have large {ml} jobs or
{ctransforms}, consider separating dedicated master-eligible nodes from
dedicated data nodes, {ml} nodes, and {transform} nodes.
diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc
index ecd8d3dbca0c1..cde3e82e90fde 100644
--- a/docs/reference/query-dsl/match-query.asciidoc
+++ b/docs/reference/query-dsl/match-query.asciidoc
@@ -78,7 +78,7 @@ expand. Defaults to `50`.
(Optional, integer) Number of beginning characters left unchanged for fuzzy
matching. Defaults to `0`.
-`transpositions`::
+`fuzzy_transpositions`::
(Optional, boolean) If `true`, edits for fuzzy matching include
transpositions of two adjacent characters (ab → ba). Defaults to `true`.
diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc
index 61abcdb7e3c9a..5365352f0d16e 100644
--- a/docs/reference/rest-api/info.asciidoc
+++ b/docs/reference/rest-api/info.asciidoc
@@ -142,6 +142,10 @@ Example response:
"watcher" : {
"available" : true,
"enabled" : true
+ },
+ "data_streams" : {
+ "available" : true,
+ "enabled" : true
}
},
"tagline" : "You know, for X"
diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc
index 0c5677861984d..6bd467273265f 100644
--- a/docs/reference/rest-api/usage.asciidoc
+++ b/docs/reference/rest-api/usage.asciidoc
@@ -278,6 +278,12 @@ GET /_xpack/usage
"string_stats_usage" : 0,
"moving_percentiles_usage" : 0
}
+ },
+ "data_streams" : {
+ "available" : true,
+ "enabled" : true,
+ "data_streams" : 0,
+ "indices_count" : 0
}
}
------------------------------------------------------------
diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc
index 9fb770d33c657..a79048f7d3616 100644
--- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc
+++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc
@@ -14,7 +14,7 @@ experimental[]
[[rollup-get-rollup-index-caps-request]]
==== {api-request-title}
-`GET /_rollup/data`
+`GET /_rollup/data`
[[rollup-get-rollup-index-caps-prereqs]]
==== {api-prereq-title}
@@ -38,9 +38,9 @@ and what aggregations can be performed on each job?
[[rollup-get-rollup-index-caps-path-params]]
==== {api-path-parms-title}
-``::
- (Required, string) Index or index-pattern of concrete rollup indices to check
- for capabilities.
+``::
+(Required, string) Data stream or index to check for rollup capabilities.
+Wildcard (`*`) expressions are supported.
[[rollup-get-rollup-index-caps-example]]
==== {api-examples-title}
diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc
index 3bb4d1ac2faa9..ab496a8f1116f 100644
--- a/docs/reference/rollup/apis/rollup-search.asciidoc
+++ b/docs/reference/rollup/apis/rollup-search.asciidoc
@@ -6,14 +6,14 @@
Rollup search
++++
-Enables searching rolled-up data using the standard query DSL.
+Enables searching rolled-up data using the standard query DSL.
experimental[]
[[rollup-search-request]]
==== {api-request-title}
-`GET /_rollup_search`
+`GET /_rollup_search`
[[rollup-search-desc]]
==== {api-description-title}
@@ -27,20 +27,28 @@ expect given the original query.
[[rollup-search-path-params]]
==== {api-path-parms-title}
-``::
- (Required, string) Index, indices or index-pattern to execute a rollup search
- against. This can include both rollup and non-rollup indices.
+``::
++
+--
+(Required, string)
+Comma-separated list of data streams and indices used to limit
+the request. Wildcard expressions (`*`) are supported.
-Rules for the `index` parameter:
+This target can include both rollup and non-rollup indices.
-- At least one index/index-pattern must be specified. This can be either a
-rollup or non-rollup index. Omitting the index parameter, or using `_all`, is
-not permitted.
-- Multiple non-rollup indices may be specified
+Rules for the `` parameter:
+
+- At least one data stream, index, or wildcard expression must be specified.
+This target can include a rollup or non-rollup index. For data streams, the
+stream's backing indices can only serve as non-rollup indices. Omitting the
+`` parameter or using `_all` is not permitted.
+- Multiple non-rollup indices may be specified.
- Only one rollup index may be specified. If more than one are supplied, an
exception occurs.
-- Index patterns may be used, but if they match more than one rollup index an
-exception occurs.
+- Wildcard expressions may be used, but if they match more than one rollup
+index, an
+indices or data streams.
+--
[[rollup-search-request-body]]
==== {api-request-body-title}
diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc
index 4ece1b36d0362..944ec64bfab86 100644
--- a/docs/reference/search.asciidoc
+++ b/docs/reference/search.asciidoc
@@ -168,16 +168,12 @@ include::search/suggesters.asciidoc[]
include::search/multi-search.asciidoc[]
-ifdef::permanently-unreleased-branch[]
-
include::eql/eql-search-api.asciidoc[]
include::eql/get-async-eql-search-api.asciidoc[]
include::eql/delete-async-eql-search-api.asciidoc[]
-endif::[]
-
include::search/count.asciidoc[]
include::search/validate.asciidoc[]
diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc
index 7248ec50fe828..1c42a454bd0a1 100644
--- a/docs/reference/search/field-caps.asciidoc
+++ b/docs/reference/search/field-caps.asciidoc
@@ -2,6 +2,8 @@
=== Field Capabilities API
Allows you to retrieve the capabilities of fields among multiple indices.
+For data streams, the API returns field capabilities among the stream's backing
+indices.
[source,console]
--------------------------------------------------
@@ -16,9 +18,9 @@ GET /_field_caps?fields=rating
`POST /_field_caps`
-`GET //_field_caps`
+`GET //_field_caps`
-`POST //_field_caps`
+`POST //_field_caps`
[[search-field-caps-api-desc]]
@@ -32,7 +34,13 @@ fields among multiple indices.
[[search-field-caps-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases used to limit
+the request. Wildcard expressions (`*`) are supported.
++
+To target all data streams and indices in a cluster, omit this parameter or use
+`_all` or `*`.
[[search-field-caps-api-query-params]]
@@ -104,7 +112,7 @@ field types are all described as the `keyword` family type.
==== {api-examples-title}
-The request can be restricted to specific indices:
+The request can be restricted to specific data streams and indices:
[source,console]
--------------------------------------------------
diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc
index 3e61f43b9b5fe..9231e1e854a17 100644
--- a/docs/reference/search/validate.asciidoc
+++ b/docs/reference/search/validate.asciidoc
@@ -13,7 +13,7 @@ GET twitter/_validate/query?q=user:foo
[[search-validate-api-request]]
==== {api-request-title}
-`GET //_validate/`
+`GET //_validate/`
[[search-validate-api-desc]]
@@ -27,7 +27,13 @@ request body.
[[search-validate-api-path-params]]
==== {api-path-parms-title}
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+``::
+(Optional, string)
+Comma-separated list of data streams, indices, and index aliases to search.
+Wildcard (`*`) expressions are supported.
++
+To search all data streams or indices in a cluster, omit this parameter or use
+`_all` or `*`.
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=query]
diff --git a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc
index 5ca9cc5a4e085..9081eac4c5523 100644
--- a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc
+++ b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc
@@ -15,7 +15,7 @@ Clear the cache of searchable snapshots.
`POST /_searchable_snapshots/cache/clear`
-`POST //_searchable_snapshots/cache/clear`
+`POST //_searchable_snapshots/cache/clear`
[[searchable-snapshots-api-clear-cache-prereqs]]
==== {api-prereq-title}
@@ -32,9 +32,9 @@ For more information, see <>.
[[searchable-snapshots-api-clear-cache-path-params]]
==== {api-path-parms-title}
-``::
+``::
(Optional, string)
-A comma-separated list of index names for which the
+A comma-separated list of data streams and indices for which the
searchable snapshots cache must be cleared.
diff --git a/docs/reference/searchable-snapshots/apis/get-stats.asciidoc b/docs/reference/searchable-snapshots/apis/get-stats.asciidoc
index 4a8914553a55f..c3b17a5ca1e48 100644
--- a/docs/reference/searchable-snapshots/apis/get-stats.asciidoc
+++ b/docs/reference/searchable-snapshots/apis/get-stats.asciidoc
@@ -15,7 +15,7 @@ Retrieve various statistics about searchable snapshots.
`GET /_searchable_snapshots/stats`
-`GET //_searchable_snapshots/stats`
+`GET //_searchable_snapshots/stats`
[[searchable-snapshots-api-stats-prereqs]]
==== {api-prereq-title}
@@ -32,9 +32,9 @@ For more information, see <>.
[[searchable-snapshots-api-stats-path-params]]
==== {api-path-parms-title}
-``::
+``::
(Optional, string)
-A comma-separated list of index names for which the
+A comma-separated list of data streams and indices for which the
statistics must be retrieved.
diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc
index c5dad99e6411c..87734a86fd462 100644
--- a/docs/reference/setup/restart-cluster.asciidoc
+++ b/docs/reference/setup/restart-cluster.asciidoc
@@ -36,8 +36,7 @@ POST /_flush
. *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional)
+
--
-{ml-cap} features require a platinum license or higher. For more information about Elastic
-license levels, see https://www.elastic.co/subscriptions[the subscription page].
+{ml-cap} features require specific {subscriptions}[subscriptions].
You have two options to handle {ml} jobs and {dfeeds} when you shut down a
cluster:
diff --git a/docs/reference/slm/apis/index.asciidoc b/docs/reference/slm/apis/index.asciidoc
index 22920205edf1f..f3359ffaad19a 100644
--- a/docs/reference/slm/apis/index.asciidoc
+++ b/docs/reference/slm/apis/index.asciidoc
@@ -4,12 +4,12 @@
== Manage the snapshot lifecycle
You can set up snapshot lifecycle policies to automate the timing, frequency, and retention of snapshots.
-Snapshot policies can apply to multiple indices.
+Snapshot policies can apply to multiple data streams and indices.
The snapshot lifecycle management (SLM) <> provide
the building blocks for the snapshot policy features that are part of the Management application in {kib}.
-The Snapshot and Restore UI makes it easy to set up policies, register snapshot repositories,
-view and manage snapshots, and restore indices.
+The Snapshot and Restore UI makes it easy to set up policies, register snapshot repositories,
+view and manage snapshots, and restore data streams or indices.
You can stop and restart SLM to temporarily pause automatic backups while performing
upgrades or other maintenance.
diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc
index b993a022eb38f..6c531c7da1e64 100644
--- a/docs/reference/slm/apis/slm-put.asciidoc
+++ b/docs/reference/slm/apis/slm-put.asciidoc
@@ -56,7 +56,7 @@ Configuration for each snapshot created by the policy.
====
`ignore_unavailable`::
(Optional, boolean)
-If `true`, missing indices do *not* cause snapshot creation to fail and return
+If `true`, missing data streams or indices do *not* cause snapshot creation to fail and return
an error. Defaults to `false`.
`include_global_state`::
@@ -65,14 +65,15 @@ If `true`, cluster states are included in snapshots. Defaults to `false`.
`indices`::
(Optional, array of strings)
-Array of index names or wildcard pattern of index names included in snapshots. It
-supports <> expressions.
+Array of data streams and indices to include in snapshots.
+<> and wildcard (`*`) expressions are
+supported.
====
`name`::
(Required, string)
-Name automatically assigned to each snapshot created by the policy. This value
-supports the same <> supported in index names.
+Name automatically assigned to each snapshot created by the policy.
+<> is supported.
To prevent conflicting snapshot names, a UUID is automatically appended to each
snapshot name.
@@ -141,7 +142,7 @@ PUT /_slm/policy/daily-snapshots
<2> The name each snapshot should be given
<3> Which repository to take the snapshot in
<4> Any extra snapshot configuration
-<5> Which indices the snapshot should contain
+<5> Data streams and indices the snapshot should contain
<6> Optional retention configuration
<7> Keep snapshots for 30 days
<8> Always keep at least 5 successful snapshots, even if they're more than 30 days old
diff --git a/docs/reference/slm/apis/slm-stop.asciidoc b/docs/reference/slm/apis/slm-stop.asciidoc
index a311d5359288e..c902f3b8123be 100644
--- a/docs/reference/slm/apis/slm-stop.asciidoc
+++ b/docs/reference/slm/apis/slm-stop.asciidoc
@@ -27,7 +27,7 @@ cluster privilege to use this API. For more information, see
Halts all {slm} ({slm-init}) operations and stops the {slm-init} plugin.
This is useful when you are performing maintenance on a cluster and need to
-prevent {slm-init} from performing any actions on your indices.
+prevent {slm-init} from performing any actions on your data streams or indices.
Stopping {slm-init} does not stop any snapshots that are in progress.
You can manually trigger snapshots with the <> even if {slm-init} is stopped.
diff --git a/docs/reference/slm/getting-started-slm.asciidoc b/docs/reference/slm/getting-started-slm.asciidoc
index 8f70946e39f2c..3c8e8cd6d2496 100644
--- a/docs/reference/slm/getting-started-slm.asciidoc
+++ b/docs/reference/slm/getting-started-slm.asciidoc
@@ -3,8 +3,8 @@
[[getting-started-snapshot-lifecycle-management]]
=== Tutorial: Automate backups with {slm-init}
-This tutorial demonstrates how to automate daily backups of {es} indices using an {slm-init} policy.
-The policy takes <> of all indices in the cluster
+This tutorial demonstrates how to automate daily backups of {es} data streams and indices using an {slm-init} policy.
+The policy takes <> of all data streams and indices in the cluster
and stores them in a local repository.
It also defines a retention policy and automatically deletes snapshots
when they are no longer needed.
@@ -47,7 +47,7 @@ PUT /_snapshot/my_repository
Once you have a repository in place,
you can define an {slm-init} policy to take snapshots automatically.
-The policy defines when to take snapshots, which indices should be included,
+The policy defines when to take snapshots, which data streams or indices should be included,
and what to name the snapshots.
A policy can also specify a <> and
automatically delete snapshots when they are no longer needed.
@@ -58,7 +58,7 @@ Snapshots are incremental and make efficient use of storage.
You can define and manage policies through {kib} Management or with the put policy API.
For example, you could define a `nightly-snapshots` policy
-to back up all of your indices daily at 2:30AM UTC.
+to back up all of your data streams and indices daily at 2:30AM UTC.
A put policy request defines the policy configuration in JSON:
@@ -86,13 +86,13 @@ PUT /_slm/policy/nightly-snapshots
<> to include the current date in the snapshot name
<3> Where to store the snapshot
<4> The configuration to be used for the snapshot requests (see below)
-<5> Which indices to include in the snapshot: all indices
+<5> Which data streams or indices to include in the snapshot: all data streams and indices
<6> Optional retention policy: keep snapshots for 30 days,
retaining at least 5 and no more than 50 snapshots regardless of age
You can specify additional snapshot configuration options to customize how snapshots are taken.
For example, you could configure the policy to fail the snapshot
-if one of the specified indices is missing.
+if one of the specified data streams or indices is missing.
For more information about snapshot options, see <>.
[discrete]
diff --git a/docs/reference/slm/index.asciidoc b/docs/reference/slm/index.asciidoc
index 34594910d99b7..a90489ff7589d 100644
--- a/docs/reference/slm/index.asciidoc
+++ b/docs/reference/slm/index.asciidoc
@@ -4,12 +4,12 @@
== {slm-init}: Manage the snapshot lifecycle
You can set up snapshot lifecycle policies to automate the timing, frequency, and retention of snapshots.
-Snapshot policies can apply to multiple indices.
+Snapshot policies can apply to multiple data streams and indices.
The {slm} ({slm-init}) <> provide
the building blocks for the snapshot policy features that are part of {kib} Management.
{kibana-ref}/snapshot-repositories.html[Snapshot and Restore] makes it easy to
-set up policies, register snapshot repositories, view and manage snapshots, and restore indices.
+set up policies, register snapshot repositories, view and manage snapshots, and restore data streams or indices.
You can stop and restart {slm-init} to temporarily pause automatic backups while performing
upgrades or other maintenance.
diff --git a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc
index 4bc544c6c2ac0..e6e05f83bf3be 100644
--- a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc
+++ b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc
@@ -45,11 +45,11 @@ cluster, as well as the cluster state. You can change this behavior by
specifying a list of data streams and indices to back up in the body of the
snapshot request.
-NOTE: You must register a snapshot before performing snapshot and restore operations. Use the <> to register new repositories and update existing ones.
+NOTE: You must register a snapshot repository before performing snapshot and restore operations. Use the <> to register new repositories and update existing ones.
The snapshot process is incremental. When creating a snapshot, {es} analyzes the list of files that are already stored in the repository and copies only files that were created or changed since the last snapshot. This process allows multiple snapshots to be preserved in the repository in a compact form.
-The snapshot process is executed in non-blocking fashion, so all indexing and searching operations can run concurrently against the data stream or index that {es} is snapshotting. Only one snapshot process can run in the cluster at any time.
+The snapshot process is executed in non-blocking fashion, so all indexing and searching operations can run concurrently against the data stream or index that {es} is snapshotting.
A snapshot represents a point-in-time view of the moment when the snapshot was created. No records that were added to a data stream or index after the snapshot process started will be present in the snapshot.
@@ -124,9 +124,6 @@ If `true`, allows taking a partial snapshot of indices with unavailable shards.
If `true`, the request returns a response when the snapshot is complete.
If `false`, the request returns a response when the snapshot initializes.
Defaults to `false`.
-+
-NOTE: During snapshot initialization, information about all
-previous snapshots is loaded into memory. In large repositories, this load time can cause requests to take several seconds (or even minutes) to return a response, even if the `wait_for_completion` parameter is `false`.
[[create-snapshot-api-example]]
==== {api-examples-title}
diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc
new file mode 100644
index 0000000000000..5ff01f6a201fe
--- /dev/null
+++ b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc
@@ -0,0 +1,240 @@
+[[get-snapshot-api]]
+=== Get snapshot API
+++++
+Get snapshot
+++++
+
+Retrieves information about one or more snapshots.
+
+////
+[source,console]
+----
+PUT /_snapshot/my_repository
+{
+ "type": "fs",
+ "settings": {
+ "location": "my_backup_location"
+ }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+
+PUT /_snapshot/my_repository/snapshot_2?wait_for_completion=true
+----
+// TESTSETUP
+////
+
+[source,console]
+----
+GET /_snapshot/my_repository/my_snapshot
+----
+
+[[get-snapshot-api-request]]
+==== {api-request-title}
+
+`GET /_snapshot//`
+
+[[get-snapshot-api-desc]]
+==== {api-description-title}
+
+Use the get snapshot API to return information about one or more snapshots, including:
+
+* Start and end time values
+* Version of {es} that created the snapshot
+* List of included indices
+* Current state of the snapshot
+* List of failures that occurred during the snapshot
+
+[[get-snapshot-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+Comma-separated list of snapshot repository names used to limit the request.
+Wildcard (`*`) expressions are supported.
++
+To get information about snapshots in all registered repositories in the
+cluster, use `*` or `_all`.
+
+``::
+(Required, string)
+Comma-separated list of snapshot names to retrieve. Also accepts wildcards (`*`).
++
+* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
+* To get information about any snapshots that are currently running, use `_current`.
++
+NOTE: Using `_all` in a request fails if any snapshots are unavailable.
+Set <> to `true` to return only available snapshots.
+
+[role="child_attributes"]
+[[get-snapshot-api-request-body]]
+==== {api-request-body-title}
+
+[[get-snapshot-api-ignore-unavailable]]
+`ignore_unavailable`::
+(Optional, boolean)
+If `false`, the request returns an error for any snapshots that are unavailable. Defaults to `false`.
++
+If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
+
+`verbose`::
+(Optional, boolean)
+If `true`, returns all available information about a snapshot. Defaults to `true`.
++
+If `false`, omits additional information about the snapshot, such as version information, start and end times, and the number of snapshotted shards.
+
+[role="child_attributes"]
+[[get-snapshot-api-response-body]]
+==== {api-response-body-title}
+
+`snapshot`::
+(string)
+Name of the snapshot.
+
+`uuid`::
+(string)
+Universally unique identifier (UUID) of the snapshot.
+
+`version_id`::
+(int)
+Build ID of the {es} version used to create the snapshot.
+
+`version`::
+(float)
+{es} version used to create the snapshot.
+
+`indices`::
+(array)
+List of indices included in the snapshot.
+
+`data_streams`::
+(array)
+List of <> included in the snapshot.
+
+`include_global_state`::
+(boolean)
+Indicates whether the current cluster state is included in the snapshot.
+
+`start_time`::
+(string)
+Date timestamp of when the snapshot creation process started.
+
+`start_time_in_millis`::
+(long)
+The time, in milliseconds, when the snapshot creation process started.
+
+`end_time`::
+(string)
+Date timestamp of when the snapshot creation process ended.
+
+`end_time_in_millis`::
+(long)
+The time, in milliseconds, when the snapshot creation process ended.
+
+`duration_in_millis`::
+(long)
+How long, in milliseconds, it took to create the snapshot.
+
+[[get-snapshot-api-response-failures]]
+`failures`::
+(array)
+Lists any failures that occurred when creating the snapshot.
+
+`shards`::
+(object)
+Contains a count of shards in the snapshot.
++
+.Properties of `shards`
+[%collapsible%open]
+====
+`total`::
+(integer)
+Total number of shards included in the snapshot.
+
+`successful`::
+(integer)
+Number of shards that were successfully included in the snapshot.
+
+`failed`::
+(integer)
+Number of shards that failed to be included in the snapshot.
+====
+
+`state`::
++
+--
+(string)
+The snapshot `state` can be one of the following values:
+
+.Values for `state`
+[%collapsible%open]
+====
+`IN_PROGRESS`::
+ The snapshot is currently running.
+
+`SUCCESS`::
+ The snapshot finished and all shards were stored successfully.
+
+`FAILED`::
+ The snapshot finished with an error and failed to store any data.
+
+`PARTIAL`::
+ The global cluster state was stored, but data of at least one shard was not stored successfully.
+ The <> section of the response contains more detailed information about shards
+ that were not processed correctly.
+====
+--
+
+[[get-snapshot-api-example]]
+==== {api-examples-title}
+
+The following request returns information for `snapshot_2` in the `my_repository` repository.
+
+[source,console]
+----
+GET /_snapshot/my_repository/snapshot_2
+----
+
+The API returns the following response:
+
+[source,console-result]
+----
+{
+ "responses": [
+ {
+ "repository": "my_repository",
+ "snapshots": [
+ {
+ "snapshot": "snapshot_2",
+ "uuid": "vdRctLCxSketdKb54xw67g",
+ "version_id": ,
+ "version": ,
+ "indices": [],
+ "data_streams": [],
+ "include_global_state": true,
+ "state": "SUCCESS",
+ "start_time": "2020-07-06T21:55:18.129Z",
+ "start_time_in_millis": 1593093628850,
+ "end_time": "2020-07-06T21:55:18.129Z",
+ "end_time_in_millis": 1593094752018,
+ "duration_in_millis": 0,
+ "failures": [],
+ "shards": {
+ "total": 0,
+ "failed": 0,
+ "successful": 0
+ }
+ }
+ ]
+ }
+ ]
+}
+----
+// TESTRESPONSE[s/"uuid": "vdRctLCxSketdKb54xw67g"/"uuid": $body.responses.0.snapshots.0.uuid/]
+// TESTRESPONSE[s/"version_id": /"version_id": $body.responses.0.snapshots.0.version_id/]
+// TESTRESPONSE[s/"version": /"version": $body.responses.0.snapshots.0.version/]
+// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.129Z"/"start_time": $body.responses.0.snapshots.0.start_time/]
+// TESTRESPONSE[s/"start_time_in_millis": 1593093628850/"start_time_in_millis": $body.responses.0.snapshots.0.start_time_in_millis/]
+// TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.129Z"/"end_time": $body.responses.0.snapshots.0.end_time/]
+// TESTRESPONSE[s/"end_time_in_millis": 1593094752018/"end_time_in_millis": $body.responses.0.snapshots.0.end_time_in_millis/]
+// TESTRESPONSE[s/"duration_in_millis": 0/"duration_in_millis": $body.responses.0.snapshots.0.duration_in_millis/]
diff --git a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc
index 6310cb3917797..f30f04d754310 100644
--- a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc
+++ b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc
@@ -25,6 +25,7 @@ content may not be included yet.
[[snapshot-management-apis]]
=== Snapshot management APIs
* <>
+* <>
* <>
include::put-repo-api.asciidoc[]
@@ -33,4 +34,5 @@ include::get-repo-api.asciidoc[]
include::delete-repo-api.asciidoc[]
include::clean-up-repo-api.asciidoc[]
include::create-snapshot-api.asciidoc[]
+include::get-snapshot-api.asciidoc[]
include::delete-snapshot-api.asciidoc[]
diff --git a/docs/reference/snapshot-restore/index.asciidoc b/docs/reference/snapshot-restore/index.asciidoc
index ae0547db64095..16397a7d3f8b2 100644
--- a/docs/reference/snapshot-restore/index.asciidoc
+++ b/docs/reference/snapshot-restore/index.asciidoc
@@ -5,25 +5,28 @@
--
// tag::snapshot-intro[]
-A _snapshot_ is a backup taken from a running {es} cluster.
-You can take snapshots of individual indices or of the entire cluster.
-Snapshots can be stored in either local or remote repositories.
-Remote repositories can reside on S3, HDFS, Azure, Google Cloud Storage,
+A _snapshot_ is a backup taken from a running {es} cluster.
+You can take snapshots of an entire cluster, including all its data streams and
+indices. You can also take snapshots of only specific data streams or indices in
+the cluster.
+
+Snapshots can be stored in either local or remote repositories.
+Remote repositories can reside on S3, HDFS, Azure, Google Cloud Storage,
and other platforms supported by a repository plugin.
-Snapshots are incremental: each snapshot of an index only stores data that
-is not part of an earlier snapshot.
+Snapshots are incremental: each snapshot only stores data that
+is not part of an earlier snapshot.
This enables you to take frequent snapshots with minimal overhead.
-// end::snapshot-intro[]
+// end::snapshot-intro[]
// tag::restore-intro[]
-You can restore snapshots to a running cluster with the <>.
-By default, all indices in the snapshot are restored.
-Alternatively, you can restore specific indices or restore the cluster state from a snapshot.
-When restoring indices, you can modify the index name and selected index settings.
+You can restore snapshots to a running cluster with the <>.
+By default, all data streams and indices in the snapshot are restored.
+However, you can choose to restore only the cluster state or specific data
+streams or indices from a snapshot.
// end::restore-intro[]
-You must <>
+You must <>
before you can <>.
You can use <>
@@ -50,7 +53,7 @@ compatibility. Follow the <>
when migrating between versions.
A snapshot contains a copy of the on-disk data structures that make up an
-index. This means that snapshots can only be restored to versions of
+index or a data stream's backing indices. This means that snapshots can only be restored to versions of
{es} that can read the indices:
* A snapshot of an index created in 6.x can be restored to 7.x.
@@ -67,20 +70,21 @@ We do not recommend restoring snapshots from later {es} versions in earlier
versions. In some cases, the snapshots cannot be restored. For example, a
snapshot taken in 7.6.0 cannot be restored to 7.5.0.
-Each snapshot can contain indices created in various versions of {es},
-and when restoring a snapshot it must be possible to restore all of the indices
-into the target cluster. If any indices in a snapshot were created in an
-incompatible version, you will not be able restore the snapshot.
+Each snapshot can contain indices created in various versions of {es}. This
+includes backing indices created for data streams. When restoring a snapshot, it
+must be possible to restore all of these indices into the target cluster. If any
+indices in a snapshot were created in an incompatible version, you will not be
+able to restore the snapshot.
IMPORTANT: When backing up your data prior to an upgrade, keep in mind that you
won't be able to restore snapshots after you upgrade if they contain indices
created in a version that's incompatible with the upgrade version.
-If you end up in a situation where you need to restore a snapshot of an index
+If you end up in a situation where you need to restore a snapshot of a data stream or index
that is incompatible with the version of the cluster you are currently running,
you can restore it on the latest compatible version and use
-<> to rebuild the index on the current
-version. Reindexing from remote is only possible if the original index has
+<> to rebuild the data stream or index on the current
+version. Reindexing from remote is only possible if the original data stream or index has
source enabled. Retrieving and reindexing the data can take significantly
longer than simply restoring a snapshot. If you have a large amount of data, we
recommend testing the reindex from remote process with a subset of your data to
diff --git a/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc b/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc
index 0f2c8619b06ac..77cf59b1afc41 100644
--- a/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc
+++ b/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc
@@ -76,15 +76,15 @@ DELETE /_snapshot/my_backup/snapshot_1
// TEST[continued]
The restore operation uses the standard shard recovery mechanism. Therefore, any currently running restore operation can
-be canceled by deleting indices that are being restored. Please note that data for all deleted indices will be removed
+be canceled by deleting data streams and indices that are being restored. Please note that data for all deleted data streams and indices will be removed
from the cluster as a result of this operation.
[float]
=== Effect of cluster blocks on snapshot and restore
Many snapshot and restore operations are affected by cluster and index blocks. For example, registering and unregistering
-repositories require write global metadata access. The snapshot operation requires that all indices and their metadata as
-well as the global metadata were readable. The restore operation requires the global metadata to be writable, however
+repositories require write global metadata access. The snapshot operation requires that all indices, backing indices, and their metadata as
+well as the global metadata be readable. The restore operation requires the global metadata to be writable, however
the index level blocks are ignored during restore because indices are essentially recreated during restore.
Please note that a repository content is not part of the cluster and therefore cluster blocks don't affect internal
repository operations such as listing or deleting snapshots from an already registered repository.
diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc
index 1b0ec4585c73d..3ef4beee96116 100644
--- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc
+++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc
@@ -1,9 +1,5 @@
[[snapshots-restore-snapshot]]
-== Restore indices from a snapshot
-
-++++
-Restore a snapshot
-++++
+== Restore a snapshot
////
[source,console]
@@ -29,15 +25,48 @@ A snapshot can be restored using the following command:
POST /_snapshot/my_backup/snapshot_1/_restore
-----------------------------------
-By default, all indices in the snapshot are restored, and the cluster state is
-*not* restored. It's possible to select indices that should be restored as well
+By default, all data streams and indices in the snapshot are restored, but the cluster state is
+*not* restored. It's possible to select specific data streams or indices that should be restored as well
as to allow the global cluster state from being restored by using `indices` and
-`include_global_state` options in the restore request body. The list of indices
-supports <>. The `rename_pattern`
-and `rename_replacement` options can be also used to rename indices on restore
+`include_global_state` options in the restore request body. The list
+supports <>.
+
+[WARNING]
+====
+Each data stream requires a matching
+<>. The stream uses this
+template to create new backing indices.
+
+When restoring a data stream, ensure a matching template exists for the stream.
+You can do this using one of the following methods:
+
+* Check for existing templates that match the stream. If no matching template
+ exists, <>.
+
+* Restore a global cluster state that includes a matching template for the
+ stream.
+
+If no index template matches a data stream, the stream cannot
+<> or create new backing indices.
+====
+
+The `rename_pattern`
+and `rename_replacement` options can be also used to rename data streams and indices on restore
using regular expression that supports referencing the original text as
explained
http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here].
+
+If you rename a restored data stream, its backing indices are also
+renamed. For example, if you rename the `logs` data stream to `restored-logs`,
+the backing index `.ds-logs-000005` is renamed to `.ds-restored-logs-000005`.
+
+[WARNING]
+====
+If you rename a restored stream, ensure an index template matches the new stream
+name. If no index template matches the stream, it cannot
+<> or create new backing indices.
+====
+
Set `include_aliases` to `false` to prevent aliases from being restored together
with associated indices.
@@ -45,7 +74,7 @@ with associated indices.
-----------------------------------
POST /_snapshot/my_backup/snapshot_1/_restore
{
- "indices": "index_1,index_2",
+ "indices": "data_stream_1,index_1,index_2",
"ignore_unavailable": true,
"include_global_state": false, <1>
"rename_pattern": "index_(.+)",
@@ -69,10 +98,22 @@ has the same number of shards as the index in the snapshot. The restore
operation automatically opens restored indices if they were closed and creates
new indices if they didn't exist in the cluster.
+If a data stream is restored, its backing indices are also restored. The restore
+operation automatically opens restored backing indices if they were closed.
+
+NOTE: You cannot restore a data stream if a stream with the same name already
+exists.
+
+In addition to entire data streams, you can restore only specific backing
+indices from a snapshot. However, restored backing indices are not automatically
+added to any existing data streams. For example, if only the `.ds-logs-000003`
+backing index is restored from a snapshot, it is not automatically added to the
+existing `logs` data stream.
+
[float]
=== Partial restore
-By default, the entire restore operation will fail if one or more indices participating in the operation don't have
+By default, the entire restore operation will fail if one or more indices or backing indices participating in the operation don't have
snapshots of all shards available. It can occur if some shards failed to snapshot for example. It is still possible to
restore such indices by setting `partial` to `true`. Please note, that only successfully snapshotted shards will be
restored in this case and all missing shards will be recreated empty.
@@ -102,6 +143,21 @@ POST /_snapshot/my_backup/snapshot_1/_restore
Please note, that some settings such as `index.number_of_shards` cannot be changed during restore operation.
+For data streams, these index settings are applied to the restored backing
+indices.
+
+[IMPORTANT]
+====
+The `index_settings` and `ignore_index_settings` parameters affect
+restored backing indices only. New backing indices created for a stream use the index
+settings specified in the stream's matching
+<>.
+
+If you change index settings during a restore, we recommend you make similar
+changes in the stream's matching index template. This ensures new backing
+indices created for the stream use the same index settings.
+====
+
[float]
=== Restoring to a different cluster
@@ -111,11 +167,11 @@ containing the snapshot in the new cluster and starting the restore process. The
same size or topology. However, the version of the new cluster should be the same or newer (only 1 major version newer) than the cluster that was used to create the snapshot. For example, you can restore a 1.x snapshot to a 2.x cluster, but not a 1.x snapshot to a 5.x cluster.
If the new cluster has a smaller size additional considerations should be made. First of all it's necessary to make sure
-that new cluster have enough capacity to store all indices in the snapshot. It's possible to change indices settings
+that the new cluster has enough capacity to store all data streams and indices in the snapshot. It's possible to change index settings
during restore to reduce the number of replicas, which can help with restoring snapshots into smaller cluster. It's also
-possible to select only subset of the indices using the `indices` parameter.
+possible to select only a subset of the data streams or indices using the `indices` parameter.
-If indices in the original cluster were assigned to particular nodes using
+If indices or backing indices in the original cluster were assigned to particular nodes using
<>, the same rules will be enforced in the new cluster. Therefore
if the new cluster doesn't contain nodes with appropriate attributes that a restored index can be allocated on, such
index will not be successfully restored unless these index allocation settings are changed during restore operation.
diff --git a/docs/reference/snapshot-restore/take-snapshot.asciidoc b/docs/reference/snapshot-restore/take-snapshot.asciidoc
index 495b107b60099..1d52928db538c 100644
--- a/docs/reference/snapshot-restore/take-snapshot.asciidoc
+++ b/docs/reference/snapshot-restore/take-snapshot.asciidoc
@@ -1,9 +1,5 @@
[[snapshots-take-snapshot]]
-== Take a snapshot of one or more indices
-
-++++
-Take a snapshot
-++++
+== Take a snapshot
A repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the
cluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following
@@ -33,14 +29,14 @@ initialization (default) or wait for snapshot completion. During snapshot initia
previous snapshots is loaded into the memory, which means that in large repositories it may take several seconds (or
even minutes) for this command to return even if the `wait_for_completion` parameter is set to `false`.
-By default a snapshot of all open and started indices in the cluster is created. This behavior can be changed by
-specifying the list of indices in the body of the snapshot request.
+By default a snapshot backs up all data streams and open indices in the cluster. This behavior can be changed by
+specifying the list of data streams and indices in the body of the snapshot request.
[source,console]
-----------------------------------
PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true
{
- "indices": "index_1,index_2",
+ "indices": "data_stream_1,index_1,index_2",
"ignore_unavailable": true,
"include_global_state": false,
"metadata": {
@@ -51,13 +47,31 @@ PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true
-----------------------------------
// TEST[skip:cannot complete subsequent snapshot]
-The list of indices that should be included into the snapshot can be specified using the `indices` parameter that
+The list of data streams and indices that should be included into the snapshot can be specified using the `indices` parameter that
supports <>, although the options which control the behavior of multi index syntax
-must be supplied in the body of the request, rather than as request parameters. The snapshot request also supports the
-`ignore_unavailable` option. Setting it to `true` will cause indices that do not exist to be ignored during snapshot
-creation. By default, when `ignore_unavailable` option is not set and an index is missing the snapshot request will fail.
+must be supplied in the body of the request, rather than as request parameters.
+
+Data stream backups include the stream's backing indices and metadata, such as
+the current <> and timestamp field.
+
+You can also choose to include only specific backing indices in a snapshot.
+However, these backups do not include the associated data stream's
+metadata or its other backing indices.
+
+The snapshot request also supports the
+`ignore_unavailable` option. Setting it to `true` will cause data streams and indices that do not exist to be ignored during snapshot
+creation. By default, when the `ignore_unavailable` option is not set and a data stream or index is missing, the snapshot request will fail.
+
By setting `include_global_state` to false it's possible to prevent the cluster global state to be stored as part of
-the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have
+the snapshot.
+
+IMPORTANT: The global cluster state includes the cluster's index
+templates, such as those <>. If your snapshot includes data streams, we recommend storing the
+cluster state as part of the snapshot. This lets you later restore any
+templates required for a data stream.
+
+By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have
all primary shards available. This behaviour can be changed by setting `partial` to `true`. The `expand_wildcards`
option can be used to control whether hidden and closed indices will be included in the snapshot, and defaults to `all`.
@@ -65,7 +79,7 @@ The `metadata` field can be used to attach arbitrary metadata to the snapshot. T
why it was taken, or any other data that might be useful.
Snapshot names can be automatically derived using <>, similarly as when creating
-new indices. Note that special characters need to be URI encoded.
+new data streams or indices. Note that special characters need to be URI encoded.
For example, creating a snapshot with the current day in the name, like `snapshot-2018.05.11`, can be achieved with
the following command:
@@ -78,18 +92,18 @@ PUT /_snapshot/my_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E
// TEST[continued]
-The index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses
-the list of the index files that are already stored in the repository and copies only files that were created or
+The snapshot process is incremental. In the process of making the snapshot, {es} analyses
+the list of the data stream and index files that are already stored in the repository and copies only files that were created or
changed since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.
Snapshotting process is executed in non-blocking fashion. All indexing and searching operation can continue to be
-executed against the index that is being snapshotted. However, a snapshot represents the point-in-time view of the index
-at the moment when snapshot was created, so no records that were added to the index after the snapshot process was started
+executed against the data stream or index that is being snapshotted. However, a snapshot represents a point-in-time view
+at the moment when snapshot was created, so no records that were added to the data stream or index after the snapshot process was started
will be present in the snapshot. The snapshot process starts immediately for the primary shards that has been started
and are not relocating at the moment. Before version 1.2.0, the snapshot operation fails if the cluster has any relocating or
initializing primaries of indices participating in the snapshot. Starting with version 1.2.0, Elasticsearch waits for
relocation or initialization of shards to complete before snapshotting them.
-Besides creating a copy of each index the snapshot process can also store global cluster metadata, which includes persistent
+Besides creating a copy of each data stream and index, the snapshot process can also store global cluster metadata, which includes persistent
cluster settings and templates. The transient settings and registered snapshot repositories are not stored as part of
the snapshot.
@@ -107,35 +121,29 @@ GET /_snapshot/my_backup/snapshot_1
// TEST[continued]
This command returns basic information about the snapshot including start and end time, version of
-Elasticsearch that created the snapshot, the list of included indices, the current state of the
+Elasticsearch that created the snapshot, the list of included data streams and indices, the current state of the
snapshot and the list of failures that occurred during the snapshot. The snapshot `state` can be
[horizontal]
`IN_PROGRESS`::
-
The snapshot is currently running.
`SUCCESS`::
-
The snapshot finished and all shards were stored successfully.
`FAILED`::
-
The snapshot finished with an error and failed to store any data.
`PARTIAL`::
-
- The global cluster state was stored, but data of at least one shard wasn't stored successfully.
- The `failure` section in this case should contain more detailed information about shards
+ The global cluster state was stored, but data of at least one shard was not stored successfully.
+ The `failures` section of the response contains more detailed information about shards
that were not processed correctly.
`INCOMPATIBLE`::
-
- The snapshot was created with an old version of Elasticsearch and therefore is incompatible with
+ The snapshot was created with an old version of {es} and is incompatible with
the current version of the cluster.
-
-Similar as for repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well:
+Similar as for repositories, information about multiple snapshots can be queried in a single request, supporting wildcards as well:
[source,console]
-----------------------------------
@@ -156,7 +164,7 @@ return all snapshots that are currently available.
Getting all snapshots in the repository can be costly on cloud-based repositories,
both from a cost and performance perspective. If the only information required is
-the snapshot names/uuids in the repository and the indices in each snapshot, then
+the snapshot names/uuids in the repository and the data streams and indices in each snapshot, then
the optional boolean parameter `verbose` can be set to `false` to execute a more
performant and cost-effective retrieval of the snapshots in the repository. Note
that setting `verbose` to `false` will omit all other information about the snapshot
diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc
index 7cf08f1c5be10..a506eadd37192 100644
--- a/docs/reference/sql/functions/aggs.asciidoc
+++ b/docs/reference/sql/functions/aggs.asciidoc
@@ -599,6 +599,35 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevPop]
include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevPopScalars]
--------------------------------------------------
+[[sql-functions-aggs-stddev-samp]]
+==== `STDDEV_SAMP`
+
+.Synopsis:
+[source, sql]
+--------------------------------------------------
+STDDEV_SAMP(field_name) <1>
+--------------------------------------------------
+
+*Input*:
+
+<1> a numeric field
+
+*Output*: `double` numeric value
+
+*Description*:
+
+Returns the https://en.wikipedia.org/wiki/Standard_deviation[sample standard deviation] of input values in the field `field_name`.
+
+["source","sql",subs="attributes,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevSamp]
+--------------------------------------------------
+
+["source","sql",subs="attributes,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevSampScalars]
+--------------------------------------------------
+
[[sql-functions-aggs-sum-squares]]
==== `SUM_OF_SQUARES`
@@ -657,3 +686,33 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggVarPop]
--------------------------------------------------
include-tagged::{sql-specs}/docs/docs.csv-spec[aggVarPopScalars]
--------------------------------------------------
+
+[[sql-functions-aggs-var-samp]]
+==== `VAR_SAMP`
+
+.Synopsis:
+[source, sql]
+--------------------------------------------------
+VAR_SAMP(field_name) <1>
+--------------------------------------------------
+
+*Input*:
+
+<1> a numeric field
+
+*Output*: `double` numeric value
+
+*Description*:
+
+Returns the https://en.wikipedia.org/wiki/Variance[sample variance] of input values in the field `field_name`.
+
+["source","sql",subs="attributes,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/docs/docs.csv-spec[aggVarSamp]
+--------------------------------------------------
+
+
+["source","sql",subs="attributes,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/docs/docs.csv-spec[aggVarSampScalars]
+--------------------------------------------------
diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc
index 393266fed8e95..c3c0cf27b2472 100644
--- a/docs/reference/sql/functions/index.asciidoc
+++ b/docs/reference/sql/functions/index.asciidoc
@@ -42,8 +42,10 @@
** <>
** <>
** <>
+** <>
** <>
** <>
+** <>
* <>
** <>
* <>
diff --git a/docs/reference/transform/painless-examples.asciidoc b/docs/reference/transform/painless-examples.asciidoc
index 3e9758992d5f9..c2c24614ef1cd 100644
--- a/docs/reference/transform/painless-examples.asciidoc
+++ b/docs/reference/transform/painless-examples.asciidoc
@@ -106,7 +106,7 @@ You can retrieve the last value in a similar way:
[discrete]
[[painless-time-features]]
-==== Getting time features as scripted fields
+==== Getting time features by using aggregations
This snippet shows how to extract time based features by using Painless in a
{transform}. The snippet uses an index where `@timestamp` is defined as a `date`
@@ -115,37 +115,39 @@ type field.
[source,js]
--------------------------------------------------
"aggregations": {
- "script_fields": {
- "hour_of_day": { <1>
- "script": {
- "lang": "painless",
- "source": """
- ZonedDateTime date = doc['@timestamp'].value; <2>
- return date.getHour(); <3>
- """
- }
- },
- "month_of_year": { <4>
- "script": {
- "lang": "painless",
- "source": """
- ZonedDateTime date = doc['@timestamp'].value; <5>
- return date.getMonthValue(); <6>
- """
- }
+ "avg_hour_of_day": { <1>
+ "avg":{
+ "script": { <2>
+ "source": """
+ ZonedDateTime date = doc['@timestamp'].value; <3>
+ return date.getHour(); <4>
+ """
}
- },
- ...
+ }
+ },
+ "avg_month_of_year": { <5>
+ "avg":{
+ "script": { <6>
+ "source": """
+ ZonedDateTime date = doc['@timestamp'].value; <7>
+ return date.getMonthValue(); <8>
+ """
+ }
+ }
+ },
+ ...
}
--------------------------------------------------
// NOTCONSOLE
-<1> Contains the Painless script that returns the hour of the day.
-<2> Sets `date` based on the timestamp of the document.
-<3> Returns the hour value from `date`.
-<4> Contains the Painless script that returns the month of the year.
-<5> Sets `date` based on the timestamp of the document.
-<6> Returns the month value from `date`.
+<1> Name of the aggregation.
+<2> Contains the Painless script that returns the hour of the day.
+<3> Sets `date` based on the timestamp of the document.
+<4> Returns the hour value from `date`.
+<5> Name of the aggregation.
+<6> Contains the Painless script that returns the month of the year.
+<7> Sets `date` based on the timestamp of the document.
+<8> Returns the month value from `date`.
[discrete]
diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
index 988efecbc83c8..b9a5164e1942d 100644
--- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
+++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
@@ -210,6 +210,12 @@ public void declareLong(BiConsumer consumer, ParseField field) {
declareField(consumer, p -> p.longValue(), field, ValueType.LONG);
}
+ public void declareLongOrNull(BiConsumer consumer, long nullValue, ParseField field) {
+ // Using a method reference here angers some compilers
+ declareField(consumer, p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? nullValue : p.longValue(),
+ field, ValueType.LONG_OR_NULL);
+ }
+
public void declareInt(BiConsumer consumer, ParseField field) {
// Using a method reference here angers some compilers
declareField(consumer, p -> p.intValue(), field, ValueType.INT);
diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle
index 2a3bf9b22f0dd..aad781fb2cacd 100644
--- a/modules/aggs-matrix-stats/build.gradle
+++ b/modules/aggs-matrix-stats/build.gradle
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
esplugin {
description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.'
@@ -28,3 +28,5 @@ restResources {
includeCore '_common', 'indices', 'cluster', 'index', 'search', 'nodes'
}
}
+
+integTest.enabled = false
diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java b/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java
similarity index 100%
rename from modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java
rename to modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java
diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/10_basic.yml b/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/10_basic.yml
similarity index 100%
rename from modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/10_basic.yml
rename to modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/10_basic.yml
diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml b/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/20_empty_bucket.yml
similarity index 100%
rename from modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml
rename to modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/20_empty_bucket.yml
diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml b/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/30_single_value_field.yml
similarity index 100%
rename from modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml
rename to modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/30_single_value_field.yml
diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml b/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/40_multi_value_field.yml
similarity index 100%
rename from modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml
rename to modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/40_multi_value_field.yml
diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle
index 4af7cf09b4ca7..f631aa725d017 100644
--- a/modules/analysis-common/build.gradle
+++ b/modules/analysis-common/build.gradle
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
esplugin {
description 'Adds "built in" analyzers to Elasticsearch.'
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java
similarity index 98%
rename from modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersTests.java
rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java
index 215bf3e9f0f2c..48554fe3d26bb 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java
@@ -32,7 +32,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-public class QueryStringWithAnalyzersTests extends ESIntegTestCase {
+public class QueryStringWithAnalyzersIT extends ESIntegTestCase {
@Override
protected Collection> nodePlugins() {
return Arrays.asList(CommonAnalysisPlugin.class);
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisClientYamlTestSuiteIT.java b/modules/analysis-common/src/yamlRestTest/java/org/elasticsearch/analysis/common/CommonAnalysisClientYamlTestSuiteIT.java
similarity index 100%
rename from modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisClientYamlTestSuiteIT.java
rename to modules/analysis-common/src/yamlRestTest/java/org/elasticsearch/analysis/common/CommonAnalysisClientYamlTestSuiteIT.java
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/10_basic.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/10_basic.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/10_basic.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/10_basic.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/50_char_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/50_char_filters.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices/validate_query/10_synonyms.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/10_match.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/10_match.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/10_match.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/10_match.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/40_query_string.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/60_synonym_graph.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/70_intervals.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/70_intervals.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/30_synonyms.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/30_synonyms.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/30_synonyms.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/30_synonyms.yml
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/termvectors/10_payloads.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/termvectors/10_payloads.yml
similarity index 100%
rename from modules/analysis-common/src/test/resources/rest-api-spec/test/termvectors/10_payloads.yml
rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/termvectors/10_payloads.yml
diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle
index e0f5147df8b90..c089b395df062 100644
--- a/modules/geo/build.gradle
+++ b/modules/geo/build.gradle
@@ -16,13 +16,21 @@
* specific language governing permissions and limitations
* under the License.
*/
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
esplugin {
description 'Placeholder plugin for geospatial features in ES. only registers geo_shape field mapper for now'
classname 'org.elasticsearch.geo.GeoPlugin'
}
+restResources {
+ restApi {
+ includeCore '_common', 'indices', 'index', 'search'
+ }
+}
artifacts {
- restTests(new File(projectDir, "src/test/resources/rest-api-spec/test"))
+ restTests(project.file('src/yamlRestTest/resources/rest-api-spec/test'))
}
+
+integTest.enabled = false
+test.enabled = false
diff --git a/modules/geo/src/test/java/org/elasticsearch/geo/GeoTests.java b/modules/geo/src/test/java/org/elasticsearch/geo/GeoTests.java
deleted file mode 100644
index 056b485393082..0000000000000
--- a/modules/geo/src/test/java/org/elasticsearch/geo/GeoTests.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.geo;
-
-import org.elasticsearch.test.ESTestCase;
-
-public class GeoTests extends ESTestCase {
-
- public void testStub() {
- // the build expects unit tests to exist in a module, so here one is.
- }
-}
diff --git a/modules/geo/src/test/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java b/modules/geo/src/yamlRestTest/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java
similarity index 100%
rename from modules/geo/src/test/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java
rename to modules/geo/src/yamlRestTest/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java
diff --git a/modules/geo/src/test/resources/rest-api-spec/test/geo_shape/10_basic.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/10_basic.yml
similarity index 100%
rename from modules/geo/src/test/resources/rest-api-spec/test/geo_shape/10_basic.yml
rename to modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/10_basic.yml
diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle
index 3026670bb33d4..2edf77c0c972c 100644
--- a/modules/ingest-common/build.gradle
+++ b/modules/ingest-common/build.gradle
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
esplugin {
description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources'
@@ -36,7 +36,7 @@ restResources {
}
}
-testClusters.integTest {
+testClusters.all {
// Needed in order to test ingest pipeline templating:
// (this is because the integTest node is not using default distribution, but only the minimal number of required modules)
module project(':modules:lang-mustache').tasks.bundlePlugin.archiveFile
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java b/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java
similarity index 100%
rename from modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java
rename to modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/100_date_index_name_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/100_date_index_name_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/100_date_index_name_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/100_date_index_name_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/110_sort.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/110_sort.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/110_sort.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/110_sort.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/120_grok.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/120_grok.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/120_grok.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/120_grok.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/130_escape_dot.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/130_escape_dot.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/130_escape_dot.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/140_json.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/140_json.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/150_kv.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/150_kv.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/160_urldecode.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/160_urldecode.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/160_urldecode.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/160_urldecode.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/170_version.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/170_version.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/170_version.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/170_version.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/180_bytes_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/180_bytes_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/180_bytes_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/180_bytes_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/190_script_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/190_script_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_default_pipeline.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_default_pipeline.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_dissect_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/20_crud.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/20_crud.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/20_crud.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/20_crud.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/210_conditional_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/210_conditional_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/220_drop_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/220_drop_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/220_drop_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/220_drop_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/230_change_target_index.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/230_change_target_index.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/240_required_pipeline.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/240_required_pipeline.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/240_required_pipeline.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/240_required_pipeline.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/250_csv.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/250_csv.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/250_csv.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/250_csv.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/260_seq_no.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/260_seq_no.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/260_seq_no.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/260_seq_no.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/270_set_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/270_set_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/30_date_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/30_date_processor.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/30_date_processor.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/30_date_processor.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/40_mutate.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/40_mutate.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/50_on_failure.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/50_on_failure.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/50_on_failure.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/50_on_failure.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/60_fail.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/60_fail.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/60_fail.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/60_fail.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/70_bulk.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/80_foreach.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_foreach.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/80_foreach.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_foreach.yml
diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml
similarity index 100%
rename from modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml
rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml
diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle
index 3c86246484219..f0525a05d6f9d 100644
--- a/modules/ingest-geoip/build.gradle
+++ b/modules/ingest-geoip/build.gradle
@@ -19,7 +19,8 @@
import org.apache.tools.ant.taskdefs.condition.Os
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
+apply plugin: 'elasticsearch.internal-cluster-test'
esplugin {
description 'Ingest processor that uses lookup geo data based on IP adresses using the MaxMind geo database'
@@ -42,6 +43,8 @@ restResources {
}
}
+integTest.enabled = false
+
task copyDefaultGeoIp2DatabaseFiles(type: Copy) {
from { zipTree(configurations.testCompileClasspath.files.find { it.name.contains('geolite2-databases') }) }
into "${project.buildDir}/ingest-geoip"
diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java
similarity index 99%
rename from modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java
rename to modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java
index c79fdce16f346..d9e11c2090a16 100644
--- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java
+++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java
@@ -50,7 +50,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
-public class GeoIpProcessorNonIngestNodeTests extends ESIntegTestCase {
+public class GeoIpProcessorNonIngestNodeIT extends ESIntegTestCase {
public static class IngestGeoIpSettingsPlugin extends Plugin {
diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java
similarity index 100%
rename from modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java
rename to modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java
diff --git a/modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/10_basic.yml
similarity index 100%
rename from modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yml
rename to modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/10_basic.yml
diff --git a/modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml
similarity index 100%
rename from modules/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml
rename to modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml
diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle
index 9b12ab11df7c6..3d38ea9999574 100644
--- a/modules/ingest-user-agent/build.gradle
+++ b/modules/ingest-user-agent/build.gradle
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
esplugin {
description 'Ingest processor that extracts information from a user agent'
@@ -29,6 +29,8 @@ restResources {
}
}
-testClusters.integTest {
+testClusters.all {
extraConfigFile 'ingest-user-agent/test-regexes.yml', file('src/test/test-regexes.yml')
}
+
+integTest.enabled = false
diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java b/modules/ingest-user-agent/src/yamlRestTest/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java
similarity index 100%
rename from modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java
rename to modules/ingest-user-agent/src/yamlRestTest/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java
diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/10_basic.yml b/modules/ingest-user-agent/src/yamlRestTest/resources/rest-api-spec/test/ingest-useragent/10_basic.yml
similarity index 100%
rename from modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/10_basic.yml
rename to modules/ingest-user-agent/src/yamlRestTest/resources/rest-api-spec/test/ingest-useragent/10_basic.yml
diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml b/modules/ingest-user-agent/src/yamlRestTest/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml
similarity index 100%
rename from modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml
rename to modules/ingest-user-agent/src/yamlRestTest/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml
diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml b/modules/ingest-user-agent/src/yamlRestTest/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml
similarity index 100%
rename from modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml
rename to modules/ingest-user-agent/src/yamlRestTest/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml
diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle
index 41b31a65fd424..3b8c42fea93e6 100644
--- a/modules/lang-expression/build.gradle
+++ b/modules/lang-expression/build.gradle
@@ -16,7 +16,8 @@
* specific language governing permissions and limitations
* under the License.
*/
-apply plugin: 'elasticsearch.rest-resources'
+apply plugin: 'elasticsearch.yaml-rest-test'
+apply plugin: 'elasticsearch.internal-cluster-test'
esplugin {
description 'Lucene expressions integration for Elasticsearch'
@@ -36,6 +37,8 @@ restResources {
}
}
+integTest.enabled = false
+
tasks.named("dependencyLicenses").configure {
mapping from: /lucene-.*/, to: 'lucene'
mapping from: /asm-.*/, to: 'asm'
diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java
similarity index 99%
rename from modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java
rename to modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java
index 09802d5ec3f2c..294576c1c8230 100644
--- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java
+++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java
@@ -61,7 +61,7 @@
import static org.hamcrest.Matchers.notNullValue;
// TODO: please convert to unit tests!
-public class MoreExpressionTests extends ESIntegTestCase {
+public class MoreExpressionIT extends ESIntegTestCase {
@Override
protected Collection